-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  README.md | 12
-rw-r--r--  README_AWS.md | 200
-rw-r--r--  README_GCE.md | 136
-rw-r--r--  README_libvirt.md | 163
-rw-r--r--  README_openstack.md | 87
-rw-r--r--  README_vagrant.md | 1
-rw-r--r--  bin/README.md | 6
-rwxr-xr-x  bin/cluster | 424
-rw-r--r--  callback_plugins/aa_version_requirement.py | 2
-rw-r--r--  docs/repo_structure.md | 6
-rw-r--r--  filter_plugins/oo_filters.py | 15
-rw-r--r--  inventory/README.md | 6
-rw-r--r--  inventory/aws/hosts/ec2.ini | 189
-rwxr-xr-x  inventory/aws/hosts/ec2.py | 1511
-rw-r--r--  inventory/aws/hosts/hosts | 1
-rw-r--r--  inventory/byo/hosts.origin.example | 49
-rw-r--r--  inventory/byo/hosts.ose.example | 53
-rwxr-xr-x  inventory/gce/hosts/gce.py | 477
-rw-r--r--  inventory/gce/hosts/hosts | 1
-rw-r--r--  inventory/libvirt/hosts/hosts | 1
-rw-r--r--  inventory/libvirt/hosts/libvirt.ini | 20
-rwxr-xr-x  inventory/libvirt/hosts/libvirt_generic.py | 191
-rw-r--r--  inventory/openstack/hosts/hosts | 1
-rwxr-xr-x  inventory/openstack/hosts/openstack.py | 247
-rw-r--r--  openshift-ansible.spec | 153
-rw-r--r--  playbooks/README.md | 2
-rw-r--r--  playbooks/aws/README.md | 268
-rwxr-xr-x  playbooks/aws/openshift-cluster/accept.yml | 48
-rw-r--r--  playbooks/aws/openshift-cluster/add_nodes.yml | 35
-rw-r--r--  playbooks/aws/openshift-cluster/build_ami.yml | 150
-rw-r--r--  playbooks/aws/openshift-cluster/build_node_group.yml | 47
-rw-r--r--  playbooks/aws/openshift-cluster/cluster_hosts.yml | 25
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml | 37
-rw-r--r--  playbooks/aws/openshift-cluster/launch.yml | 54
-rw-r--r--  playbooks/aws/openshift-cluster/library/ec2_ami_find.py | 303
-rw-r--r--  playbooks/aws/openshift-cluster/list.yml | 23
-rw-r--r--  playbooks/aws/openshift-cluster/provision.yml | 157
-rw-r--r--  playbooks/aws/openshift-cluster/provision_nodes.yml | 47
-rw-r--r--  playbooks/aws/openshift-cluster/scaleup.yml | 32
-rw-r--r--  playbooks/aws/openshift-cluster/service.yml | 31
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 188
-rw-r--r--  playbooks/aws/openshift-cluster/templates/user_data.j2 | 22
-rw-r--r--  playbooks/aws/openshift-cluster/terminate.yml | 77
-rw-r--r--  playbooks/aws/openshift-cluster/update.yml | 34
-rw-r--r--  playbooks/aws/openshift-cluster/vars.yml | 144
-rw-r--r--  playbooks/byo/openshift-checks/README.md | 48
-rw-r--r--  playbooks/byo/openshift-checks/adhoc.yml | 27
-rw-r--r--  playbooks/byo/openshift-checks/health.yml | 3
-rw-r--r--  playbooks/byo/openshift-checks/pre-install.yml | 3
-rw-r--r--  playbooks/byo/openshift-cluster/openshift-prometheus.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/openshift-provisioners.yml | 6
-rw-r--r--  playbooks/byo/openshift-etcd/config.yml | 8
-rw-r--r--  playbooks/byo/openshift-etcd/migrate.yml | 6
-rw-r--r--  playbooks/byo/openshift-etcd/restart.yml | 4
-rw-r--r--  playbooks/byo/openshift-etcd/scaleup.yml | 6
-rw-r--r--  playbooks/byo/openshift-master/config.yml | 6
-rw-r--r--  playbooks/byo/openshift-master/restart.yml | 4
-rw-r--r--  playbooks/byo/openshift-master/scaleup.yml | 2
-rw-r--r--  playbooks/byo/openshift-node/config.yml | 6
-rw-r--r--  playbooks/byo/openshift-node/restart.yml | 4
-rw-r--r--  playbooks/byo/openshift-node/scaleup.yml | 9
-rw-r--r--  playbooks/byo/vagrant.yml | 4
-rw-r--r--  playbooks/common/README.md | 7
-rw-r--r--  playbooks/common/openshift-checks/adhoc.yml | 12
-rw-r--r--  playbooks/common/openshift-checks/health.yml | 6
-rw-r--r--  playbooks/common/openshift-checks/pre-install.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 38
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 19
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml | 19
-rw-r--r--  playbooks/common/openshift-cluster/initialize_firewall.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/openshift_prometheus.yml | 9
-rw-r--r--  playbooks/common/openshift-cluster/update_repos_and_packages.yml | 18
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml (renamed from playbooks/common/openshift-cluster/upgrades/docker/restart.yml) | 0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml (renamed from playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml) | 0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml (renamed from playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml) | 0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml | 11
-rw-r--r--  playbooks/common/openshift-etcd/config.yml | 1
-rw-r--r--  playbooks/common/openshift-etcd/migrate.yml | 75
-rw-r--r--  playbooks/common/openshift-etcd/scaleup.yml | 19
-rw-r--r--  playbooks/common/openshift-etcd/service.yml | 23
-rw-r--r--  playbooks/common/openshift-loadbalancer/config.yml | 1
-rw-r--r--  playbooks/common/openshift-loadbalancer/service.yml | 23
-rw-r--r--  playbooks/common/openshift-master/additional_config.yml (renamed from playbooks/common/openshift-cluster/additional_config.yml) | 0
-rw-r--r--  playbooks/common/openshift-master/config.yml | 20
-rw-r--r--  playbooks/common/openshift-master/scaleup.yml | 37
-rw-r--r--  playbooks/common/openshift-master/service.yml | 23
-rw-r--r--  playbooks/common/openshift-nfs/service.yml | 21
-rw-r--r--  playbooks/common/openshift-node/config.yml | 18
-rw-r--r--  playbooks/common/openshift-node/scaleup.yml | 50
-rw-r--r--  playbooks/common/openshift-node/service.yml | 26
-rw-r--r--  playbooks/gce/README.md | 4
-rw-r--r--  playbooks/gce/openshift-cluster/add_nodes.yml | 43
-rw-r--r--  playbooks/gce/openshift-cluster/cluster_hosts.yml | 25
-rw-r--r--  playbooks/gce/openshift-cluster/config.yml | 36
l---------  playbooks/gce/openshift-cluster/filter_plugins | 1
-rw-r--r--  playbooks/gce/openshift-cluster/launch.yml | 67
-rw-r--r--  playbooks/gce/openshift-cluster/list.yml | 23
l---------  playbooks/gce/openshift-cluster/lookup_plugins | 1
l---------  playbooks/gce/openshift-cluster/roles | 1
-rw-r--r--  playbooks/gce/openshift-cluster/service.yml | 29
-rw-r--r--  playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 65
-rw-r--r--  playbooks/gce/openshift-cluster/terminate.yml | 58
-rw-r--r--  playbooks/gce/openshift-cluster/update.yml | 34
-rw-r--r--  playbooks/gce/openshift-cluster/vars.yml | 18
-rw-r--r--  playbooks/libvirt/README.md | 4
-rw-r--r--  playbooks/libvirt/openshift-cluster/cluster_hosts.yml | 25
-rw-r--r--  playbooks/libvirt/openshift-cluster/config.yml | 39
l---------  playbooks/libvirt/openshift-cluster/filter_plugins | 1
-rw-r--r--  playbooks/libvirt/openshift-cluster/launch.yml | 57
-rw-r--r--  playbooks/libvirt/openshift-cluster/list.yml | 23
l---------  playbooks/libvirt/openshift-cluster/lookup_plugins | 1
l---------  playbooks/libvirt/openshift-cluster/roles | 1
-rw-r--r--  playbooks/libvirt/openshift-cluster/service.yml | 34
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml | 6
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml | 11
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml | 30
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 142
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/domain.xml | 65
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/meta-data | 3
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/network.xml | 23
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/storage-pool.xml | 6
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/user-data | 43
-rw-r--r--  playbooks/libvirt/openshift-cluster/terminate.yml | 70
-rw-r--r--  playbooks/libvirt/openshift-cluster/update.yml | 37
-rw-r--r--  playbooks/libvirt/openshift-cluster/vars.yml | 40
-rw-r--r--  playbooks/openstack/README.md | 4
-rw-r--r--  playbooks/openstack/openshift-cluster/cluster_hosts.yml | 25
-rw-r--r--  playbooks/openstack/openshift-cluster/config.yml | 33
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack.yaml | 508
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml | 152
l---------  playbooks/openstack/openshift-cluster/filter_plugins | 1
-rw-r--r--  playbooks/openstack/openshift-cluster/launch.yml | 191
-rw-r--r--  playbooks/openstack/openshift-cluster/list.yml | 24
l---------  playbooks/openstack/openshift-cluster/lookup_plugins | 1
l---------  playbooks/openstack/openshift-cluster/roles | 1
-rw-r--r--  playbooks/openstack/openshift-cluster/terminate.yml | 49
-rw-r--r--  playbooks/openstack/openshift-cluster/update.yml | 34
-rw-r--r--  playbooks/openstack/openshift-cluster/vars.yml | 38
-rw-r--r--  roles/ansible_service_broker/vars/openshift-enterprise.yml | 2
-rw-r--r--  roles/calico/defaults/main.yaml | 2
-rw-r--r--  roles/calico_master/defaults/main.yaml | 2
-rw-r--r--  roles/cockpit/defaults/main.yml | 4
-rw-r--r--  roles/dns/README.md | 45
-rw-r--r--  roles/dns/defaults/main.yml | 2
-rw-r--r--  roles/dns/handlers/main.yml | 5
-rw-r--r--  roles/dns/meta/main.yml | 9
-rw-r--r--  roles/dns/tasks/main.yml | 46
-rw-r--r--  roles/dns/templates/Dockerfile | 11
-rw-r--r--  roles/dns/templates/named.conf | 23
-rw-r--r--  roles/dns/templates/named.service.j2 | 15
-rw-r--r--  roles/dns/templates/openshift-cluster.zone | 14
-rw-r--r--  roles/docker/tasks/main.yml | 4
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml | 9
-rw-r--r--  roles/docker/templates/crio.conf.j2 | 7
-rw-r--r--  roles/etcd/defaults/main.yaml | 4
-rw-r--r--  roles/etcd/templates/etcd.conf.j2 | 26
-rw-r--r--  roles/etcd_common/defaults/main.yml | 10
-rw-r--r--  roles/etcd_migrate/tasks/add_ttls.yml | 33
-rw-r--r--  roles/etcd_migrate/tasks/check.yml | 3
-rw-r--r--  roles/etcd_migrate/tasks/clean_data.yml | 5
-rw-r--r--  roles/etcd_migrate/tasks/main.yml | 4
-rw-r--r--  roles/etcd_migrate/tasks/migrate.yml | 56
-rw-r--r--  roles/flannel_register/defaults/main.yaml | 5
-rw-r--r--  roles/flannel_register/templates/flannel-config.json | 1
-rw-r--r--  roles/lib_openshift/library/oc_adm_ca_server_cert.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_adm_csr.py | 1649
-rw-r--r--  roles/lib_openshift/library/oc_adm_manage_node.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_group.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_user.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_adm_registry.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_adm_router.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_atomic_container.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_clusterrole.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_configmap.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_edit.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_env.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_group.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_image.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_label.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_obj.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_objectvalidator.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_process.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_project.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_pvc.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_route.py | 16
-rw-r--r--  roles/lib_openshift/library/oc_scale.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_secret.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_service.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount_secret.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_storageclass.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_user.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_version.py | 9
-rw-r--r--  roles/lib_openshift/library/oc_volume.py | 9
-rw-r--r--  roles/lib_openshift/src/ansible/oc_adm_csr.py | 36
-rw-r--r--  roles/lib_openshift/src/ansible/oc_atomic_container.py | 4
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_csr.py | 197
-rw-r--r--  roles/lib_openshift/src/class/oc_route.py | 7
-rw-r--r--  roles/lib_openshift/src/doc/csr | 80
-rw-r--r--  roles/lib_openshift/src/lib/base.py | 9
-rw-r--r--  roles/lib_openshift/src/sources.yml | 10
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oc_adm_csr.yml | 28
-rw-r--r--  roles/lib_utils/library/iam_cert23.py | 314
-rw-r--r--  roles/lib_utils/library/oo_iam_kms.py | 172
-rw-r--r--  roles/nuage_common/defaults/main.yaml | 3
-rw-r--r--  roles/nuage_common/tasks/main.yml | 27
-rw-r--r--  roles/nuage_master/defaults/main.yml | 4
-rw-r--r--  roles/nuage_master/handlers/main.yaml | 4
-rw-r--r--  roles/nuage_master/tasks/main.yaml | 83
-rwxr-xr-x  roles/nuage_master/templates/nuage-master-config-daemonset.j2 | 111
-rwxr-xr-x  roles/nuage_master/templates/nuage-node-config-daemonset.j2 | 206
-rw-r--r--  roles/nuage_master/templates/nuage-openshift-monitor.j2 | 41
-rw-r--r--  roles/nuage_master/vars/main.yaml | 12
-rw-r--r--  roles/nuage_node/defaults/main.yml | 4
-rw-r--r--  roles/nuage_node/handlers/main.yaml | 6
-rw-r--r--  roles/nuage_node/tasks/main.yaml | 51
-rw-r--r--  roles/nuage_node/templates/vsp-openshift.j2 | 29
-rw-r--r--  roles/nuage_node/vars/main.yaml | 2
-rw-r--r--  roles/openshift_aws_ami_copy/README.md | 50
-rw-r--r--  roles/openshift_aws_ami_copy/tasks/main.yml | 26
-rw-r--r--  roles/openshift_aws_elb/README.md | 75
-rw-r--r--  roles/openshift_aws_elb/defaults/main.yml | 33
-rw-r--r--  roles/openshift_aws_elb/meta/main.yml | 12
-rw-r--r--  roles/openshift_aws_elb/tasks/main.yml | 57
-rw-r--r--  roles/openshift_aws_iam_kms/README.md | 43
-rw-r--r--  roles/openshift_aws_iam_kms/defaults/main.yml (renamed from roles/lib_utils/tasks/main.yml) | 0
-rw-r--r--  roles/openshift_aws_iam_kms/meta/main.yml | 13
-rw-r--r--  roles/openshift_aws_iam_kms/tasks/main.yml | 18
-rw-r--r--  roles/openshift_aws_launch_config/README.md | 72
-rw-r--r--  roles/openshift_aws_launch_config/defaults/main.yml | 1
-rw-r--r--  roles/openshift_aws_launch_config/meta/main.yml | 12
-rw-r--r--  roles/openshift_aws_launch_config/tasks/main.yml | 50
-rw-r--r--  roles/openshift_aws_launch_config/templates/cloud-init.j2 | 9
-rw-r--r--  roles/openshift_aws_node_group/README.md | 77
-rw-r--r--  roles/openshift_aws_node_group/defaults/main.yml | 58
-rw-r--r--  roles/openshift_aws_node_group/tasks/main.yml | 32
-rw-r--r--  roles/openshift_aws_s3/README.md | 43
-rw-r--r--  roles/openshift_aws_s3/tasks/main.yml | 6
-rw-r--r--  roles/openshift_aws_sg/README.md | 59
-rw-r--r--  roles/openshift_aws_sg/defaults/main.yml | 48
-rw-r--r--  roles/openshift_aws_sg/tasks/main.yml | 53
-rw-r--r--  roles/openshift_aws_ssh_keys/README.md | 49
-rw-r--r--  roles/openshift_aws_ssh_keys/tasks/main.yml | 8
-rw-r--r--  roles/openshift_aws_vpc/README.md | 62
-rw-r--r--  roles/openshift_aws_vpc/defaults/main.yml | 1
-rw-r--r--  roles/openshift_aws_vpc/tasks/main.yml | 53
-rw-r--r--  roles/openshift_cfme/defaults/main.yml | 2
-rw-r--r--  roles/openshift_cfme/tasks/nfs.yml | 7
-rw-r--r--  roles/openshift_cfme/templates/miq-pv-db.yaml.j2 | 2
-rw-r--r--  roles/openshift_cfme/templates/miq-pv-region.yaml.j2 | 2
-rw-r--r--  roles/openshift_cfme/templates/miq-pv-server.yaml.j2 | 2
-rw-r--r--  roles/openshift_cli/meta/main.yml | 2
-rw-r--r--  roles/openshift_cli/tasks/main.yml | 2
-rw-r--r--  roles/openshift_cli_facts/meta/main.yml | 15
-rw-r--r--  roles/openshift_cli_facts/tasks/main.yml | 6
-rw-r--r--  roles/openshift_clock/defaults/main.yml | 2
-rw-r--r--  roles/openshift_clock/meta/main.yml | 3
-rw-r--r--  roles/openshift_clock/tasks/main.yaml | 17
-rw-r--r--  roles/openshift_common/tasks/main.yml | 4
-rw-r--r--  roles/openshift_docker_facts/tasks/main.yml | 2
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 1
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml | 19
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml | 19
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/OWNERS | 12
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json | 18
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json | 18
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json | 20
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json | 20
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json | 21
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json | 18
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json | 20
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json | 20
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json | 7
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json | 7
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/image-streams/OWNERS | 14
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/image-streams/dotnet_imagestreams.json | 69
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/OWNERS | 12
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-runtime-example.json | 412
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/httpd.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json | 10
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json | 10
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-ephemeral-template.json | 18
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-persistent-template.json | 18
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-ephemeral-template.json | 20
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-persistent-template.json | 20
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/db-templates/mysql-ephemeral-template.json | 21
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/db-templates/mysql-persistent-template.json | 18
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-ephemeral-template.json | 20
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-persistent-template.json | 20
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/db-templates/redis-ephemeral-template.json | 7
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/db-templates/redis-persistent-template.json | 7
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/image-streams/OWNERS | 1
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/image-streams/dotnet_imagestreams.json | 69
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql-persistent.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql-persistent.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql-persistent.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-example.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-pgsql-persistent.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-runtime-example.json | 412
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/httpd.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-ephemeral-template.json | 10
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-persistent-template.json | 10
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb-persistent.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql-persistent.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql.json | 6
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 12
-rw-r--r--  roles/openshift_health_checker/action_plugins/openshift_health_check.py | 146
-rw-r--r--  roles/openshift_health_checker/callback_plugins/zz_failure_summary.py | 302
-rw-r--r--  roles/openshift_health_checker/library/aos_version.py | 77
-rw-r--r--  roles/openshift_health_checker/openshift_checks/__init__.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/disk_availability.py | 5
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 11
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py | 1
-rw-r--r--  roles/openshift_health_checker/test/action_plugin_test.py | 15
-rw-r--r--  roles/openshift_health_checker/test/conftest.py | 1
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py | 2
-rw-r--r--  roles/openshift_health_checker/test/zz_failure_summary_test.py | 70
-rw-r--r--  roles/openshift_hosted/defaults/main.yml | 11
-rw-r--r--  roles/openshift_hosted/tasks/registry/registry.yml | 66
-rw-r--r--  roles/openshift_hosted/tasks/registry/secure.yml | 3
-rw-r--r--  roles/openshift_hosted/tasks/registry/storage/object_storage.yml | 15
l---------  roles/openshift_hosted/tasks/registry/storage/registry_config_secret.j2 | 1
-rw-r--r--  roles/openshift_hosted/tasks/router/router.yml | 64
-rw-r--r--  roles/openshift_hosted/templates/registry_config.j2 | 2
-rw-r--r--  roles/openshift_hosted/templates/registry_config_secret.j2 | 9
-rw-r--r--  roles/openshift_loadbalancer/defaults/main.yml | 4
-rw-r--r--  roles/openshift_logging/README.md | 17
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 5
-rw-r--r--  roles/openshift_logging/tasks/annotate_ops_projects.yaml | 17
-rw-r--r--  roles/openshift_logging/tasks/delete_logging.yaml | 10
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 17
-rw-r--r--  roles/openshift_logging/vars/main.yaml | 2
-rw-r--r--  roles/openshift_logging_curator/defaults/main.yml | 4
-rw-r--r--  roles/openshift_logging_curator/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_curator/templates/curator.j2 | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/defaults/main.yml | 4
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 6
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 | 2
-rw-r--r--  roles/openshift_logging_fluentd/defaults/main.yml | 4
-rw-r--r--  roles/openshift_logging_fluentd/templates/fluentd.j2 | 4
-rw-r--r--  roles/openshift_logging_kibana/defaults/main.yml | 8
-rw-r--r--  roles/openshift_logging_kibana/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_logging_kibana/templates/kibana.j2 | 4
-rw-r--r--  roles/openshift_logging_mux/defaults/main.yml | 4
-rw-r--r--  roles/openshift_logging_mux/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_mux/templates/mux.j2 | 2
-rw-r--r--  roles/openshift_master/defaults/main.yml | 9
-rw-r--r--  roles/openshift_master/tasks/bootstrap.yml | 28
-rw-r--r--  roles/openshift_master/tasks/main.yml | 37
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 10
-rw-r--r--  roles/openshift_node/defaults/main.yml | 67
-rw-r--r--  roles/openshift_node/handlers/main.yml | 1
-rw-r--r--  roles/openshift_node/meta/main.yml | 1
-rw-r--r--  roles/openshift_node/tasks/bootstrap.yml | 55
-rw-r--r--  roles/openshift_node/tasks/config.yml | 111
-rw-r--r--  roles/openshift_node/tasks/install.yml | 33
-rw-r--r--  roles/openshift_node/tasks/main.yml | 195
-rw-r--r--  roles/openshift_node/tasks/openvswitch_system_container.yml | 2
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 46
-rw-r--r--  roles/openshift_node/tasks/tuned.yml | 41
-rw-r--r--  roles/openshift_node/templates/node.service.j2 | 2
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 4
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.dep.service | 2
-rw-r--r--  roles/openshift_node/templates/tuned/openshift-control-plane/tuned.conf | 25
-rw-r--r--  roles/openshift_node/templates/tuned/openshift-node/tuned.conf | 10
-rw-r--r--  roles/openshift_node/templates/tuned/openshift/tuned.conf | 24
-rw-r--r--  roles/openshift_node/templates/tuned/recommend.conf | 8
-rw-r--r--  roles/openshift_node_certificates/defaults/main.yml | 1
-rwxr-xr-x  roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh | 65
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/main.yml | 11
-rw-r--r--  roles/openshift_prometheus/README.md | 95
-rw-r--r--  roles/openshift_prometheus/defaults/main.yaml | 74
-rw-r--r--  roles/openshift_prometheus/files/openshift_prometheus.exports | 3
-rw-r--r--  roles/openshift_prometheus/meta/main.yaml | 19
-rw-r--r--  roles/openshift_prometheus/tasks/create_pvs.yaml | 36
-rw-r--r--  roles/openshift_prometheus/tasks/install_prometheus.yaml | 241
-rw-r--r--  roles/openshift_prometheus/tasks/main.yaml | 26
-rw-r--r--  roles/openshift_prometheus/tasks/nfs.yaml | 44
-rw-r--r--  roles/openshift_prometheus/templates/alertmanager.yml.j2 | 20
-rw-r--r--  roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2 | 15
-rw-r--r--  roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2 | 15
-rw-r--r--  roles/openshift_prometheus/templates/prom-pv-server.yml.j2 | 15
-rw-r--r--  roles/openshift_prometheus/templates/prometheus.rules.j2 | 4
-rw-r--r--  roles/openshift_prometheus/templates/prometheus.yml.j2 | 174
-rw-r--r--  roles/openshift_prometheus/templates/prometheus_deployment.j2 | 240
-rw-r--r--  roles/openshift_prometheus/tests/inventory | 2
-rw-r--r--  roles/openshift_prometheus/tests/test.yaml | 5
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 27
-rw-r--r--  roles/openshift_repos/templates/yum_repo.j2 | 14
-rw-r--r--  roles/openshift_service_catalog/tasks/install.yml | 44
-rw-r--r--  roles/openshift_service_catalog/templates/api_server_service.j2 | 13
-rw-r--r--  roles/openshift_service_catalog/templates/controller_manager_service.j2 | 13
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 2
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 4
-rw-r--r--  roles/openshift_storage_nfs/defaults/main.yml | 4
-rw-r--r--  roles/openshift_version/tasks/main.yml | 7
-rw-r--r--  roles/openshift_version/tasks/set_version_containerized.yml | 2
-rw-r--r--  roles/os_firewall/defaults/main.yml | 2
-rw-r--r--  setup.py | 104
-rw-r--r--  tox.ini | 1
436 files changed, 9255 insertions(+), 8599 deletions(-)
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 942c51f27..a7076c210 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.7.0-0.104.0 ./
+3.7.0-0.125.0 ./
diff --git a/README.md b/README.md
index 315c90063..d696a33e7 100644
--- a/README.md
+++ b/README.md
@@ -55,7 +55,7 @@ you are not running a stable release.
***
Requirements:
- - Ansible >= 2.2.2.0
+ - Ansible >= 2.3.0.0
- Jinja >= 2.7
- pyOpenSSL
- python-lxml
@@ -67,14 +67,10 @@ you are not running a stable release.
dnf install -y ansible pyOpenSSL python-cryptography python-lxml
```
-2. Setup for a specific cloud:
+2. OpenShift Installation Documentation:
- - [AWS](http://github.com/openshift/openshift-ansible/blob/master/README_AWS.md)
- - [GCE](http://github.com/openshift/openshift-ansible/blob/master/README_GCE.md)
- - [local VMs](http://github.com/openshift/openshift-ansible/blob/master/README_libvirt.md)
- - Bring your own host deployments:
- - [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html)
- - [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html)
+ - [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html)
+ - [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html)
## Containerized OpenShift Ansible
diff --git a/README_AWS.md b/README_AWS.md
deleted file mode 100644
index 650a921a4..000000000
--- a/README_AWS.md
+++ /dev/null
@@ -1,200 +0,0 @@
-:warning: **WARNING** :warning: This feature is community supported and has not been tested by Red Hat. Visit [docs.openshift.com](https://docs.openshift.com) for [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/planning.html) or [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/planning.html) supported installation docs.
-
-AWS Setup Instructions
-======================
-
-Get AWS API credentials
------------------------
-1. [AWS credentials documentation](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSGettingStartedGuide/AWSCredentials.html)
-
-
-Create a credentials file
--------------------------
-1. Create a credentials file (e.g. ~/.aws_creds) that looks something like this (the variables must have these exact names):
-```
- export AWS_ACCESS_KEY_ID='AKIASTUFF'
- export AWS_SECRET_ACCESS_KEY='STUFF'
-```
-2. source this file
-```
- source ~/.aws_creds
-```
-Note: You must source this file before running any Ansible commands.
-
-Alternatively, you could configure credentials in either ~/.boto or ~/.aws/credentials; see the [boto docs](http://docs.pythonboto.org/en/latest/boto_config_tut.html) for the format.
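-
-For reference, a minimal `~/.aws/credentials` file uses the standard boto INI format (the values below are placeholders):
-```
- [default]
- aws_access_key_id = AKIASTUFF
- aws_secret_access_key = STUFF
-```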
-
-Subscribe to CentOS
--------------------
-
-1. [CentOS on AWS](https://aws.amazon.com/marketplace/pp/B00O7WM7QW)
-
-
-Set up Security Group
----------------------
-By default, a cluster is launched into the `public` security group. Make sure you allow hosts to talk to each other on port `4789` for SDN.
-You may also want to allow access from the outside world on the following ports:
-
-```
-• 22/TCP - ssh
-• 80/TCP - Web Apps
-• 443/TCP - Web Apps (https)
-• 4789/UDP - SDN / VXLAN
-• 8443/TCP - OpenShift Console
-• 10250/TCP - kubelet
-```
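-
-As an illustrative sketch, such rules could be added with the AWS CLI (assuming the default `public` group; adjust group, ports and CIDR to your setup):
-```
- aws ec2 authorize-security-group-ingress --group-name public --protocol tcp --port 8443 --cidr 0.0.0.0/0
- aws ec2 authorize-security-group-ingress --group-name public --protocol udp --port 4789 --source-group public
-```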
-
-
-Determine your subnet and set up the VPC
----------------------------------------
-
-In the AWS VPC console, look up your subnet ID for the region you want to use and set it as such:
-
-- export ec2_vpc_subnet='my_vpc_subnet'
-
-Go to Your VPCs, select the VPC, and under Actions -> DNS Hostnames, set to Yes and Save.
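-
-If you prefer the AWS CLI, the same lookup and DNS setting can be done along these lines (the VPC ID is a placeholder):
-```
- aws ec2 describe-subnets --query 'Subnets[].{ID:SubnetId,VPC:VpcId,CIDR:CidrBlock}'
- aws ec2 modify-vpc-attribute --vpc-id vpc-0123456789abcdef0 --enable-dns-hostnames '{"Value": true}'
-```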
-
-
-(Optional) Set up your $HOME/.ssh/config file
--------------------------------------------
-In case of a cluster creation, or any other case where you don't know the machine hostnames in advance, you can use `.ssh/config`
-to set up a private key file that allows Ansible to connect to the created hosts.
-
-To do so, add the following entry to your $HOME/.ssh/config file and make it point to the private key file that allows you to log in on AWS.
-```
-Host *.compute-1.amazonaws.com
- IdentityFile $HOME/.ssh/my_private_key.pem
-```
-
-Alternatively, you can configure your ssh-agent to hold the credentials to connect to your AWS instances.
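-
-For example:
-```
- eval "$(ssh-agent)"
- ssh-add $HOME/.ssh/my_private_key.pem
-```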
-
-(Optional) Choose where the cluster will be launched
-----------------------------------------------------
-
-By default, a cluster is launched with the following configuration:
-
-- Instance type: m4.large
-- AMI: ami-7a9e9812 (for online deployments, ami-61bbf104 for origin deployments and ami-10663b78 for enterprise deployments)
-- Region: us-east-1
-- Keypair name: libra
-- Security group: public
-
-#### Master specific defaults:
-- Master root volume size: 10 (in GiBs)
-- Master root volume type: gp2
-- Master root volume iops: 500 (only applicable when volume type is io1)
-
-#### Node specific defaults:
-- Node root volume size: 10 (in GiBs)
-- Node root volume type: gp2
-- Node root volume iops: 500 (only applicable when volume type is io1)
-- Docker volume size: 25 (in GiBs)
-- Docker volume ephemeral: true (Whether the docker volume is ephemeral)
-- Docker volume type: gp2 (only applicable if ephemeral is false)
-- Docker volume iops: 500 (only applicable when volume type is io1)
-
-### Specifying EC2 instance types
-
-#### All instances:
-
-- export ec2_instance_type='m4.large'
-
-#### Master instances:
-
-- export ec2_master_instance_type='m4.large'
-
-#### Infra node instances:
-
-- export ec2_infra_instance_type='m4.large'
-
-#### Non-infra node instances:
-
-- export ec2_node_instance_type='m4.large'
-
-#### etcd instances:
-
-- export ec2_etcd_instance_type='m4.large'
-
-If needed, these values can be changed by setting environment variables on your system.
-
-- export ec2_image='ami-307b3658'
-- export ec2_region='us-east-1'
-- export ec2_keypair='libra'
-- export ec2_security_groups="['public']"
-- export ec2_assign_public_ip='true'
-- export os_etcd_root_vol_size='20'
-- export os_etcd_root_vol_type='standard'
-- export os_etcd_vol_size='20'
-- export os_etcd_vol_type='standard'
-- export os_master_root_vol_size='20'
-- export os_master_root_vol_type='standard'
-- export os_node_root_vol_size='15'
-- export os_docker_vol_size='50'
-- export os_docker_vol_ephemeral='false'
-
-Install Dependencies
---------------------
-1. Ansible requires python-boto for AWS operations:
-
-Fedora
-```
- dnf install -y ansible python-boto pyOpenSSL
-```
-
-RHEL/CentOS
-```
- yum install -y ansible python-boto pyOpenSSL
-```
-OSX:
-```
- pip install -U pyopenssl boto
-```
-
-
-Test The Setup
---------------
-1. cd openshift-ansible
-1. Try to list all instances (Passing an empty string as the cluster_id
-argument will result in all ec2 instances being listed)
-```
- bin/cluster list aws ''
-```
-
-Creating a cluster
-------------------
-1. To create a cluster with one master and two nodes
-```
- bin/cluster create aws <cluster-id>
-```
-
-Updating a cluster
----------------------
-1. To update the cluster
-```
- bin/cluster update aws <cluster-id>
-```
-
-Terminating a cluster
----------------------
-1. To terminate the cluster
-```
- bin/cluster terminate aws <cluster-id>
-```
-
-Specifying a deployment type
----------------------------
-The --deployment-type flag can be passed to bin/cluster to specify the deployment type
-1. To launch an OpenShift Enterprise cluster (requires a valid subscription):
-```
- bin/cluster create aws --deployment-type=openshift-enterprise <cluster-id>
-```
-Note: If no deployment type is specified, then the default is origin.
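-
-Equivalently, the default can be overridden via the `OS_DEPLOYMENT_TYPE` environment variable that `bin/cluster` reads:
-```
- export OS_DEPLOYMENT_TYPE=openshift-enterprise
- bin/cluster create aws <cluster-id>
-```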
-
-
-## Post-ansible steps
-
-You should now be ready to follow the **What's Next?** section of the advanced installation guide to deploy your router, registry, and other components.
-
-Refer to the advanced installation guide for your deployment type:
-
-* [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html#what-s-next)
-* [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html#what-s-next)
diff --git a/README_GCE.md b/README_GCE.md
deleted file mode 100644
index 99c8715de..000000000
--- a/README_GCE.md
+++ /dev/null
@@ -1,136 +0,0 @@
-:warning: **WARNING** :warning: This feature is community supported and has not been tested by Red Hat. Visit [docs.openshift.com](https://docs.openshift.com) for [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/index.html) or [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/index.html) supported installation docs.
-
-GCE Setup Instructions
-======================
-
-Get a gce service key
----------------------
-1. Ask your GCE project administrator for a GCE service key
-
-Note: If your GCE project does not show a Service Account under <Project>/APIs & auth/Credentials, you will need to use "Create new Client ID" to create a Service Account before your administrator can create the service key for you.
-
-
-Convert a GCE service key into a pem (for ansible)
---------------------------------------------------
-1. mkdir -p ~/.gce
-1. The gce service key looks something like this: projectname-ef83bd90f261.p12
-   The `ef83bd90f261` part is the public hash (GCE_KEY_HASH); the `projectname` part is the project name (PROJECT_NAME).
-1. Be in the same directory as the p12 key file.
-1. The commands below should be copy / paste-able
-1. Run these commands:
-```
- # Temporarily set hash variable and project name
- export GCE_KEY_HASH=ef83bd90f261
- export PROJECT_NAME=Project Name
- export PROJECT_ID=Project ID
-
- # Convert the service key (note: 'notasecret' is literally what we want here)
- openssl pkcs12 -in "${PROJECT_NAME}-${GCE_KEY_HASH}.p12" -passin pass:notasecret -nodes -nocerts | openssl rsa -out ${PROJECT_ID}-${GCE_KEY_HASH}.pem
-
- # Move the converted service key to the .gce dir
- mv ${PROJECT_ID}-${GCE_KEY_HASH}.pem ~/.gce
-```
-
-1. Once this is done, put the original service key file (projectname-ef83bd90f261.p12) somewhere safe, or delete it (your call, I do not know what else we will use it for, and we can always regenerate it if needed).
-
-
-Create a gce.ini file for GCE
---------------------------------
-* gce_service_account_email_address - Found in "APIs & auth" -> Credentials -> "Service Account" -> "Email Address"
-* gce_service_account_pem_file_path - Full path from previous steps
-* gce_project_id - Found in "Projects"; it lists all the GCE projects you are associated with. The page lists their "Project Name" and "Project ID". You want the "Project ID".
-
-Mandatory customization variables (check the values according to your tenant):
-* zone = europe-west1-d
-* network = default
-
-Optional Variable Overrides:
-* gce_ssh_user - ssh user, defaults to the current logged in user
-* gce_machine_type = n1-standard-1 - default machine type
-* gce_machine_etcd_type = n1-standard-1 - machine type for etcd hosts
-* gce_machine_master_type = n1-standard-1 - machine type for master hosts
-* gce_machine_node_type = n1-standard-1 - machine type for node hosts
-* gce_machine_image = centos-7 - default image
-* gce_machine_etcd_image = centos-7 - image for etcd hosts
-* gce_machine_master_image = centos-7 - image for master hosts
-* gce_machine_node_image = centos-7 - image for node hosts
-
-
-1. vi ~/.gce/gce.ini
-1. Make the contents look like this:
-```
-[gce]
-gce_service_account_email_address = long...@developer.gserviceaccount.com
-gce_service_account_pem_file_path = /full/path/to/project_id-gce_key_hash.pem
-gce_project_id = project_id
-zone = europe-west1-d
-network = default
-gce_machine_type = n1-standard-2
-gce_machine_master_type = n1-standard-1
-gce_machine_node_type = n1-standard-2
-gce_machine_image = centos-7
-gce_machine_master_image = centos-7
-gce_machine_node_image = centos-7
-
-```
-1. Define the environment variable GCE_INI_PATH so gce.py can pick it up and bin/cluster can also read it
-```
-export GCE_INI_PATH=~/.gce/gce.ini
-```
-
-
-Install Dependencies
---------------------
-1. Ansible requires libcloud for gce operations:
-```
- yum install -y ansible python-libcloud
-```
-
-> Installation on Mac OS X requires the pycrypto library
->
-> <kbd>$ pip install pycrypto</kbd>
-
-Test The Setup
---------------
-1. cd openshift-ansible/
-1. Try to list all instances (Passing an empty string as the cluster_id
-argument will result in all gce instances being listed)
-```
- bin/cluster list gce ''
-```
-
-Creating a cluster
-------------------
-1. To create a cluster with one master, one infra node, and two compute nodes
-```
- bin/cluster create gce <cluster-id>
-```
-1. To create a cluster with 3 masters, 3 etcd hosts, 2 infra nodes and 10
-compute nodes
-```
- bin/cluster create gce -m 3 -e 3 -i 2 -n 10 <cluster-id>
-```
-
-Updating a cluster
----------------------
-1. To update the cluster
-```
- bin/cluster update gce <cluster-id>
-```
-
-Add additional nodes
----------------------
-1. To add additional infra nodes
-```
- bin/cluster add-nodes gce -i <num nodes> <cluster-id>
-```
-1. To add additional compute nodes
-```
- bin/cluster add-nodes gce -n <num nodes> <cluster-id>
-```
-Terminating a cluster
----------------------
-1. To terminate the cluster
-```
- bin/cluster terminate gce <cluster-id>
-```
diff --git a/README_libvirt.md b/README_libvirt.md
deleted file mode 100644
index 1661681a0..000000000
--- a/README_libvirt.md
+++ /dev/null
@@ -1,163 +0,0 @@
-:warning: **WARNING** :warning: This feature is community supported and has not been tested by Red Hat. Visit [docs.openshift.com](https://docs.openshift.com) for [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/index.html) or [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/index.html) supported installation docs.
-
-LIBVIRT Setup instructions
-==========================
-
-`libvirt` is an `openshift-ansible` provider that uses `libvirt` to create local Fedora VMs that are provisioned exactly the same way that cloud VMs would be provisioned.
-
-This makes `libvirt` useful to develop, test and debug OpenShift and openshift-ansible locally on the developer’s workstation before going to the cloud.
-
-Install dependencies
---------------------
-
-1. Install [ansible](http://www.ansible.com/)
-2. Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html)
-3. Install [ebtables](http://ebtables.netfilter.org/)
-4. Install [qemu and qemu-system-x86](http://wiki.qemu.org/Main_Page)
-5. Install [libvirt-python and libvirt](http://libvirt.org/)
-6. Install [genisoimage](http://cdrkit.org/) or [mkisofs](http://cdrtools.sourceforge.net/private/cdrecord.html)
-7. Enable and start the libvirt daemon, e.g:
- - `systemctl enable libvirtd`
- - `systemctl start libvirtd`
-8. [Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html)
-9. Check that your `$HOME` is accessible to the qemu user²
-10. Configure dns resolution on the host³
-11. Install libselinux-python
-12. Ensure you have an SSH private and public keypair at `~/.ssh/id_rsa` and `~/.ssh/id_rsa.pub`⁴
-
-#### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access.
-
-You can test it with the following command:
-
-```
-virsh -c qemu:///system pool-list
-```
-
-If you have access error messages, please read https://libvirt.org/acl.html and https://libvirt.org/aclpolkit.html .
-
-In short, if your libvirt has been compiled with Polkit support (e.g. Arch, Fedora 21), you can create `/etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules` as follows to grant full access to libvirt to `$USER`:
-
-```
-sudo /bin/sh -c "cat - > /etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules" << EOF
-polkit.addRule(function(action, subject) {
-    if (action.id == "org.libvirt.unix.manage" &&
-        subject.user == "$USER") {
-        polkit.log("action=" + action);
-        polkit.log("subject=" + subject);
-        return polkit.Result.YES;
-    }
-});
-EOF
-```
-
-If your libvirt has not been compiled with Polkit (e.g. Ubuntu 14.04.1 LTS), check the permissions on the libvirt unix socket:
-
-```
-ls -l /var/run/libvirt/libvirt-sock
-srwxrwx--- 1 root libvirtd 0 févr. 12 16:03 /var/run/libvirt/libvirt-sock
-
-usermod -a -G libvirtd $USER
-# $USER needs to log out and log back in for the new group to take effect
-```
-
-(Replace `$USER` with your login name)
-
-#### ² Qemu will run as a specific user. It must have access to the VMs' drives
-
-All the disk drive resources needed by the VMs (Fedora disk image, cloud-init files) are put inside `~/libvirt-storage-pool-openshift/`.
-
-As we’re using the `qemu:///system` instance of libvirt, qemu will run with a specific `user:group` distinct from your user. It is configured in `/etc/libvirt/qemu.conf`. That qemu user must have access to that libvirt storage pool.
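-
-The relevant settings in `/etc/libvirt/qemu.conf` typically look like this (exact values vary by distribution):
-```
-user = "qemu"
-group = "qemu"
-```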
-
-If your `$HOME` is world readable, everything is fine. If your `$HOME` is private, `ansible` will fail with an error message like:
-
-```
-error: Cannot access storage file '$HOME/libvirt-storage-pool-openshift/lenaic-master-216d8.qcow2' (as uid:99, gid:78): Permission denied
-```
-
-In order to fix that issue, you have several possibilities:
- * set `libvirt_storage_pool_path` inside `playbooks/libvirt/openshift-cluster/launch.yml` and `playbooks/libvirt/openshift-cluster/terminate.yml` to a directory:
- * backed by a filesystem with a lot of free disk space
- * writable by your user;
- * accessible by the qemu user.
- * Grant the qemu user access to the storage pool.
-
-On Arch or Fedora 22+:
-
-```
-setfacl -m g:kvm:--x ~
-```
-
-#### ³ Enabling DNS resolution to your guest VMs with NetworkManager
-
-- Verify NetworkManager is configured to use dnsmasq:
-
-```sh
-$ sudo vi /etc/NetworkManager/NetworkManager.conf
-[main]
-dns=dnsmasq
-```
-
-- Configure dnsmasq to use the Virtual Network router for example.com:
-
-```sh
-sudo vi /etc/NetworkManager/dnsmasq.d/libvirt_dnsmasq.conf
-server=/example.com/192.168.55.1
-```
-
-#### ⁴ Private and public keypair in ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub
-
-This playbook uses SSH keys to communicate with the libvirt-driven virtual machines. At this time the names of those keys are fixed and cannot be changed.
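-
-If you don't have such a keypair yet, it can be generated with:
-```
-ssh-keygen -t rsa -f ~/.ssh/id_rsa
-```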
-
-
-Test The Setup
---------------
-
-1. cd openshift-ansible/
-2. Try to list all instances (Passing an empty string as the cluster_id argument will result in all libvirt instances being listed)
-
-```
- bin/cluster list libvirt ''
-```
-
-Configuration
--------------
-
-The following options can be passed via the `-o` flag of the `create` command or as environment variables (a usage example follows the list):
-
-* `image_url` (defaults to `http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2.xz`): URL of the QCOW2 image to download
-* `image_name` (defaults to `CentOS-7-x86_64-GenericCloud.qcow2`): Name of the QCOW2 image to boot the VMs on
-* `image_compression` (defaults to `xz`): Source QCOW2 compression (only xz supported at this time)
-* `image_sha256` (defaults to `dd0f5e610e7c5ffacaca35ed7a78a19142a588f4543da77b61c1fb0d74400471`): Expected SHA256 checksum of the downloaded image
-* `libvirt_storage_pool` (defaults to `openshift-ansible`): name of the libvirt storage pool for the VM images. It will be created if it does not exist
-* `libvirt_storage_pool_path` (defaults to `$HOME/libvirt-storage-pool-openshift-ansible`): path to `libvirt_storage_pool`, i.e. where the VM images are stored
-* `libvirt_network` (defaults to `openshift-ansible`): name of the libvirt network that the VMs will use. It will be created if it does not exist
-* `libvirt_instance_memory_mib` (defaults to `1024`): memory of the VMs in MiB
-* `libvirt_instance_vcpu` (defaults to `2`): number of vCPUs of the VMs
-* `skip_image_download` (defaults to `no`): Skip QCOW2 image download. This requires the `image_name` QCOW2 image to be already present in `$HOME/libvirt-storage-pool-openshift-ansible`
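-
-For example, a couple of these options combined on the `create` command line (illustrative values):
-
-```
- bin/cluster create -o libvirt_instance_memory_mib=2048 -o libvirt_instance_vcpu=4 libvirt lenaic
-```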
-
-Creating a cluster
-------------------
-
-1. To create a cluster with one master and two nodes
-
-```
- bin/cluster create libvirt lenaic
-```
-
-Updating a cluster
-------------------
-
-1. To update the cluster
-
-```
- bin/cluster update libvirt lenaic
-```
-
-Terminating a cluster
----------------------
-
-1. To terminate the cluster
-
-```
- bin/cluster terminate libvirt lenaic
-```
diff --git a/README_openstack.md b/README_openstack.md
deleted file mode 100644
index 2578488c7..000000000
--- a/README_openstack.md
+++ /dev/null
@@ -1,87 +0,0 @@
-:warning: **WARNING** :warning: This feature is community supported and has not been tested by Red Hat. Visit [docs.openshift.com](https://docs.openshift.com) for [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/index.html) or [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/index.html) supported installation docs.
-
-OPENSTACK Setup instructions
-============================
-
-Requirements
-------------
-
-The OpenStack instance must have Neutron and Heat enabled.
-
-Install Dependencies
---------------------
-
-1. The OpenStack python clients for Nova, Neutron and Heat are required:
-
-* `python-novaclient`
-* `python-neutronclient`
-* `python-heatclient`
-
-On Fedora:
-```
- dnf install -y ansible python-novaclient python-neutronclient python-heatclient
-```
-
-On RHEL / CentOS:
-```
- yum install -y ansible python-novaclient python-neutronclient python-heatclient
- sudo pip install shade
-```
-
-Configuration
--------------
-
-The following options can be passed via the `-o` flag of the `create` command:
-
-* `infra_heat_stack` (defaults to `playbooks/openstack/openshift-cluster/files/heat_stack.yaml`): filename of the Heat template used to create the cluster infrastructure
-
-The following options are used only by `heat_stack.yaml`, i.e. only when the `infra_heat_stack` option is left at its default value.
-
-* `image_name`: Name of the image to use to spawn VMs
-* `public_key` (defaults to `~/.ssh/id_rsa.pub`): filename of the ssh public key
-* `etcd_flavor` (defaults to `m1.small`): The ID or name of the flavor for the etcd nodes
-* `master_flavor` (defaults to `m1.small`): The ID or name of the flavor for the master
-* `node_flavor` (defaults to `m1.medium`): The ID or name of the flavor for the compute nodes
-* `infra_flavor` (defaults to `m1.small`): The ID or name of the flavor for the infrastructure nodes
-* `network_prefix` (defaults to `openshift-ansible-<cluster_id>`): prefix prepended to all network objects (net, subnet, router, security groups)
-* `dns` (defaults to `8.8.8.8,8.8.4.4`): comma-separated list of DNS servers to use
-* `net_cidr` (defaults to `192.168.<rand()>.0/24`): CIDR of the network created by `heat_stack.yaml`
-* `external_net` (defaults to `external`): Name of the external network to connect to
-* `floating_ip_pool` (defaults to `external`): comma-separated list of floating IP pools
-* `ssh_from` (defaults to `0.0.0.0/0`): IPs authorized to connect to the VMs via ssh
-* `node_port_from` (defaults to `0.0.0.0/0`): IPs authorized to connect to the services exposed via nodePort
-* `heat_timeout` (defaults to `3`): Timeout (in minutes) passed to Heat for stack create or update
-
-
-Creating a cluster
-------------------
-
-1. To create a cluster with one master and two nodes
-
-```
- bin/cluster create openstack <cluster-id>
-```
-
-2. To create a cluster with one master and three nodes, a custom VM image and custom DNS:
-
-```
- bin/cluster create -n 3 -o image_name=rhel-7.1-openshift-2015.05.21 -o dns=172.16.50.210,172.16.50.250 openstack lenaic
-```
-
-Updating a cluster
-------------------
-
-1. To update the cluster
-
-```
- bin/cluster update openstack <cluster-id>
-```
-
-Terminating a cluster
----------------------
-
-1. To terminate the cluster
-
-```
- bin/cluster terminate openstack <cluster-id>
-```
diff --git a/README_vagrant.md b/README_vagrant.md
deleted file mode 100644
index cb62e31d8..000000000
--- a/README_vagrant.md
+++ /dev/null
@@ -1 +0,0 @@
-The Vagrant-based installation has been moved to: https://github.com/openshift/openshift-ansible-contrib/tree/master/vagrant
diff --git a/bin/README.md b/bin/README.md
deleted file mode 100644
index fec17cb9b..000000000
--- a/bin/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# The `bin/cluster` tool
-
-This tool was meant to be the entry point for managing OpenShift clusters,
-running against different "providers" (`aws`, `gce`, `libvirt`, `openstack`),
-though its use is now deprecated in favor of the [`byo`](../playbooks/byo)
-playbooks.
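-
-With the `byo` playbooks, a configuration run is a plain `ansible-playbook` invocation against your own inventory, e.g. (inventory path assumed):
-
-```
-ansible-playbook -i ~/openshift-inventory playbooks/byo/config.yml
-```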
diff --git a/bin/cluster b/bin/cluster
deleted file mode 100755
index f77eb36ad..000000000
--- a/bin/cluster
+++ /dev/null
@@ -1,424 +0,0 @@
-#!/usr/bin/env python2
-
-import argparse
-import ConfigParser
-import os
-import sys
-import subprocess
-import traceback
-
-
-class Cluster(object):
- """
- Provide Command, Control and Configuration (c3) Interface for OpenShift Clusters
- """
-
- def __init__(self):
- # setup ansible ssh environment
- if 'ANSIBLE_SSH_ARGS' not in os.environ:
- os.environ['ANSIBLE_SSH_ARGS'] = (
- '-o ForwardAgent=yes '
- '-o StrictHostKeyChecking=no '
- '-o UserKnownHostsFile=/dev/null '
- '-o ControlMaster=auto '
- '-o ControlPersist=600s '
- )
- # Because of `UserKnownHostsFile=/dev/null`
- # our `.ssh/known_hosts` file most probably misses the ssh host public keys
- # of our servers.
- # In that case, ansible serializes the execution of ansible modules
- # because we might be interactively prompted to accept the ssh host public keys.
- # Because of `StrictHostKeyChecking=no` we know that we won't be prompted
- # So, we don't want our modules execution to be serialized.
- os.environ['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
- # TODO: A more secure way to proceed would consist in dynamically
- # retrieving the ssh host public keys from the IaaS interface
- if 'ANSIBLE_SSH_PIPELINING' not in os.environ:
- os.environ['ANSIBLE_SSH_PIPELINING'] = 'True'
-
- def get_deployment_type(self, args):
- """
- Get the deployment_type based on the environment variables and the
- command line arguments
- :param args: command line arguments provided by the user
- :return: string representing the deployment type
- """
- deployment_type = 'origin'
- if args.deployment_type:
- deployment_type = args.deployment_type
- elif 'OS_DEPLOYMENT_TYPE' in os.environ:
- deployment_type = os.environ['OS_DEPLOYMENT_TYPE']
- return deployment_type
-
-
- def create(self, args):
- """
- Create an OpenShift cluster for given provider
- :param args: command line arguments provided by user
- """
- cluster = {'cluster_id': args.cluster_id,
- 'deployment_type': self.get_deployment_type(args)}
- playbook = "playbooks/{0}/openshift-cluster/launch.yml".format(args.provider)
- inventory = self.setup_provider(args.provider)
-
- cluster['num_masters'] = args.masters
- cluster['num_nodes'] = args.nodes
- cluster['num_infra'] = args.infra
- cluster['num_etcd'] = args.etcd
- cluster['cluster_env'] = args.env
-
- if args.cloudprovider and args.provider == 'openstack':
- cluster['openshift_cloudprovider_kind'] = 'openstack'
- cluster['openshift_cloudprovider_openstack_auth_url'] = os.getenv('OS_AUTH_URL')
- cluster['openshift_cloudprovider_openstack_username'] = os.getenv('OS_USERNAME')
- cluster['openshift_cloudprovider_openstack_password'] = os.getenv('OS_PASSWORD')
- if 'OS_USER_DOMAIN_ID' in os.environ:
- cluster['openshift_cloudprovider_openstack_domain_id'] = os.getenv('OS_USER_DOMAIN_ID')
- if 'OS_USER_DOMAIN_NAME' in os.environ:
- cluster['openshift_cloudprovider_openstack_domain_name'] = os.getenv('OS_USER_DOMAIN_NAME')
- if 'OS_PROJECT_ID' in os.environ or 'OS_TENANT_ID' in os.environ:
- cluster['openshift_cloudprovider_openstack_tenant_id'] = os.getenv('OS_PROJECT_ID',os.getenv('OS_TENANT_ID'))
- if 'OS_PROJECT_NAME' is os.environ or 'OS_TENANT_NAME' in os.environ:
- cluster['openshift_cloudprovider_openstack_tenant_name'] = os.getenv('OS_PROJECT_NAME',os.getenv('OS_TENANT_NAME'))
- if 'OS_REGION_NAME' in os.environ:
- cluster['openshift_cloudprovider_openstack_region'] = os.getenv('OS_REGION_NAME')
-
- self.action(args, inventory, cluster, playbook)
-
- def add_nodes(self, args):
- """
- Add nodes to an existing cluster for given provider
- :param args: command line arguments provided by user
- """
- cluster = {'cluster_id': args.cluster_id,
- 'deployment_type': self.get_deployment_type(args),
- }
- playbook = "playbooks/{0}/openshift-cluster/add_nodes.yml".format(args.provider)
- inventory = self.setup_provider(args.provider)
-
- cluster['num_nodes'] = args.nodes
- cluster['num_infra'] = args.infra
- cluster['cluster_env'] = args.env
-
- self.action(args, inventory, cluster, playbook)
-
- def terminate(self, args):
- """
- Destroy OpenShift cluster
- :param args: command line arguments provided by user
- """
- cluster = {'cluster_id': args.cluster_id,
- 'deployment_type': self.get_deployment_type(args),
- 'cluster_env': args.env,
- }
- playbook = "playbooks/{0}/openshift-cluster/terminate.yml".format(args.provider)
- inventory = self.setup_provider(args.provider)
-
- self.action(args, inventory, cluster, playbook)
-
- def list(self, args):
- """
- List VMs in cluster
- :param args: command line arguments provided by user
- """
- cluster = {'cluster_id': args.cluster_id,
- 'deployment_type': self.get_deployment_type(args),
- 'cluster_env': args.env,
- }
- playbook = "playbooks/{0}/openshift-cluster/list.yml".format(args.provider)
- inventory = self.setup_provider(args.provider)
-
- self.action(args, inventory, cluster, playbook)
-
- def config(self, args):
- """
- Configure or reconfigure OpenShift across clustered VMs
- :param args: command line arguments provided by user
- """
- cluster = {'cluster_id': args.cluster_id,
- 'deployment_type': self.get_deployment_type(args),
- 'cluster_env': args.env,
- }
- playbook = "playbooks/{0}/openshift-cluster/config.yml".format(args.provider)
- inventory = self.setup_provider(args.provider)
-
- self.action(args, inventory, cluster, playbook)
-
- def update(self, args):
- """
- Update to latest OpenShift across clustered VMs
- :param args: command line arguments provided by user
- """
- cluster = {'cluster_id': args.cluster_id,
- 'deployment_type': self.get_deployment_type(args),
- 'cluster_env': args.env,
- }
-
- playbook = "playbooks/{0}/openshift-cluster/update.yml".format(args.provider)
- inventory = self.setup_provider(args.provider)
-
- self.action(args, inventory, cluster, playbook)
-
- def service(self, args):
- """
- Make the same service call across all nodes in the cluster
- :param args: command line arguments provided by user
- """
- cluster = {'cluster_id': args.cluster_id,
- 'deployment_type': self.get_deployment_type(args),
- 'new_cluster_state': args.state,
- 'cluster_env': args.env,
- }
-
- playbook = "playbooks/{0}/openshift-cluster/service.yml".format(args.provider)
- inventory = self.setup_provider(args.provider)
-
- self.action(args, inventory, cluster, playbook)
-
- def setup_provider(self, provider):
- """
- Setup ansible playbook environment
-        :param provider: name of the provider (gce, aws, libvirt or openstack)
- :return: path to inventory for given provider
- """
- config = ConfigParser.ConfigParser()
- if 'gce' == provider:
-            gce_ini_default_path = 'inventory/gce/hosts/gce.ini'
- gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
- if os.path.exists(gce_ini_path):
- config.readfp(open(gce_ini_path))
-
- for key in config.options('gce'):
- os.environ[key] = config.get('gce', key)
-
- inventory = '-i inventory/gce/hosts'
- elif 'aws' == provider:
- config.readfp(open('inventory/aws/hosts/ec2.ini'))
-
- for key in config.options('ec2'):
- os.environ[key] = config.get('ec2', key)
-
- inventory = '-i inventory/aws/hosts'
-
- key_vars = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY']
- key_missing = [key for key in key_vars if key not in os.environ]
-
- boto_conf_files = ['~/.aws/credentials', '~/.boto']
- conf_exists = lambda conf: os.path.isfile(os.path.expanduser(conf))
- boto_configs = [conf for conf in boto_conf_files if conf_exists(conf)]
-
- if len(key_missing) > 0 and len(boto_configs) == 0:
- raise ValueError("PROVIDER aws requires {0} environment variable(s). See README_AWS.md".format(key_missing))
-
- elif 'libvirt' == provider:
- inventory = '-i inventory/libvirt/hosts'
- elif 'openstack' == provider:
- inventory = '-i inventory/openstack/hosts'
- else:
- # this code should never be reached
- raise ValueError("invalid PROVIDER {0}".format(provider))
-
- return inventory
-
- def action(self, args, inventory, cluster, playbook):
- """
- Build ansible-playbook command line and execute
- :param args: command line arguments provided by user
-        :param inventory: inventory option string for the given provider
-        :param cluster: cluster variables passed to ansible as extra vars
- :param playbook: ansible playbook to execute
- """
-
- verbose = ''
- if args.verbose > 0:
- verbose = '-{0}'.format('v' * args.verbose)
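-            # e.g. three -v flags yield verbose == '-vvv'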
-
- if args.option:
- for opt in args.option:
- k, v = opt.split('=', 1)
- cluster['cli_' + k] = v
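-                # e.g. a hypothetical '-o image=ami-12345' becomes the extra var 'cli_image=ami-12345'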
-
- ansible_extra_vars = '-e \'{0}\''.format(
- ' '.join(['%s=%s' % (key, value) for (key, value) in cluster.items()])
- )
-
- command = 'ansible-playbook {0} {1} {2} {3}'.format(
- verbose, inventory, ansible_extra_vars, playbook
- )
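-        # Illustrative result (hypothetical values):
-        #   ansible-playbook -vv -i inventory/aws/hosts \
-        #       -e 'cluster_id=mycluster deployment_type=origin' \
-        #       playbooks/aws/openshift-cluster/launch.yml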
-
- if args.profile:
- command = 'ANSIBLE_CALLBACK_PLUGINS=ansible-profile/callback_plugins ' + command
-
- if args.verbose > 1:
- command = 'time {0}'.format(command)
-
- if args.verbose > 0:
- sys.stderr.write('RUN [{0}]\n'.format(command))
- sys.stderr.flush()
-
- try:
- subprocess.check_call(command, shell=True)
- except subprocess.CalledProcessError as exc:
- raise ActionFailed("ACTION [{0}] failed: {1}"
- .format(args.action, exc))
-
-
-class ActionFailed(Exception):
- """
- Raised when action failed.
- """
- pass
-
-
-if __name__ == '__main__':
- """
- User command to invoke ansible playbooks in a "known" configuration
-
- Reads ~/.openshift-ansible for default configuration items
- [DEFAULT]
- validate_cluster_ids = False
- cluster_ids = marketing,sales
- providers = gce,aws,libvirt,openstack
- """
-
- warning = ("================================================================================\n"
- "ATTENTION: You are running a community supported utility that has not been\n"
- "tested by Red Hat. Visit https://docs.openshift.com for supported installation\n"
- "instructions.\n"
- "================================================================================\n\n")
- sys.stderr.write(warning)
-
- cluster_config = ConfigParser.SafeConfigParser({
- 'cluster_ids': 'marketing,sales',
- 'validate_cluster_ids': 'False',
- 'providers': 'gce,aws,libvirt,openstack',
- })
-
- path = os.path.expanduser("~/.openshift-ansible")
- if os.path.isfile(path):
- cluster_config.read(path)
-
- cluster = Cluster()
-
- parser = argparse.ArgumentParser(
- formatter_class=argparse.RawDescriptionHelpFormatter,
- description='Python wrapper to ensure proper configuration for OpenShift ansible playbooks',
- epilog='''\
-This wrapper overrides the following Ansible variables:
-
- * ANSIBLE_SSH_ARGS:
- If not set in the environment, this wrapper will use the following value:
- `-o ForwardAgent=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ControlMaster=auto -o ControlPersist=600s`
- If set in the environment, the environment variable value is left untouched and used.
-
- * ANSIBLE_SSH_PIPELINING:
- If not set in the environment, this wrapper will set it to `True`.
- If you experience issues with Ansible SSH pipelining, you can disable it by explicitly setting this environment variable to `False`.
-'''
- )
- parser.add_argument('-v', '--verbose', action='count',
- help='Multiple -v options increase the verbosity')
- parser.add_argument('--version', action='version', version='%(prog)s 0.3')
-
- meta_parser = argparse.ArgumentParser(add_help=False)
- providers = cluster_config.get('DEFAULT', 'providers').split(',')
- meta_parser.add_argument('provider', choices=providers, help='provider')
-
- if cluster_config.get('DEFAULT', 'validate_cluster_ids').lower() in ("yes", "true", "1"):
- meta_parser.add_argument('cluster_id', choices=cluster_config.get('DEFAULT', 'cluster_ids').split(','),
- help='prefix for cluster VM names')
- else:
- meta_parser.add_argument('cluster_id', help='prefix for cluster VM names')
-
- meta_parser.add_argument('-t', '--deployment-type',
- choices=['origin', 'atomic-enterprise', 'openshift-enterprise'],
- help='Deployment type. (default: origin)')
- meta_parser.add_argument('-o', '--option', action='append',
- help='options')
-
- meta_parser.add_argument('--env', default='dev', type=str,
- help='environment for the cluster. Defaults to \'dev\'.')
-
- meta_parser.add_argument('-p', '--profile', action='store_true',
- help='Enable playbook profiling')
-
- action_parser = parser.add_subparsers(dest='action', title='actions',
- description='Choose from valid actions')
-
- create_parser = action_parser.add_parser('create', help='Create a cluster',
- parents=[meta_parser])
- create_parser.add_argument('-c', '--cloudprovider', action='store_true',
- help='Enable the cloudprovider')
- create_parser.add_argument('-m', '--masters', default=1, type=int,
- help='number of masters to create in cluster')
- create_parser.add_argument('-n', '--nodes', default=2, type=int,
- help='number of nodes to create in cluster')
- create_parser.add_argument('-i', '--infra', default=1, type=int,
- help='number of infra nodes to create in cluster')
- create_parser.add_argument('-e', '--etcd', default=0, type=int,
- help='number of external etcd hosts to create in cluster')
- create_parser.set_defaults(func=cluster.create)
-
-    add_nodes_parser = action_parser.add_parser('add-nodes', help='Add nodes to a cluster',
-                                                parents=[meta_parser])
-    add_nodes_parser.add_argument('-n', '--nodes', default=1, type=int,
-                                  help='number of nodes to add to the cluster')
-    add_nodes_parser.add_argument('-i', '--infra', default=1, type=int,
-                                  help='number of infra nodes to add to the cluster')
-    add_nodes_parser.set_defaults(func=cluster.add_nodes)
-
- config_parser = action_parser.add_parser('config',
- help='Configure or reconfigure a cluster',
- parents=[meta_parser])
- config_parser.set_defaults(func=cluster.config)
-
- terminate_parser = action_parser.add_parser('terminate',
- help='Destroy a cluster',
- parents=[meta_parser])
- terminate_parser.add_argument('-f', '--force', action='store_true',
- help='Destroy cluster without confirmation')
- terminate_parser.set_defaults(func=cluster.terminate)
-
- update_parser = action_parser.add_parser('update',
- help='Update OpenShift across cluster',
- parents=[meta_parser])
- update_parser.add_argument('-f', '--force', action='store_true',
- help='Update cluster without confirmation')
- update_parser.set_defaults(func=cluster.update)
-
- list_parser = action_parser.add_parser('list', help='List VMs in cluster',
- parents=[meta_parser])
- list_parser.set_defaults(func=cluster.list)
-
- service_parser = action_parser.add_parser('service', help='service for openshift across cluster',
- parents=[meta_parser])
- # choices are the only ones valid for the ansible service module: http://docs.ansible.com/service_module.html
- service_parser.add_argument('state', choices=['started', 'stopped', 'restarted', 'reloaded'],
- help='make service call across cluster')
- service_parser.set_defaults(func=cluster.service)
-
- args = parser.parse_args()
-
- if 'terminate' == args.action and not args.force:
- answer = raw_input("This will destroy the ENTIRE {0} cluster. Are you sure? [y/N] ".format(args.cluster_id))
- if answer not in ['y', 'Y']:
- sys.stderr.write('\nACTION [terminate] aborted by user!\n')
- exit(1)
-
- if 'update' == args.action and not args.force:
- answer = raw_input(
- "This is destructive and could corrupt {0} cluster. Continue? [y/N] ".format(args.cluster_id))
- if answer not in ['y', 'Y']:
- sys.stderr.write('\nACTION [update] aborted by user!\n')
- exit(1)
-
- try:
- args.func(args)
- except Exception as exc:
- if args.verbose:
- traceback.print_exc(file=sys.stderr)
- else:
- print >>sys.stderr, exc
- exit(1)
diff --git a/callback_plugins/aa_version_requirement.py b/callback_plugins/aa_version_requirement.py
index 20bdd9056..9562adb28 100644
--- a/callback_plugins/aa_version_requirement.py
+++ b/callback_plugins/aa_version_requirement.py
@@ -29,7 +29,7 @@ else:
# Set to minimum required Ansible version
-REQUIRED_VERSION = '2.2.2.0'
+REQUIRED_VERSION = '2.3.0.0'
DESCRIPTION = "Supported versions: %s or newer" % REQUIRED_VERSION
diff --git a/docs/repo_structure.md b/docs/repo_structure.md
index f598f22c3..49300f80c 100644
--- a/docs/repo_structure.md
+++ b/docs/repo_structure.md
@@ -28,12 +28,6 @@ These are plugins used in playbooks and roles:
```
.
-├── bin [DEPRECATED] Contains the `bin/cluster` script, a
-│ wrapper around the Ansible playbooks that ensures proper
-│ configuration, and facilitates installing, updating,
-│ destroying and configuring OpenShift clusters.
-│ Note: this tool is kept in the repository for legacy
-│ reasons and will be removed at some point.
└── utils Contains the `atomic-openshift-installer` command, an
interactive CLI utility to install OpenShift across a
set of hosts.
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 36a90a870..277695f78 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -1024,6 +1024,18 @@ def oo_contains_rule(source, apiGroups, resources, verbs):
return False
+def oo_selector_to_string_list(user_dict):
+ """Convert a dict of selectors to a key=value list of strings
+
+Given input of {'region': 'infra', 'zone': 'primary'} returns a list
+of items as ['region=infra', 'zone=primary']
+ """
+ selectors = []
+ for key in user_dict:
+ selectors.append("{}={}".format(key, user_dict[key]))
+ return selectors
+
+
class FilterModule(object):
""" Custom ansible filter mapping """
@@ -1065,5 +1077,6 @@ class FilterModule(object):
"oo_openshift_loadbalancer_backends": oo_openshift_loadbalancer_backends,
"to_padded_yaml": to_padded_yaml,
"oo_random_word": oo_random_word,
- "oo_contains_rule": oo_contains_rule
+ "oo_contains_rule": oo_contains_rule,
+ "oo_selector_to_string_list": oo_selector_to_string_list
}
diff --git a/inventory/README.md b/inventory/README.md
index b61bfff18..5e26e3c32 100644
--- a/inventory/README.md
+++ b/inventory/README.md
@@ -2,8 +2,4 @@
You can install OpenShift on:
-* [Amazon Web Services](aws/hosts/)
-* [BYO](byo/) (Bring your own), use this inventory config file to install OpenShift on your bare metal servers
-* [GCE](gce/) (Google Compute Engine)
-* [libvirt](libvirt/hosts/)
-* [OpenStack](openstack/hosts/)
+* [BYO](byo/) (Bring your own), use this inventory config file to install OpenShift on your pre-existing hosts
diff --git a/inventory/aws/hosts/ec2.ini b/inventory/aws/hosts/ec2.ini
deleted file mode 100644
index 64c097d47..000000000
--- a/inventory/aws/hosts/ec2.ini
+++ /dev/null
@@ -1,189 +0,0 @@
-# Ansible EC2 external inventory script settings
-#
-
-[ec2]
-
-# to talk to a private eucalyptus instance uncomment these lines
-# and edit eucalyptus_host to be the host name of your cloud controller
-#eucalyptus = True
-#eucalyptus_host = clc.cloud.domain.org
-
-# AWS regions to make calls to. Set this to 'all' to make requests to all regions
-# in AWS and merge the results together. Alternatively, set this to a comma
-# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'
-regions = all
-regions_exclude = us-gov-west-1,cn-north-1
-
-# When generating inventory, Ansible needs to know how to address a server.
-# Each EC2 instance has a lot of variables associated with it. Here is the list:
-# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
-# Below are 2 variables that are used as the address of a server:
-# - destination_variable
-# - vpc_destination_variable
-
-# This is the normal destination variable to use. If you are running Ansible
-# from outside EC2, then 'public_dns_name' makes the most sense. If you are
-# running Ansible from within EC2, then perhaps you want to use the internal
-# address, and should set this to 'private_dns_name'. The key of an EC2 tag
-# may optionally be used; however, the boto instance variables take precedence
-# in the event of a collision.
-destination_variable = public_dns_name
-
-# This allows you to override the inventory_name with an ec2 variable, instead
-# of using the destination_variable above. Addressing (aka ansible_ssh_host)
-# will still use destination_variable. Tags should be written as 'tag_TAGNAME'.
-hostname_variable = tag_Name
-
-# For servers inside a VPC, using DNS names may not make sense. When an instance
-# has 'subnet_id' set, this variable is used. If the subnet is public, setting
-# this to 'ip_address' will return the public IP address. For instances in a
-# private subnet, this should be set to 'private_ip_address', and Ansible must
-# be run from within EC2. The key of an EC2 tag may optionally be used; however,
-# the boto instance variables take precedence in the event of a collision.
-# WARNING: instances in a private VPC _without_ a public IP address
-# will not be listed in the inventory until you set:
-#     vpc_destination_variable = private_ip_address
-vpc_destination_variable = ip_address
-
-# The following two settings allow flexible ansible host naming based on a
-# python format string and a comma-separated list of ec2 tags. Note that:
-#
-# 1) If the tags referenced are not present for some instances, empty strings
-# will be substituted in the format string.
-# 2) This overrides both destination_variable and vpc_destination_variable.
-#
-#destination_format = {0}.{1}.example.com
-#destination_format_tags = Name,environment
-
-# To tag instances on EC2 with the resource records that point to them from
-# Route53, set 'route53' to True.
-route53 = False
-
-# To exclude RDS instances from the inventory, set 'rds' to False.
-rds = False
-
-# To exclude ElastiCache instances from the inventory, set 'elasticache' to False.
-elasticache = False
-
-# Additionally, you can specify a list of zones to exclude from lookups in
-# 'route53_excluded_zones' as a comma-separated list.
-# route53_excluded_zones = samplezone1.com, samplezone2.com
-
-# By default, only EC2 instances in the 'running' state are returned. Set
-# 'all_instances' to True to return all instances regardless of state.
-all_instances = False
-
-# By default, only EC2 instances in the 'running' state are returned. Specify
-# EC2 instance states to return as a comma-separated list. This
-# option is overridden when 'all_instances' is True.
-# instance_states = pending, running, shutting-down, terminated, stopping, stopped
-
-# By default, only RDS instances in the 'available' state are returned. Set
-# 'all_rds_instances' to True to return all RDS instances regardless of state.
-all_rds_instances = False
-
-# Include RDS cluster information (Aurora etc.)
-include_rds_clusters = False
-
-# By default, only ElastiCache clusters and nodes in the 'available' state
-# are returned. Set 'all_elasticache_clusters' and/or 'all_elasticache_nodes'
-# to True to return all ElastiCache clusters and nodes, regardless of state.
-#
-# Note that all_elasticache_nodes only applies to listed clusters. That means
-# if you set all_elasticache_clusters to False, no node will be returned from
-# unavailable clusters, regardless of their state and of what you set for
-# all_elasticache_nodes.
-all_elasticache_replication_groups = False
-all_elasticache_clusters = False
-all_elasticache_nodes = False
-
-# API calls to EC2 are slow. For this reason, we cache the results of an API
-# call. Set this to the path you want cache files to be written to. Two files
-# will be written to this directory:
-# - ansible-ec2.cache
-# - ansible-ec2.index
-cache_path = ~/.ansible/tmp
-
-# The number of seconds a cache file is considered valid. After this many
-# seconds, a new API call will be made, and the cache file will be updated.
-# To disable the cache, set this value to 0
-cache_max_age = 300
-
-# Organize groups into a nested hierarchy instead of a flat namespace.
-nested_groups = False
-
-# Replace dashes in tags when creating groups to avoid issues with ansible
-replace_dash_in_groups = False
-
-# If set to true, any tag of the form "a,b,c" is expanded into a list
-# and the results are used to create additional tag_* inventory groups.
-expand_csv_tags = False
-
-# The EC2 inventory output can become very large. To manage its size,
-# configure which groups should be created.
-group_by_instance_id = True
-group_by_region = True
-group_by_availability_zone = True
-group_by_ami_id = True
-group_by_instance_type = True
-group_by_key_pair = True
-group_by_vpc_id = True
-group_by_security_group = True
-group_by_tag_keys = True
-group_by_tag_none = True
-group_by_route53_names = True
-group_by_rds_engine = True
-group_by_rds_parameter_group = True
-group_by_elasticache_engine = True
-group_by_elasticache_cluster = True
-group_by_elasticache_parameter_group = True
-group_by_elasticache_replication_group = True
-
-# If you only want to include hosts that match a certain regular expression
-# pattern_include = staging-*
-
-# If you want to exclude any hosts that match a certain regular expression
-# pattern_exclude = staging-*
-
-# Instance filters can be used to control which instances are retrieved for
-# inventory. For the full list of possible filters, please read the EC2 API
-# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters
-# Filters are key/value pairs separated by '='; to list multiple filters, use
-# a comma-separated list. See examples below.
-
-# Retrieve only instances with (key=value) env=staging tag
-# instance_filters = tag:env=staging
-
-# Retrieve only instances with role=webservers OR role=dbservers tag
-# instance_filters = tag:role=webservers,tag:role=dbservers
-
-# Retrieve only t1.micro instances OR instances with tag env=staging
-# instance_filters = instance-type=t1.micro,tag:env=staging
-
-# You can also use wildcards in filter values. The filter below lists instances
-# whose tag Name value matches webservers1*
-# (ex. webservers15, webservers1a, webservers123 etc)
-# instance_filters = tag:Name=webservers1*
-
-# A boto configuration profile may be used to separate out credentials
-# see http://boto.readthedocs.org/en/latest/boto_config_tut.html
-# boto_profile = some-boto-profile-name
-
-
-[credentials]
-
-# The AWS credentials can optionally be specified here. Credentials specified
-# here are ignored if the environment variable AWS_ACCESS_KEY_ID or
-# AWS_PROFILE is set, or if the boto_profile property above is set.
-#
-# Supplying AWS credentials here is not recommended, as it introduces
-# non-trivial security concerns. When going down this route, please make sure
-# to set access permissions for this file correctly, e.g. handle it the same
-# way as you would a private SSH key.
-#
-# Unlike the boto and AWS configure files, this section does not support
-# profiles.
-#
-# aws_access_key_id = AXXXXXXXXXXXXXX
-# aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
-# aws_security_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXX
diff --git a/inventory/aws/hosts/ec2.py b/inventory/aws/hosts/ec2.py
deleted file mode 100755
index b71458a29..000000000
--- a/inventory/aws/hosts/ec2.py
+++ /dev/null
@@ -1,1511 +0,0 @@
-#!/usr/bin/env python2
-# pylint: skip-file
-
-'''
-EC2 external inventory script
-=================================
-
-Generates inventory that Ansible can understand by making API requests to
-AWS EC2 using the Boto library.
-
-NOTE: This script assumes Ansible is being executed where the environment
-variables needed for Boto have already been set:
- export AWS_ACCESS_KEY_ID='AK123'
- export AWS_SECRET_ACCESS_KEY='abc123'
-
-This script also assumes there is an ec2.ini file alongside it. To specify a
-different path to ec2.ini, define the EC2_INI_PATH environment variable:
-
- export EC2_INI_PATH=/path/to/my_ec2.ini
-
-If you're using eucalyptus you need to set the above variables and
-you need to define:
-
- export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
-
-If you're using boto profiles (requires boto>=2.24.0) you can choose a profile
-using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using
-the AWS_PROFILE variable:
-
- AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml
-
-For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
-
-When run against a specific host, this script returns the following variables:
- - ec2_ami_launch_index
- - ec2_architecture
- - ec2_association
- - ec2_attachTime
- - ec2_attachment
- - ec2_attachmentId
- - ec2_block_devices
- - ec2_client_token
- - ec2_deleteOnTermination
- - ec2_description
- - ec2_deviceIndex
- - ec2_dns_name
- - ec2_eventsSet
- - ec2_group_name
- - ec2_hypervisor
- - ec2_id
- - ec2_image_id
- - ec2_instanceState
- - ec2_instance_type
- - ec2_ipOwnerId
- - ec2_ip_address
- - ec2_item
- - ec2_kernel
- - ec2_key_name
- - ec2_launch_time
- - ec2_monitored
- - ec2_monitoring
- - ec2_networkInterfaceId
- - ec2_ownerId
- - ec2_persistent
- - ec2_placement
- - ec2_platform
- - ec2_previous_state
- - ec2_private_dns_name
- - ec2_private_ip_address
- - ec2_publicIp
- - ec2_public_dns_name
- - ec2_ramdisk
- - ec2_reason
- - ec2_region
- - ec2_requester_id
- - ec2_root_device_name
- - ec2_root_device_type
- - ec2_security_group_ids
- - ec2_security_group_names
- - ec2_shutdown_state
- - ec2_sourceDestCheck
- - ec2_spot_instance_request_id
- - ec2_state
- - ec2_state_code
- - ec2_state_reason
- - ec2_status
- - ec2_subnet_id
- - ec2_tenancy
- - ec2_virtualization_type
- - ec2_vpc_id
-
-These variables are pulled out of a boto.ec2.instance object. There is a lack of
-consistency with variable spellings (camelCase and underscores) since this
-just loops through all variables the object exposes. It is preferred to use the
-ones with underscores when multiple exist.
-
-In addition, if an instance has AWS Tags associated with it, each tag is a new
-variable named:
- - ec2_tag_[Key] = [Value]
-
-Security groups are comma-separated in 'ec2_security_group_ids' and
-'ec2_security_group_names'.
-'''
-
-# (c) 2012, Peter Sankauskas
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-######################################################################
-
-import sys
-import os
-import argparse
-import re
-from time import time
-import boto
-from boto import ec2
-from boto import rds
-from boto import elasticache
-from boto import route53
-import six
-
-from ansible.module_utils import ec2 as ec2_utils
-
-HAS_BOTO3 = False
-try:
- import boto3
- HAS_BOTO3 = True
-except ImportError:
- pass
-
-from six.moves import configparser
-from collections import defaultdict
-
-try:
- import json
-except ImportError:
- import simplejson as json
-
-
-class Ec2Inventory(object):
-
- def _empty_inventory(self):
- return {"_meta" : {"hostvars" : {}}}
-
- def __init__(self):
- ''' Main execution path '''
-
- # Inventory grouped by instance IDs, tags, security groups, regions,
- # and availability zones
- self.inventory = self._empty_inventory()
-
- # Index of hostname (address) to instance ID
- self.index = {}
-
- # Boto profile to use (if any)
- self.boto_profile = None
-
- # AWS credentials.
- self.credentials = {}
-
- # Read settings and parse CLI arguments
- self.parse_cli_args()
- self.read_settings()
-
- # Make sure that profile_name is not passed at all if not set
- # as pre 2.24 boto will fall over otherwise
- if self.boto_profile:
- if not hasattr(boto.ec2.EC2Connection, 'profile_name'):
- self.fail_with_error("boto version must be >= 2.24 to use profile")
-
- # Cache
- if self.args.refresh_cache:
- self.do_api_calls_update_cache()
- elif not self.is_cache_valid():
- self.do_api_calls_update_cache()
-
- # Data to print
- if self.args.host:
- data_to_print = self.get_host_info()
-
- elif self.args.list:
- # Display list of instances for inventory
- if self.inventory == self._empty_inventory():
- data_to_print = self.get_inventory_from_cache()
- else:
- data_to_print = self.json_format_dict(self.inventory, True)
-
- print(data_to_print)
-
-
- def is_cache_valid(self):
-        ''' Determines if the cache files have expired, or if they are still valid '''
-
- if os.path.isfile(self.cache_path_cache):
- mod_time = os.path.getmtime(self.cache_path_cache)
- current_time = time()
- if (mod_time + self.cache_max_age) > current_time:
- if os.path.isfile(self.cache_path_index):
- return True
-
- return False
-
-
- def read_settings(self):
- ''' Reads the settings from the ec2.ini file '''
- if six.PY3:
- config = configparser.ConfigParser()
- else:
- config = configparser.SafeConfigParser()
- ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
- ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path)))
- config.read(ec2_ini_path)
-
- # is eucalyptus?
- self.eucalyptus_host = None
- self.eucalyptus = False
- if config.has_option('ec2', 'eucalyptus'):
- self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
- if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
- self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
-
- # Regions
- self.regions = []
- configRegions = config.get('ec2', 'regions')
- configRegions_exclude = config.get('ec2', 'regions_exclude')
- if (configRegions == 'all'):
- if self.eucalyptus_host:
-                self.regions.append(boto.connect_euca(host=self.eucalyptus_host, **self.credentials).region.name)
- else:
- for regionInfo in ec2.regions():
- if regionInfo.name not in configRegions_exclude:
- self.regions.append(regionInfo.name)
- else:
- self.regions = configRegions.split(",")
-
- # Destination addresses
- self.destination_variable = config.get('ec2', 'destination_variable')
- self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
-
- if config.has_option('ec2', 'hostname_variable'):
- self.hostname_variable = config.get('ec2', 'hostname_variable')
- else:
- self.hostname_variable = None
-
- if config.has_option('ec2', 'destination_format') and \
- config.has_option('ec2', 'destination_format_tags'):
- self.destination_format = config.get('ec2', 'destination_format')
- self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',')
- else:
- self.destination_format = None
- self.destination_format_tags = None
-
- # Route53
- self.route53_enabled = config.getboolean('ec2', 'route53')
- self.route53_excluded_zones = []
- if config.has_option('ec2', 'route53_excluded_zones'):
- self.route53_excluded_zones.extend(
- config.get('ec2', 'route53_excluded_zones', '').split(','))
-
- # Include RDS instances?
- self.rds_enabled = True
- if config.has_option('ec2', 'rds'):
- self.rds_enabled = config.getboolean('ec2', 'rds')
-
- # Include RDS cluster instances?
- if config.has_option('ec2', 'include_rds_clusters'):
- self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters')
- else:
- self.include_rds_clusters = False
-
- # Include ElastiCache instances?
- self.elasticache_enabled = True
- if config.has_option('ec2', 'elasticache'):
- self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
-
- # Return all EC2 instances?
- if config.has_option('ec2', 'all_instances'):
- self.all_instances = config.getboolean('ec2', 'all_instances')
- else:
- self.all_instances = False
-
- # Instance states to be gathered in inventory. Default is 'running'.
- # Setting 'all_instances' to 'yes' overrides this option.
- ec2_valid_instance_states = [
- 'pending',
- 'running',
- 'shutting-down',
- 'terminated',
- 'stopping',
- 'stopped'
- ]
- self.ec2_instance_states = []
- if self.all_instances:
- self.ec2_instance_states = ec2_valid_instance_states
- elif config.has_option('ec2', 'instance_states'):
- for instance_state in config.get('ec2', 'instance_states').split(','):
- instance_state = instance_state.strip()
- if instance_state not in ec2_valid_instance_states:
- continue
- self.ec2_instance_states.append(instance_state)
- else:
- self.ec2_instance_states = ['running']
-
- # Return all RDS instances? (if RDS is enabled)
- if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
- self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
- else:
- self.all_rds_instances = False
-
- # Return all ElastiCache replication groups? (if ElastiCache is enabled)
- if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled:
- self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
- else:
- self.all_elasticache_replication_groups = False
-
- # Return all ElastiCache clusters? (if ElastiCache is enabled)
- if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled:
- self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
- else:
- self.all_elasticache_clusters = False
-
- # Return all ElastiCache nodes? (if ElastiCache is enabled)
- if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled:
- self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
- else:
- self.all_elasticache_nodes = False
-
- # boto configuration profile (prefer CLI argument)
- self.boto_profile = self.args.boto_profile
- if config.has_option('ec2', 'boto_profile') and not self.boto_profile:
- self.boto_profile = config.get('ec2', 'boto_profile')
-
- # AWS credentials (prefer environment variables)
- if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or
- os.environ.get('AWS_PROFILE')):
- if config.has_option('credentials', 'aws_access_key_id'):
- aws_access_key_id = config.get('credentials', 'aws_access_key_id')
- else:
- aws_access_key_id = None
- if config.has_option('credentials', 'aws_secret_access_key'):
- aws_secret_access_key = config.get('credentials', 'aws_secret_access_key')
- else:
- aws_secret_access_key = None
- if config.has_option('credentials', 'aws_security_token'):
- aws_security_token = config.get('credentials', 'aws_security_token')
- else:
- aws_security_token = None
- if aws_access_key_id:
- self.credentials = {
- 'aws_access_key_id': aws_access_key_id,
- 'aws_secret_access_key': aws_secret_access_key
- }
- if aws_security_token:
- self.credentials['security_token'] = aws_security_token
-
- # Cache related
- cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
- if self.boto_profile:
- cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile)
- if not os.path.exists(cache_dir):
- os.makedirs(cache_dir)
-
- cache_name = 'ansible-ec2'
- aws_profile = lambda: (self.boto_profile or
- os.environ.get('AWS_PROFILE') or
- os.environ.get('AWS_ACCESS_KEY_ID') or
- self.credentials.get('aws_access_key_id', None))
- if aws_profile():
- cache_name = '%s-%s' % (cache_name, aws_profile())
- self.cache_path_cache = cache_dir + "/%s.cache" % cache_name
- self.cache_path_index = cache_dir + "/%s.index" % cache_name
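-        # e.g. ~/.ansible/tmp/profile_prod/ansible-ec2-prod.cache for a hypothetical 'prod' boto profile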
- self.cache_max_age = config.getint('ec2', 'cache_max_age')
-
- if config.has_option('ec2', 'expand_csv_tags'):
- self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags')
- else:
- self.expand_csv_tags = False
-
- # Configure nested groups instead of flat namespace.
- if config.has_option('ec2', 'nested_groups'):
- self.nested_groups = config.getboolean('ec2', 'nested_groups')
- else:
- self.nested_groups = False
-
- # Replace dash or not in group names
- if config.has_option('ec2', 'replace_dash_in_groups'):
- self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups')
- else:
- self.replace_dash_in_groups = True
-
- # Configure which groups should be created.
- group_by_options = [
- 'group_by_instance_id',
- 'group_by_region',
- 'group_by_availability_zone',
- 'group_by_ami_id',
- 'group_by_instance_type',
- 'group_by_key_pair',
- 'group_by_vpc_id',
- 'group_by_security_group',
- 'group_by_tag_keys',
- 'group_by_tag_none',
- 'group_by_route53_names',
- 'group_by_rds_engine',
- 'group_by_rds_parameter_group',
- 'group_by_elasticache_engine',
- 'group_by_elasticache_cluster',
- 'group_by_elasticache_parameter_group',
- 'group_by_elasticache_replication_group',
- ]
- for option in group_by_options:
- if config.has_option('ec2', option):
- setattr(self, option, config.getboolean('ec2', option))
- else:
- setattr(self, option, True)
-
- # Do we need to just include hosts that match a pattern?
- try:
- pattern_include = config.get('ec2', 'pattern_include')
- if pattern_include and len(pattern_include) > 0:
- self.pattern_include = re.compile(pattern_include)
- else:
- self.pattern_include = None
- except configparser.NoOptionError:
- self.pattern_include = None
-
- # Do we need to exclude hosts that match a pattern?
- try:
-            pattern_exclude = config.get('ec2', 'pattern_exclude')
- if pattern_exclude and len(pattern_exclude) > 0:
- self.pattern_exclude = re.compile(pattern_exclude)
- else:
- self.pattern_exclude = None
- except configparser.NoOptionError:
- self.pattern_exclude = None
-
- # Instance filters (see boto and EC2 API docs). Ignore invalid filters.
- self.ec2_instance_filters = defaultdict(list)
- if config.has_option('ec2', 'instance_filters'):
-
- filters = [f for f in config.get('ec2', 'instance_filters').split(',') if f]
-
- for instance_filter in filters:
- instance_filter = instance_filter.strip()
- if not instance_filter or '=' not in instance_filter:
- continue
- filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
- if not filter_key:
- continue
- self.ec2_instance_filters[filter_key].append(filter_value)
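-        # e.g. instance_filters = tag:env=staging,tag:role=web
-        # yields {'tag:env': ['staging'], 'tag:role': ['web']}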
-
- def parse_cli_args(self):
- ''' Command line argument processing '''
-
- parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
- parser.add_argument('--list', action='store_true', default=True,
- help='List instances (default: True)')
- parser.add_argument('--host', action='store',
- help='Get all the variables about a specific instance')
- parser.add_argument('--refresh-cache', action='store_true', default=False,
- help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
- parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile',
- help='Use boto profile for connections to EC2')
- self.args = parser.parse_args()
-
-
- def do_api_calls_update_cache(self):
- ''' Do API calls to each region, and save data in cache files '''
-
- if self.route53_enabled:
- self.get_route53_records()
-
- for region in self.regions:
- self.get_instances_by_region(region)
- if self.rds_enabled:
- self.get_rds_instances_by_region(region)
- if self.elasticache_enabled:
- self.get_elasticache_clusters_by_region(region)
- self.get_elasticache_replication_groups_by_region(region)
- if self.include_rds_clusters:
- self.include_rds_clusters_by_region(region)
-
- self.write_to_cache(self.inventory, self.cache_path_cache)
- self.write_to_cache(self.index, self.cache_path_index)
-
- def connect(self, region):
-        ''' Create a connection to the API server '''
- if self.eucalyptus:
- conn = boto.connect_euca(host=self.eucalyptus_host, **self.credentials)
- conn.APIVersion = '2010-08-31'
- else:
- conn = self.connect_to_aws(ec2, region)
- return conn
-
- def boto_fix_security_token_in_profile(self, connect_args):
- ''' monkey patch for boto issue boto/boto#2100 '''
- profile = 'profile ' + self.boto_profile
- if boto.config.has_option(profile, 'aws_security_token'):
- connect_args['security_token'] = boto.config.get(profile, 'aws_security_token')
- return connect_args
-
- def connect_to_aws(self, module, region):
- connect_args = self.credentials
-
- # only pass the profile name if it's set (as it is not supported by older boto versions)
- if self.boto_profile:
- connect_args['profile_name'] = self.boto_profile
- self.boto_fix_security_token_in_profile(connect_args)
-
- conn = module.connect_to_region(region, **connect_args)
- # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
- if conn is None:
- self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
- return conn
-
- def get_instances_by_region(self, region):
-        ''' Makes an AWS EC2 API call to get the list of instances in a
-        particular region '''
-
- try:
- conn = self.connect(region)
- reservations = []
- if self.ec2_instance_filters:
- for filter_key, filter_values in self.ec2_instance_filters.items():
- reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
- else:
- reservations = conn.get_all_instances()
-
- # Pull the tags back in a second step
- # AWS are on record as saying that the tags fetched in the first `get_all_instances` request are not
- # reliable and may be missing, and the only way to guarantee they are there is by calling `get_all_tags`
- instance_ids = []
- for reservation in reservations:
- instance_ids.extend([instance.id for instance in reservation.instances])
-
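-            # The EC2 API limits how many filter values one request may carry,
-            # so fetch the tags in batches of at most 199 instance IDs.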
- max_filter_value = 199
- tags = []
- for i in range(0, len(instance_ids), max_filter_value):
- tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i+max_filter_value]}))
-
- tags_by_instance_id = defaultdict(dict)
- for tag in tags:
- tags_by_instance_id[tag.res_id][tag.name] = tag.value
-
- for reservation in reservations:
- for instance in reservation.instances:
- instance.tags = tags_by_instance_id[instance.id]
- self.add_instance(instance, region)
-
- except boto.exception.BotoServerError as e:
- if e.error_code == 'AuthFailure':
- error = self.get_auth_error_message()
- else:
- backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
- error = "Error connecting to %s backend.\n%s" % (backend, e.message)
- self.fail_with_error(error, 'getting EC2 instances')
-
- def get_rds_instances_by_region(self, region):
-        ''' Makes an AWS API call to get the list of RDS instances in a
-        particular region '''
-
- try:
- conn = self.connect_to_aws(rds, region)
- if conn:
- marker = None
- while True:
- instances = conn.get_all_dbinstances(marker=marker)
- marker = instances.marker
- for instance in instances:
- self.add_rds_instance(instance, region)
- if not marker:
- break
- except boto.exception.BotoServerError as e:
- error = e.reason
-
- if e.error_code == 'AuthFailure':
- error = self.get_auth_error_message()
- if not e.reason == "Forbidden":
- error = "Looks like AWS RDS is down:\n%s" % e.message
- self.fail_with_error(error, 'getting RDS instances')
-
- def include_rds_clusters_by_region(self, region):
- if not HAS_BOTO3:
- self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again",
- "getting RDS clusters")
-
- client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)
-
- marker, clusters = '', []
- while marker is not None:
- resp = client.describe_db_clusters(Marker=marker)
- clusters.extend(resp["DBClusters"])
- marker = resp.get('Marker', None)
-
- account_id = boto.connect_iam().get_user().arn.split(':')[4]
- c_dict = {}
- for c in clusters:
- # remove these datetime objects as there is no serialisation to json
- # currently in place and we don't need the data yet
- if 'EarliestRestorableTime' in c:
- del c['EarliestRestorableTime']
- if 'LatestRestorableTime' in c:
- del c['LatestRestorableTime']
-
- if self.ec2_instance_filters == {}:
- matches_filter = True
- else:
- matches_filter = False
-
- try:
- # arn:aws:rds:<region>:<account number>:<resourcetype>:<name>
- tags = client.list_tags_for_resource(
- ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier'])
- c['Tags'] = tags['TagList']
-
- if self.ec2_instance_filters:
- for filter_key, filter_values in self.ec2_instance_filters.items():
- # get AWS tag key e.g. tag:env will be 'env'
- tag_name = filter_key.split(":", 1)[1]
- # Filter values is a list (if you put multiple values for the same tag name)
- matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags'])
-
- if matches_filter:
- # it matches a filter, so stop looking for further matches
- break
-
- except Exception as e:
- if e.message.find('DBInstanceNotFound') >= 0:
-                    # AWS RDS bug (2016-01-06) means deletion does not fully complete and leaves an 'empty' cluster.
- # Ignore errors when trying to find tags for these
- pass
-
- # ignore empty clusters caused by AWS bug
- if len(c['DBClusterMembers']) == 0:
- continue
- elif matches_filter:
- c_dict[c['DBClusterIdentifier']] = c
-
- self.inventory['db_clusters'] = c_dict
-
- def get_elasticache_clusters_by_region(self, region):
-        ''' Makes an AWS API call to get the list of ElastiCache clusters
-        (with nodes' info) in a particular region. '''
-
-        # ElastiCache boto module doesn't provide a get_all_instances method,
- # that's why we need to call describe directly (it would be called by
- # the shorthand method anyway...)
- try:
- conn = self.connect_to_aws(elasticache, region)
- if conn:
- # show_cache_node_info = True
- # because we also want nodes' information
- response = conn.describe_cache_clusters(None, None, None, True)
-
- except boto.exception.BotoServerError as e:
- error = e.reason
-
- if e.error_code == 'AuthFailure':
- error = self.get_auth_error_message()
- if not e.reason == "Forbidden":
- error = "Looks like AWS ElastiCache is down:\n%s" % e.message
- self.fail_with_error(error, 'getting ElastiCache clusters')
-
- try:
- # Boto also doesn't provide wrapper classes to CacheClusters or
- # CacheNodes. Because of that we can't make use of the get_list
- # method in the AWSQueryConnection. Let's do the work manually
- clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
-
- except KeyError as e:
- error = "ElastiCache query to AWS failed (unexpected format)."
- self.fail_with_error(error, 'getting ElastiCache clusters')
-
- for cluster in clusters:
- self.add_elasticache_cluster(cluster, region)
-
- def get_elasticache_replication_groups_by_region(self, region):
-        ''' Makes an AWS API call to get the list of ElastiCache replication
-        groups in a particular region. '''
-
-        # ElastiCache boto module doesn't provide a get_all_instances method,
- # that's why we need to call describe directly (it would be called by
- # the shorthand method anyway...)
- try:
- conn = self.connect_to_aws(elasticache, region)
- if conn:
- response = conn.describe_replication_groups()
-
- except boto.exception.BotoServerError as e:
- error = e.reason
-
- if e.error_code == 'AuthFailure':
- error = self.get_auth_error_message()
- if not e.reason == "Forbidden":
- error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
-            self.fail_with_error(error, 'getting ElastiCache replication groups')
-
- try:
- # Boto also doesn't provide wrapper classes to ReplicationGroups
- # Because of that we can't make use of the get_list method in the
- # AWSQueryConnection. Let's do the work manually
- replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
-
- except KeyError as e:
- error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
-            self.fail_with_error(error, 'getting ElastiCache replication groups')
-
- for replication_group in replication_groups:
- self.add_elasticache_replication_group(replication_group, region)
-
- def get_auth_error_message(self):
- ''' create an informative error message if there is an issue authenticating'''
- errors = ["Authentication error retrieving ec2 inventory."]
- if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:
- errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')
- else:
- errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
-
- boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
- boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p)))
- if len(boto_config_found) > 0:
- errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
- else:
- errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
-
- return '\n'.join(errors)
-
- def fail_with_error(self, err_msg, err_operation=None):
-        ''' Log an error to stderr for ansible-playbook to consume, then exit '''
- if err_operation:
- err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
- err_msg=err_msg, err_operation=err_operation)
- sys.stderr.write(err_msg)
- sys.exit(1)
-
- def get_instance(self, region, instance_id):
- conn = self.connect(region)
-
- reservations = conn.get_all_instances([instance_id])
- for reservation in reservations:
- for instance in reservation.instances:
- return instance
-
- def add_instance(self, instance, region):
- ''' Adds an instance to the inventory and index, as long as it is
- addressable '''
-
- # Only return instances with desired instance states
- if instance.state not in self.ec2_instance_states:
- return
-
- # Select the best destination address
- if self.destination_format and self.destination_format_tags:
- dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags ])
- elif instance.subnet_id:
- dest = getattr(instance, self.vpc_destination_variable, None)
- if dest is None:
- dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None)
- else:
- dest = getattr(instance, self.destination_variable, None)
- if dest is None:
- dest = getattr(instance, 'tags').get(self.destination_variable, None)
-
- if not dest:
- # Skip instances we cannot address (e.g. private VPC subnet)
- return
-
- # Set the inventory name
- hostname = None
- if self.hostname_variable:
- if self.hostname_variable.startswith('tag_'):
- hostname = instance.tags.get(self.hostname_variable[4:], None)
- else:
- hostname = getattr(instance, self.hostname_variable)
-
- # If we can't get a nice hostname, use the destination address
- if not hostname:
- hostname = dest
- else:
- hostname = self.to_safe(hostname).lower()
-
- # if we only want to include hosts that match a pattern, skip those that don't
- if self.pattern_include and not self.pattern_include.match(hostname):
- return
-
- # if we need to exclude hosts that match a pattern, skip those
- if self.pattern_exclude and self.pattern_exclude.match(hostname):
- return
-
- # Add to index
- self.index[hostname] = [region, instance.id]
-
- # Inventory: Group by instance ID (always a group of 1)
- if self.group_by_instance_id:
- self.inventory[instance.id] = [hostname]
- if self.nested_groups:
- self.push_group(self.inventory, 'instances', instance.id)
-
- # Inventory: Group by region
- if self.group_by_region:
- self.push(self.inventory, region, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'regions', region)
-
- # Inventory: Group by availability zone
- if self.group_by_availability_zone:
- self.push(self.inventory, instance.placement, hostname)
- if self.nested_groups:
- if self.group_by_region:
- self.push_group(self.inventory, region, instance.placement)
- self.push_group(self.inventory, 'zones', instance.placement)
-
- # Inventory: Group by Amazon Machine Image (AMI) ID
- if self.group_by_ami_id:
- ami_id = self.to_safe(instance.image_id)
- self.push(self.inventory, ami_id, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'images', ami_id)
-
- # Inventory: Group by instance type
- if self.group_by_instance_type:
- type_name = self.to_safe('type_' + instance.instance_type)
- self.push(self.inventory, type_name, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'types', type_name)
-
- # Inventory: Group by key pair
- if self.group_by_key_pair and instance.key_name:
- key_name = self.to_safe('key_' + instance.key_name)
- self.push(self.inventory, key_name, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'keys', key_name)
-
- # Inventory: Group by VPC
- if self.group_by_vpc_id and instance.vpc_id:
- vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
- self.push(self.inventory, vpc_id_name, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'vpcs', vpc_id_name)
-
- # Inventory: Group by security group
- if self.group_by_security_group:
- try:
- for group in instance.groups:
- key = self.to_safe("security_group_" + group.name)
- self.push(self.inventory, key, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'security_groups', key)
- except AttributeError:
- self.fail_with_error('\n'.join(['Package boto seems a bit older.',
- 'Please upgrade boto >= 2.3.0.']))
-
- # Inventory: Group by tag keys
- if self.group_by_tag_keys:
- for k, v in instance.tags.items():
- if self.expand_csv_tags and v and ',' in v:
- values = map(lambda x: x.strip(), v.split(','))
- else:
- values = [v]
-
- for v in values:
- if v:
- key = self.to_safe("tag_" + k + "=" + v)
- else:
- key = self.to_safe("tag_" + k)
- self.push(self.inventory, key, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
- if v:
- self.push_group(self.inventory, self.to_safe("tag_" + k), key)
-
- # Inventory: Group by Route53 domain names if enabled
- if self.route53_enabled and self.group_by_route53_names:
- route53_names = self.get_instance_route53_names(instance)
- for name in route53_names:
- self.push(self.inventory, name, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'route53', name)
-
- # Global Tag: instances without tags
- if self.group_by_tag_none and len(instance.tags) == 0:
- self.push(self.inventory, 'tag_none', hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'tags', 'tag_none')
-
- # Global Tag: tag all EC2 instances
- self.push(self.inventory, 'ec2', hostname)
-
- self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
- self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
-
-
- def add_rds_instance(self, instance, region):
- ''' Adds an RDS instance to the inventory and index, as long as it is
- addressable '''
-
- # Only want available instances unless all_rds_instances is True
- if not self.all_rds_instances and instance.status != 'available':
- return
-
- # Select the best destination address
- dest = instance.endpoint[0]
-
- if not dest:
- # Skip instances we cannot address (e.g. private VPC subnet)
- return
-
- # Set the inventory name
- hostname = None
- if self.hostname_variable:
- if self.hostname_variable.startswith('tag_'):
- hostname = instance.tags.get(self.hostname_variable[4:], None)
- else:
- hostname = getattr(instance, self.hostname_variable)
-
- # If we can't get a nice hostname, use the destination address
- if not hostname:
- hostname = dest
-
- hostname = self.to_safe(hostname).lower()
-
- # Add to index
- self.index[hostname] = [region, instance.id]
-
- # Inventory: Group by instance ID (always a group of 1)
- if self.group_by_instance_id:
- self.inventory[instance.id] = [hostname]
- if self.nested_groups:
- self.push_group(self.inventory, 'instances', instance.id)
-
- # Inventory: Group by region
- if self.group_by_region:
- self.push(self.inventory, region, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'regions', region)
-
- # Inventory: Group by availability zone
- if self.group_by_availability_zone:
- self.push(self.inventory, instance.availability_zone, hostname)
- if self.nested_groups:
- if self.group_by_region:
- self.push_group(self.inventory, region, instance.availability_zone)
- self.push_group(self.inventory, 'zones', instance.availability_zone)
-
- # Inventory: Group by instance type
- if self.group_by_instance_type:
- type_name = self.to_safe('type_' + instance.instance_class)
- self.push(self.inventory, type_name, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'types', type_name)
-
- # Inventory: Group by VPC
- if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
- vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
- self.push(self.inventory, vpc_id_name, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'vpcs', vpc_id_name)
-
- # Inventory: Group by security group
- if self.group_by_security_group:
- try:
- if instance.security_group:
- key = self.to_safe("security_group_" + instance.security_group.name)
- self.push(self.inventory, key, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'security_groups', key)
-
- except AttributeError:
-                self.fail_with_error('\n'.join(['Package boto appears to be too old.',
-                                                'Please upgrade to boto >= 2.3.0.']))
-
-
- # Inventory: Group by engine
- if self.group_by_rds_engine:
- self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
-
- # Inventory: Group by parameter group
- if self.group_by_rds_parameter_group:
- self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
-
- # Global Tag: all RDS instances
- self.push(self.inventory, 'rds', hostname)
-
- self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
- self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
-
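-
-# --- Illustrative sketch (editor's addition, hypothetical helper, not part of
-# the original script). add_instance and add_rds_instance derive the inventory
-# hostname identically: a hostname_variable starting with 'tag_' is looked up
-# in the instance tags, anything else is read as an instance attribute, and
-# the destination address is the fallback:
-def _derive_hostname(instance, hostname_variable, dest):
-    hostname = None
-    if hostname_variable:
-        if hostname_variable.startswith('tag_'):
-            # Strip the 'tag_' prefix and look the tag up by name
-            hostname = instance.tags.get(hostname_variable[4:], None)
-        else:
-            hostname = getattr(instance, hostname_variable, None)
-    # Fall back to the destination address when no usable name is found
-    return hostname or dest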
- def add_elasticache_cluster(self, cluster, region):
-        ''' Adds an ElastiCache cluster to the inventory and index, as long as
-        its nodes are addressable '''
-
- # Only want available clusters unless all_elasticache_clusters is True
- if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
- return
-
- # Select the best destination address
- if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
- # Memcached cluster
- dest = cluster['ConfigurationEndpoint']['Address']
- is_redis = False
- else:
-            # Redis single-node cluster
- # Because all Redis clusters are single nodes, we'll merge the
- # info from the cluster with info about the node
- dest = cluster['CacheNodes'][0]['Endpoint']['Address']
- is_redis = True
-
- if not dest:
- # Skip clusters we cannot address (e.g. private VPC subnet)
- return
-
- # Add to index
- self.index[dest] = [region, cluster['CacheClusterId']]
-
- # Inventory: Group by instance ID (always a group of 1)
- if self.group_by_instance_id:
- self.inventory[cluster['CacheClusterId']] = [dest]
- if self.nested_groups:
- self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])
-
- # Inventory: Group by region
- if self.group_by_region and not is_redis:
- self.push(self.inventory, region, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'regions', region)
-
- # Inventory: Group by availability zone
- if self.group_by_availability_zone and not is_redis:
- self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
- if self.nested_groups:
- if self.group_by_region:
- self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
- self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
-
- # Inventory: Group by node type
- if self.group_by_instance_type and not is_redis:
- type_name = self.to_safe('type_' + cluster['CacheNodeType'])
- self.push(self.inventory, type_name, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'types', type_name)
-
- # Inventory: Group by VPC (information not available in the current
- # AWS API version for ElastiCache)
-
- # Inventory: Group by security group
- if self.group_by_security_group and not is_redis:
-
- # Check for the existence of the 'SecurityGroups' key and also if
- # this key has some value. When the cluster is not placed in a SG
- # the query can return None here and cause an error.
- if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
- for security_group in cluster['SecurityGroups']:
- key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
- self.push(self.inventory, key, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'security_groups', key)
-
- # Inventory: Group by engine
- if self.group_by_elasticache_engine and not is_redis:
- self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine']))
-
- # Inventory: Group by parameter group
- if self.group_by_elasticache_parameter_group:
- self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName']))
-
- # Inventory: Group by replication group
- if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']:
- self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId']))
-
- # Global Tag: all ElastiCache clusters
- self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId'])
-
- host_info = self.get_host_info_dict_from_describe_dict(cluster)
-
- self.inventory["_meta"]["hostvars"][dest] = host_info
-
- # Add the nodes
- for node in cluster['CacheNodes']:
- self.add_elasticache_node(node, cluster, region)
-
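-
-# --- Illustrative sketch (editor's addition, hypothetical standalone helper,
-# not part of the original script). The destination-address branch in
-# add_elasticache_cluster above distinguishes the two ElastiCache shapes:
-# Memcached clusters expose a cluster-wide ConfigurationEndpoint, while Redis
-# "clusters" are single nodes whose address lives on the node itself:
-def _elasticache_dest(cluster):
-    if cluster.get('ConfigurationEndpoint'):
-        # Memcached: one discovery endpoint for the whole cluster
-        return cluster['ConfigurationEndpoint']['Address'], False
-    # Redis single-node: take the first (only) node's endpoint
-    return cluster['CacheNodes'][0]['Endpoint']['Address'], True
-
-# Example: a Memcached-style describe dict yields (address, is_redis=False)
-assert _elasticache_dest({'ConfigurationEndpoint': {'Address': 'mc.example'},
-                          'CacheNodes': []}) == ('mc.example', False)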
- def add_elasticache_node(self, node, cluster, region):
- ''' Adds an ElastiCache node to the inventory and index, as long as
- it is addressable '''
-
- # Only want available nodes unless all_elasticache_nodes is True
- if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
- return
-
- # Select the best destination address
- dest = node['Endpoint']['Address']
-
- if not dest:
- # Skip nodes we cannot address (e.g. private VPC subnet)
- return
-
- node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])
-
- # Add to index
- self.index[dest] = [region, node_id]
-
- # Inventory: Group by node ID (always a group of 1)
- if self.group_by_instance_id:
- self.inventory[node_id] = [dest]
- if self.nested_groups:
- self.push_group(self.inventory, 'instances', node_id)
-
- # Inventory: Group by region
- if self.group_by_region:
- self.push(self.inventory, region, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'regions', region)
-
- # Inventory: Group by availability zone
- if self.group_by_availability_zone:
- self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
- if self.nested_groups:
- if self.group_by_region:
- self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
- self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
-
- # Inventory: Group by node type
- if self.group_by_instance_type:
- type_name = self.to_safe('type_' + cluster['CacheNodeType'])
- self.push(self.inventory, type_name, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'types', type_name)
-
- # Inventory: Group by VPC (information not available in the current
- # AWS API version for ElastiCache)
-
- # Inventory: Group by security group
- if self.group_by_security_group:
-
- # Check for the existence of the 'SecurityGroups' key and also if
- # this key has some value. When the cluster is not placed in a SG
- # the query can return None here and cause an error.
- if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
- for security_group in cluster['SecurityGroups']:
- key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
- self.push(self.inventory, key, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'security_groups', key)
-
- # Inventory: Group by engine
- if self.group_by_elasticache_engine:
- self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))
-
- # Inventory: Group by parameter group (done at cluster level)
-
- # Inventory: Group by replication group (done at cluster level)
-
- # Inventory: Group by ElastiCache Cluster
- if self.group_by_elasticache_cluster:
- self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)
-
- # Global Tag: all ElastiCache nodes
- self.push(self.inventory, 'elasticache_nodes', dest)
-
- host_info = self.get_host_info_dict_from_describe_dict(node)
-
- if dest in self.inventory["_meta"]["hostvars"]:
- self.inventory["_meta"]["hostvars"][dest].update(host_info)
- else:
- self.inventory["_meta"]["hostvars"][dest] = host_info
-
- def add_elasticache_replication_group(self, replication_group, region):
- ''' Adds an ElastiCache replication group to the inventory and index '''
-
- # Only want available clusters unless all_elasticache_replication_groups is True
- if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
- return
-
- # Select the best destination address (PrimaryEndpoint)
- dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']
-
- if not dest:
- # Skip clusters we cannot address (e.g. private VPC subnet)
- return
-
- # Add to index
- self.index[dest] = [region, replication_group['ReplicationGroupId']]
-
- # Inventory: Group by ID (always a group of 1)
- if self.group_by_instance_id:
- self.inventory[replication_group['ReplicationGroupId']] = [dest]
- if self.nested_groups:
- self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])
-
- # Inventory: Group by region
- if self.group_by_region:
- self.push(self.inventory, region, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'regions', region)
-
- # Inventory: Group by availability zone (doesn't apply to replication groups)
-
- # Inventory: Group by node type (doesn't apply to replication groups)
-
-        # Inventory: Group by VPC (information not available in the current
-        # AWS API version for replication groups)
-
-        # Inventory: Group by security group (doesn't apply to replication groups)
-        # Check this value at the cluster level
-
- # Inventory: Group by engine (replication groups are always Redis)
- if self.group_by_elasticache_engine:
- self.push(self.inventory, 'elasticache_redis', dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'elasticache_engines', 'redis')
-
-        # Global Tag: all ElastiCache replication groups
- self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])
-
- host_info = self.get_host_info_dict_from_describe_dict(replication_group)
-
- self.inventory["_meta"]["hostvars"][dest] = host_info
-
- def get_route53_records(self):
- ''' Get and store the map of resource records to domain names that
- point to them. '''
-
- r53_conn = route53.Route53Connection()
- all_zones = r53_conn.get_zones()
-
- route53_zones = [ zone for zone in all_zones if zone.name[:-1]
- not in self.route53_excluded_zones ]
-
- self.route53_records = {}
-
- for zone in route53_zones:
- rrsets = r53_conn.get_all_rrsets(zone.id)
-
- for record_set in rrsets:
- record_name = record_set.name
-
- if record_name.endswith('.'):
- record_name = record_name[:-1]
-
- for resource in record_set.resource_records:
- self.route53_records.setdefault(resource, set())
- self.route53_records[resource].add(record_name)
-
-
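-
-# --- Illustrative sketch (editor's addition, hypothetical helper, not part of
-# the original script). get_route53_records builds a reverse map from
-# resource-record values (IPs and DNS names) to the record names pointing at
-# them, so get_instance_route53_names can answer lookups without rescanning:
-def _reverse_map(record_sets):
-    records = {}
-    for record_name, resources in record_sets:
-        for resource in resources:
-            # setdefault creates the set on first sight of this resource
-            records.setdefault(resource, set()).add(record_name.rstrip('.'))
-    return records
-
-# Example: two record names pointing at one address share a single set
-rrs = [('a.example.com.', ['10.0.0.1']), ('b.example.com.', ['10.0.0.1'])]
-assert _reverse_map(rrs)['10.0.0.1'] == {'a.example.com', 'b.example.com'}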
- def get_instance_route53_names(self, instance):
- ''' Check if an instance is referenced in the records we have from
- Route53. If it is, return the list of domain names pointing to said
- instance. If nothing points to it, return an empty list. '''
-
- instance_attributes = [ 'public_dns_name', 'private_dns_name',
- 'ip_address', 'private_ip_address' ]
-
- name_list = set()
-
- for attrib in instance_attributes:
- try:
- value = getattr(instance, attrib)
- except AttributeError:
- continue
-
- if value in self.route53_records:
- name_list.update(self.route53_records[value])
-
- return list(name_list)
-
- def get_host_info_dict_from_instance(self, instance):
- instance_vars = {}
- for key in vars(instance):
- value = getattr(instance, key)
- key = self.to_safe('ec2_' + key)
-
- # Handle complex types
- # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
- if key == 'ec2__state':
- instance_vars['ec2_state'] = instance.state or ''
- instance_vars['ec2_state_code'] = instance.state_code
- elif key == 'ec2__previous_state':
- instance_vars['ec2_previous_state'] = instance.previous_state or ''
- instance_vars['ec2_previous_state_code'] = instance.previous_state_code
- elif type(value) in [int, bool]:
- instance_vars[key] = value
- elif isinstance(value, six.string_types):
- instance_vars[key] = value.strip()
- elif type(value) == type(None):
- instance_vars[key] = ''
- elif key == 'ec2_region':
- instance_vars[key] = value.name
- elif key == 'ec2__placement':
- instance_vars['ec2_placement'] = value.zone
- elif key == 'ec2_tags':
- for k, v in value.items():
- if self.expand_csv_tags and ',' in v:
- v = list(map(lambda x: x.strip(), v.split(',')))
- key = self.to_safe('ec2_tag_' + k)
- instance_vars[key] = v
- elif key == 'ec2_groups':
- group_ids = []
- group_names = []
- for group in value:
- group_ids.append(group.id)
- group_names.append(group.name)
- instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
- instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
- elif key == 'ec2_block_device_mapping':
- instance_vars["ec2_block_devices"] = {}
- for k, v in value.items():
- instance_vars["ec2_block_devices"][ os.path.basename(k) ] = v.volume_id
- else:
- pass
- # TODO Product codes if someone finds them useful
- #print key
- #print type(value)
- #print value
-
- return instance_vars
-
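-
-# --- Illustrative sketch (editor's addition, hypothetical helper, not part of
-# the original script). With expand_csv_tags enabled, the ec2_tags branch
-# above turns a tag value like "web, db" into the list ['web', 'db'] in
-# hostvars instead of leaving it as one string:
-def _expand_csv_tag(value):
-    if ',' in value:
-        return [part.strip() for part in value.split(',')]
-    return value
-
-assert _expand_csv_tag('web, db') == ['web', 'db']
-assert _expand_csv_tag('web') == 'web'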
- def get_host_info_dict_from_describe_dict(self, describe_dict):
-        ''' Parses the dictionary returned by the API call into a flat
-        dictionary of parameters. This method should be used only when
-        'describe' is used directly because Boto doesn't provide specific classes. '''
-
-        # I really don't agree with prefixing everything with 'ec2'
-        # because EC2, RDS and ElastiCache are different services.
-        # I'm just following the pattern used until now to avoid breaking
-        # compatibility.
-
- host_info = {}
- for key in describe_dict:
- value = describe_dict[key]
- key = self.to_safe('ec2_' + self.uncammelize(key))
-
- # Handle complex types
-
- # Target: Memcached Cache Clusters
- if key == 'ec2_configuration_endpoint' and value:
- host_info['ec2_configuration_endpoint_address'] = value['Address']
- host_info['ec2_configuration_endpoint_port'] = value['Port']
-
- # Target: Cache Nodes and Redis Cache Clusters (single node)
- if key == 'ec2_endpoint' and value:
- host_info['ec2_endpoint_address'] = value['Address']
- host_info['ec2_endpoint_port'] = value['Port']
-
- # Target: Redis Replication Groups
- if key == 'ec2_node_groups' and value:
- host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
- host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
- replica_count = 0
- for node in value[0]['NodeGroupMembers']:
- if node['CurrentRole'] == 'primary':
- host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
- host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
- host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
- elif node['CurrentRole'] == 'replica':
- host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address']
- host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port']
- host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId']
- replica_count += 1
-
- # Target: Redis Replication Groups
- if key == 'ec2_member_clusters' and value:
- host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])
-
- # Target: All Cache Clusters
- elif key == 'ec2_cache_parameter_group':
- host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
- host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
- host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']
-
- # Target: Almost everything
- elif key == 'ec2_security_groups':
-
- # Skip if SecurityGroups is None
- # (it is possible to have the key defined but no value in it).
- if value is not None:
- sg_ids = []
- for sg in value:
- sg_ids.append(sg['SecurityGroupId'])
- host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])
-
- # Target: Everything
- # Preserve booleans and integers
- elif type(value) in [int, bool]:
- host_info[key] = value
-
- # Target: Everything
- # Sanitize string values
- elif isinstance(value, six.string_types):
- host_info[key] = value.strip()
-
- # Target: Everything
- # Replace None by an empty string
- elif type(value) == type(None):
- host_info[key] = ''
-
- else:
- # Remove non-processed complex types
- pass
-
- return host_info
-
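-
-# --- Illustrative sketch (editor's addition, hypothetical helper, not part of
-# the original script). For Redis replication groups the method above
-# flattens NodeGroupMembers into numbered hostvars
-# (ec2_replica_cluster_id_0, _1, ...), since hostvars must stay a flat dict:
-def _number_replicas(members):
-    host_info, replica_count = {}, 0
-    for node in members:
-        if node['CurrentRole'] == 'replica':
-            host_info['ec2_replica_cluster_id_%d' % replica_count] = node['CacheClusterId']
-            replica_count += 1
-    return host_info
-
-members = [{'CurrentRole': 'replica', 'CacheClusterId': 'c1'},
-           {'CurrentRole': 'replica', 'CacheClusterId': 'c2'}]
-assert _number_replicas(members) == {'ec2_replica_cluster_id_0': 'c1',
-                                     'ec2_replica_cluster_id_1': 'c2'}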
- def get_host_info(self):
- ''' Get variables about a specific host '''
-
- if len(self.index) == 0:
- # Need to load index from cache
- self.load_index_from_cache()
-
-        if self.args.host not in self.index:
-            # try updating the cache
-            self.do_api_calls_update_cache()
-            if self.args.host not in self.index:
- # host might not exist anymore
- return self.json_format_dict({}, True)
-
- (region, instance_id) = self.index[self.args.host]
-
- instance = self.get_instance(region, instance_id)
- return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
-
- def push(self, my_dict, key, element):
- ''' Push an element onto an array that may not have been defined in
- the dict '''
- group_info = my_dict.setdefault(key, [])
- if isinstance(group_info, dict):
- host_list = group_info.setdefault('hosts', [])
- host_list.append(element)
- else:
- group_info.append(element)
-
- def push_group(self, my_dict, key, element):
- ''' Push a group as a child of another group. '''
- parent_group = my_dict.setdefault(key, {})
- if not isinstance(parent_group, dict):
- parent_group = my_dict[key] = {'hosts': parent_group}
- child_groups = parent_group.setdefault('children', [])
- if element not in child_groups:
- child_groups.append(element)
-
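-
-# --- Illustrative sketch (editor's addition, not part of the original
-# script). push and push_group together build Ansible's dynamic-inventory
-# JSON: plain host lists plus dicts carrying 'hosts' and 'children'. A quick
-# demonstration of the resulting shape using the same setdefault pattern:
-inv = {}
-inv.setdefault('us-east-1', []).append('db1')  # push
-inv.setdefault('regions', {}).setdefault('children', []).append('us-east-1')  # push_group
-assert inv == {'us-east-1': ['db1'], 'regions': {'children': ['us-east-1']}}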
- def get_inventory_from_cache(self):
- ''' Reads the inventory from the cache file and returns it as a JSON
- object '''
-
- cache = open(self.cache_path_cache, 'r')
- json_inventory = cache.read()
- return json_inventory
-
-
- def load_index_from_cache(self):
- ''' Reads the index from the cache file sets self.index '''
-
- cache = open(self.cache_path_index, 'r')
- json_index = cache.read()
- self.index = json.loads(json_index)
-
-
- def write_to_cache(self, data, filename):
- ''' Writes data in JSON format to a file '''
-
- json_data = self.json_format_dict(data, True)
- cache = open(filename, 'w')
- cache.write(json_data)
- cache.close()
-
- def uncammelize(self, key):
- temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
- return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
-
- def to_safe(self, word):
- ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
- regex = "[^A-Za-z0-9\_"
- if not self.replace_dash_in_groups:
- regex += "\-"
- return re.sub(regex + "]", "_", word)
-
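-
-# --- Illustrative sketch (editor's addition, not part of the original
-# script). uncammelize turns the CamelCase keys of describe dicts into
-# snake_case, and to_safe replaces anything outside [A-Za-z0-9_] (optionally
-# keeping '-') so results are legal Ansible group names:
-import re
-
-def _uncammelize(key):
-    temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
-    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
-
-assert _uncammelize('CacheClusterId') == 'cache_cluster_id'
-assert re.sub(r'[^A-Za-z0-9_]', '_', 'tag:Name') == 'tag_Name'  # to_safe idea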
- def json_format_dict(self, data, pretty=False):
- ''' Converts a dict to a JSON object and dumps it as a formatted
- string '''
-
- if pretty:
- return json.dumps(data, sort_keys=True, indent=2)
- else:
- return json.dumps(data)
-
-
-# Run the script
-Ec2Inventory()
diff --git a/inventory/aws/hosts/hosts b/inventory/aws/hosts/hosts
deleted file mode 100644
index 3996e577e..000000000
--- a/inventory/aws/hosts/hosts
+++ /dev/null
@@ -1 +0,0 @@
-localhost ansible_connection=local ansible_become=no ansible_python_interpreter='/usr/bin/env python2'
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 385278f3b..ad69bd587 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -56,6 +56,9 @@ openshift_release=v3.6
#openshift_use_node_system_container=False
#openshift_use_master_system_container=False
#openshift_use_etcd_system_container=False
+#
+# In either case, system_images_registry must be specified to be able to find the system images
+#system_images_registry="docker.io"
# Install the openshift examples
#openshift_install_examples=true
@@ -111,7 +114,7 @@ openshift_release=v3.6
# Instead of using docker, replace it with cri-o
# NOTE: This uses openshift_docker_systemcontainer_image_registry_override as its override
# just as container-engine does.
-#openshift_docker_use_crio=False
+#openshift_use_crio=False
# Force the registry to use for the docker/crio system container. By default the registry
# will be built off of the deployment type and ansible_distribution. Only
# use this option if you are sure you know what you are doing!
@@ -377,45 +380,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# and is in the form of a list. If no data is passed then a default router will be
# created. There are multiple combinations of router sharding. The one described
# below supports routers on separate nodes.
-#openshift_hosted_routers:
-#- name: router1
-# stats_port: 1936
-# ports:
-# - 80:80
-# - 443:443
-# replicas: 1
-# namespace: default
-# serviceaccount: router
-# selector: type=router1
-# images: "openshift3/ose-${component}:${version}"
-# edits: []
-# certificate:
-# certfile: /path/to/certificate/abc.crt
-# keyfile: /path/to/certificate/abc.key
-# cafile: /path/to/certificate/ca.crt
-#- name: router2
-# stats_port: 1936
-# ports:
-# - 80:80
-# - 443:443
-# replicas: 1
-# namespace: default
-# serviceaccount: router
-# selector: type=router2
-# images: "openshift3/ose-${component}:${version}"
-# certificate:
-# certfile: /path/to/certificate/xyz.crt
-# keyfile: /path/to/certificate/xyz.key
-# cafile: /path/to/certificate/ca.crt
-# edits:
-# # ROUTE_LABELS sets the router to listen for routes
-# # tagged with the provided values
-# - key: spec.template.spec.containers[0].env
-# value:
-# name: ROUTE_LABELS
-# value: "route=external"
-# action: append
#
+#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}]
+
# OpenShift Registry Console Options
# Override the console image prefix for enterprise deployments, not used in origin
# default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console"
@@ -571,7 +538,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
# Configure the prefix and version for the component images
#openshift_hosted_metrics_deployer_prefix=docker.io/openshift/origin-
-#openshift_hosted_metrics_deployer_version=3.6.0
+#openshift_hosted_metrics_deployer_version=v3.6.0
#
# StorageClass
# openshift_storageclass_name=gp2
@@ -626,7 +593,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_elasticsearch_cluster_size=1
# Configure the prefix and version for the component images
#openshift_hosted_logging_deployer_prefix=docker.io/openshift/origin-
-#openshift_hosted_logging_deployer_version=3.6.0
+#openshift_hosted_logging_deployer_version=v3.6.0
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 87fdee904..b52806bc7 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -56,6 +56,9 @@ openshift_release=v3.6
#openshift_use_node_system_container=False
#openshift_use_master_system_container=False
#openshift_use_etcd_system_container=False
+#
+# In either case, system_images_registry must be specified to be able to find the system images
+#system_images_registry="registry.access.redhat.com"
# Install the openshift examples
#openshift_install_examples=true
@@ -111,7 +114,7 @@ openshift_release=v3.6
# Install and run cri-o along side docker
# NOTE: This uses openshift_docker_systemcontainer_image_registry_override as its override
# just as container-engine does.
-#openshift_docker_use_crio=False
+#openshift_use_crio=False
# Force the registry to use for the container-engine/crio system container. By default the registry
# will be built off of the deployment type and ansible_distribution. Only
# use this option if you are sure you know what you are doing!
@@ -167,6 +170,14 @@ openshift_release=v3.6
# modify image streams to point at that registry by setting the following to true
#openshift_examples_modify_imagestreams=true
+# If oreg_url points to a registry requiring authentication, provide the following:
+#oreg_auth_user=some_user
+#oreg_auth_password='my-pass'
+# NOTE: oreg_url must be defined by the user for oreg_auth_* to have any effect.
+# oreg_auth_password should be generated by running docker login.
+# To update registry auth credentials, uncomment the following:
+#oreg_auth_credentials_replace=True
+
# OpenShift repository configuration
#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
#openshift_repos_enable_testing=false
@@ -376,44 +387,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# and is in the form of a list. If no data is passed then a default router will be
# created. There are multiple combinations of router sharding. The one described
# below supports routers on separate nodes.
-#openshift_hosted_routers:
-#- name: router1
-# stats_port: 1936
-# ports:
-# - 80:80
-# - 443:443
-# replicas: 1
-# namespace: default
-# serviceaccount: router
-# selector: type=router1
-# images: "openshift3/ose-${component}:${version}"
-# edits: []
-# certificate:
-# certfile: /path/to/certificate/abc.crt
-# keyfile: /path/to/certificate/abc.key
-# cafile: /path/to/certificate/ca.crt
-#- name: router2
-# stats_port: 1936
-# ports:
-# - 80:80
-# - 443:443
-# replicas: 1
-# namespace: default
-# serviceaccount: router
-# selector: type=router2
-# images: "openshift3/ose-${component}:${version}"
-# certificate:
-# certfile: /path/to/certificate/xyz.crt
-# keyfile: /path/to/certificate/xyz.key
-# cafile: /path/to/certificate/ca.crt
-# edits:
-# # ROUTE_LABELS sets the router to listen for routes
-# # tagged with the provided values
-# - key: spec.template.spec.containers[0].env
-# value:
-# name: ROUTE_LABELS
-# value: "route=external"
-# action: append
+#
+#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}]
# OpenShift Registry Console Options
# Override the console image prefix for enterprise deployments, not used in origin
diff --git a/inventory/gce/hosts/gce.py b/inventory/gce/hosts/gce.py
deleted file mode 100755
index 2be46a58c..000000000
--- a/inventory/gce/hosts/gce.py
+++ /dev/null
@@ -1,477 +0,0 @@
-#!/usr/bin/env python2
-# pylint: skip-file
-# Copyright 2013 Google Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-'''
-GCE external inventory script
-=================================
-
-Generates inventory that Ansible can understand by making API requests to
-Google Compute Engine via the libcloud library. Full install/configuration
-instructions for the gce* modules can be found in the comments of
-ansible/test/gce_tests.py.
-
-When run against a specific host, this script returns the following variables
-based on the data obtained from the libcloud Node object:
- - gce_uuid
- - gce_id
- - gce_image
- - gce_machine_type
- - gce_private_ip
- - gce_public_ip
- - gce_name
- - gce_description
- - gce_status
- - gce_zone
- - gce_tags
- - gce_metadata
- - gce_network
-
-When run in --list mode, instances are grouped by the following categories:
- - zone:
- zone group name examples are us-central1-b, europe-west1-a, etc.
- - instance tags:
- An entry is created for each tag. For example, if you have two instances
- with a common tag called 'foo', they will both be grouped together under
- the 'tag_foo' name.
- - network name:
- the name of the network is appended to 'network_' (e.g. the 'default'
- network will result in a group named 'network_default')
- - machine type
- types follow a pattern like n1-standard-4, g1-small, etc.
- - running status:
- group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
- - image:
- when using an ephemeral/scratch disk, this will be set to the image name
- used when creating the instance (e.g. debian-7-wheezy-v20130816). when
- your instance was created with a root persistent disk it will be set to
- 'persistent_disk' since there is no current way to determine the image.
-
-Examples:
- Execute uname on all instances in the us-central1-a zone
- $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
-
- Use the GCE inventory script to print out instance specific information
- $ contrib/inventory/gce.py --host my_instance
-
-Author: Eric Johnson <erjohnso@google.com>
-Contributors: Matt Hite <mhite@hotmail.com>, Tom Melendez <supertom@google.com>
-Version: 0.0.3
-'''
-
-__requires__ = ['pycrypto>=2.6']
-try:
- import pkg_resources
-except ImportError:
- # Use pkg_resources to find the correct versions of libraries and set
- # sys.path appropriately when there are multiversion installs. We don't
- # fail here as there is code that better expresses the errors where the
- # library is used.
- pass
-
-USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
-USER_AGENT_VERSION="v2"
-
-import sys
-import os
-import argparse
-
-from time import time
-
-import ConfigParser
-
-import logging
-logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
-
-try:
- import json
-except ImportError:
- import simplejson as json
-
-try:
- from libcloud.compute.types import Provider
- from libcloud.compute.providers import get_driver
- _ = Provider.GCE
-except:
- sys.exit("GCE inventory script requires libcloud >= 0.13")
-
-
-class CloudInventoryCache(object):
- def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp',
- cache_max_age=300):
- cache_dir = os.path.expanduser(cache_path)
- if not os.path.exists(cache_dir):
- os.makedirs(cache_dir)
- self.cache_path_cache = os.path.join(cache_dir, cache_name)
-
- self.cache_max_age = cache_max_age
-
- def is_valid(self, max_age=None):
-        ''' Determines whether the cache file has expired or is still valid '''
-
- if max_age is None:
- max_age = self.cache_max_age
-
- if os.path.isfile(self.cache_path_cache):
- mod_time = os.path.getmtime(self.cache_path_cache)
- current_time = time()
- if (mod_time + max_age) > current_time:
- return True
-
- return False
-
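-
-# --- Illustrative sketch (editor's addition, hypothetical helper, not part of
-# the original script). is_valid above is a plain mtime check: the cache is
-# fresh while (file mtime + max_age) is still in the future:
-import os
-from time import time
-
-def _cache_is_fresh(path, max_age):
-    # A missing file counts as stale, forcing an API refresh
-    return os.path.isfile(path) and os.path.getmtime(path) + max_age > time()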
- def get_all_data_from_cache(self, filename=''):
- ''' Reads the JSON inventory from the cache file. Returns Python dictionary. '''
-
- data = ''
- if not filename:
- filename = self.cache_path_cache
- with open(filename, 'r') as cache:
- data = cache.read()
- return json.loads(data)
-
- def write_to_cache(self, data, filename=''):
- ''' Writes data to file as JSON. Returns True. '''
- if not filename:
- filename = self.cache_path_cache
- json_data = json.dumps(data)
- with open(filename, 'w') as cache:
- cache.write(json_data)
- return True
-
-
-class GceInventory(object):
- def __init__(self):
- # Cache object
- self.cache = None
- # dictionary containing inventory read from disk
- self.inventory = {}
-
- # Read settings and parse CLI arguments
- self.parse_cli_args()
- self.config = self.get_config()
- self.driver = self.get_gce_driver()
- self.ip_type = self.get_inventory_options()
- if self.ip_type:
- self.ip_type = self.ip_type.lower()
-
- # Cache management
- start_inventory_time = time()
- cache_used = False
- if self.args.refresh_cache or not self.cache.is_valid():
- self.do_api_calls_update_cache()
- else:
- self.load_inventory_from_cache()
- cache_used = True
- self.inventory['_meta']['stats'] = {
- 'inventory_load_time': time() - start_inventory_time,
- 'cache_used': cache_used
- }
-
- # Just display data for specific host
- if self.args.host:
- print(self.json_format_dict(
- self.inventory['_meta']['hostvars'][self.args.host],
- pretty=self.args.pretty))
- else:
- # Otherwise, assume user wants all instances grouped
- zones = self.parse_env_zones()
- print(self.json_format_dict(self.inventory,
- pretty=self.args.pretty))
- sys.exit(0)
-
- def get_config(self):
- """
- Reads the settings from the gce.ini file.
-
- Populates a SafeConfigParser object with defaults and
- attempts to read an .ini-style configuration from the filename
- specified in GCE_INI_PATH. If the environment variable is
- not present, the filename defaults to gce.ini in the current
- working directory.
- """
- gce_ini_default_path = os.path.join(
- os.path.dirname(os.path.realpath(__file__)), "gce.ini")
- gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
-
- # Create a ConfigParser.
- # This provides empty defaults to each key, so that environment
- # variable configuration (as opposed to INI configuration) is able
- # to work.
- config = ConfigParser.SafeConfigParser(defaults={
- 'gce_service_account_email_address': '',
- 'gce_service_account_pem_file_path': '',
- 'gce_project_id': '',
- 'libcloud_secrets': '',
- 'inventory_ip_type': '',
- 'cache_path': '~/.ansible/tmp',
- 'cache_max_age': '300'
- })
- if 'gce' not in config.sections():
- config.add_section('gce')
- if 'inventory' not in config.sections():
- config.add_section('inventory')
- if 'cache' not in config.sections():
- config.add_section('cache')
-
- config.read(gce_ini_path)
-
- #########
- # Section added for processing ini settings
- #########
-
- # Set the instance_states filter based on config file options
- self.instance_states = []
- if config.has_option('gce', 'instance_states'):
- states = config.get('gce', 'instance_states')
- # Ignore if instance_states is an empty string.
- if states:
- self.instance_states = states.split(',')
-
- # Caching
- cache_path = config.get('cache', 'cache_path')
- cache_max_age = config.getint('cache', 'cache_max_age')
-        # TODO(supertom): support project-specific caches
- cache_name = 'ansible-gce.cache'
- self.cache = CloudInventoryCache(cache_path=cache_path,
- cache_max_age=cache_max_age,
- cache_name=cache_name)
- return config
-
- def get_inventory_options(self):
- """Determine inventory options. Environment variables always
- take precedence over configuration files."""
- ip_type = self.config.get('inventory', 'inventory_ip_type')
- # If the appropriate environment variables are set, they override
- # other configuration
- ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
- return ip_type
-
- def get_gce_driver(self):
- """Determine the GCE authorization settings and return a
- libcloud driver.
- """
- # Attempt to get GCE params from a configuration file, if one
- # exists.
- secrets_path = self.config.get('gce', 'libcloud_secrets')
- secrets_found = False
- try:
- import secrets
- args = list(getattr(secrets, 'GCE_PARAMS', []))
- kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
- secrets_found = True
- except:
- pass
-
- if not secrets_found and secrets_path:
- if not secrets_path.endswith('secrets.py'):
- err = "Must specify libcloud secrets file as "
- err += "/absolute/path/to/secrets.py"
- sys.exit(err)
- sys.path.append(os.path.dirname(secrets_path))
- try:
- import secrets
- args = list(getattr(secrets, 'GCE_PARAMS', []))
- kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
- secrets_found = True
- except:
- pass
- if not secrets_found:
- args = [
- self.config.get('gce','gce_service_account_email_address'),
- self.config.get('gce','gce_service_account_pem_file_path')
- ]
- kwargs = {'project': self.config.get('gce', 'gce_project_id')}
-
- # If the appropriate environment variables are set, they override
- # other configuration; process those into our args and kwargs.
- args[0] = os.environ.get('GCE_EMAIL', args[0])
- args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
- kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
-
- # Retrieve and return the GCE driver.
- gce = get_driver(Provider.GCE)(*args, **kwargs)
- gce.connection.user_agent_append(
- '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
- )
- return gce
-
- def parse_env_zones(self):
-        '''returns a list of comma-separated zones parsed from the GCE_ZONE environment variable.
-        If provided, this will be used to filter the results of the group_instances call'''
- import csv
- reader = csv.reader([os.environ.get('GCE_ZONE',"")], skipinitialspace=True)
- zones = [r for r in reader]
- return [z for z in zones[0]]
-
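-
-# --- Illustrative sketch (editor's addition, hypothetical helper, not part of
-# the original script). parse_env_zones reads GCE_ZONE as a single CSV row,
-# so "us-central1-a" and "us-central1-a, europe-west1-b" both work, and an
-# unset variable parses to an empty list:
-import csv
-
-def _parse_zones(raw):
-    return next(csv.reader([raw], skipinitialspace=True))
-
-assert _parse_zones('us-central1-a, europe-west1-b') == ['us-central1-a', 'europe-west1-b']
-assert _parse_zones('') == []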
- def parse_cli_args(self):
- ''' Command line argument processing '''
-
- parser = argparse.ArgumentParser(
- description='Produce an Ansible Inventory file based on GCE')
- parser.add_argument('--list', action='store_true', default=True,
- help='List instances (default: True)')
- parser.add_argument('--host', action='store',
- help='Get all information about an instance')
- parser.add_argument('--pretty', action='store_true', default=False,
- help='Pretty format (default: False)')
- parser.add_argument(
- '--refresh-cache', action='store_true', default=False,
- help='Force refresh of cache by making API requests (default: False - use cache files)')
- self.args = parser.parse_args()
-
-
- def node_to_dict(self, inst):
- md = {}
-
- if inst is None:
- return {}
-
- if 'items' in inst.extra['metadata']:
- for entry in inst.extra['metadata']['items']:
- md[entry['key']] = entry['value']
-
- net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
-        # default to external IP unless the user has specified they prefer internal
- if self.ip_type == 'internal':
- ssh_host = inst.private_ips[0]
- else:
- ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
-
- return {
- 'gce_uuid': inst.uuid,
- 'gce_id': inst.id,
- 'gce_image': inst.image,
- 'gce_machine_type': inst.size,
- 'gce_private_ip': inst.private_ips[0],
- 'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
- 'gce_name': inst.name,
- 'gce_description': inst.extra['description'],
- 'gce_status': inst.extra['status'],
- 'gce_zone': inst.extra['zone'].name,
- 'gce_tags': inst.extra['tags'],
- 'gce_metadata': md,
- 'gce_network': net,
- # Hosts don't have a public name, so we add an IP
- 'ansible_ssh_host': ssh_host
- }
-
- def load_inventory_from_cache(self):
- ''' Loads inventory from JSON on disk. '''
-
- try:
- self.inventory = self.cache.get_all_data_from_cache()
- hosts = self.inventory['_meta']['hostvars']
- except Exception as e:
- print(
- "Invalid inventory file %s. Please rebuild with -refresh-cache option."
- % (self.cache.cache_path_cache))
- raise
-
- def do_api_calls_update_cache(self):
- ''' Do API calls and save data in cache. '''
- zones = self.parse_env_zones()
- data = self.group_instances(zones)
- self.cache.write_to_cache(data)
- self.inventory = data
-
- def list_nodes(self):
- all_nodes = []
- params, more_results = {'maxResults': 500}, True
- while more_results:
- self.driver.connection.gce_params=params
- all_nodes.extend(self.driver.list_nodes())
- more_results = 'pageToken' in params
- return all_nodes
-
- def group_instances(self, zones=None):
- '''Group all instances'''
- groups = {}
- meta = {}
- meta["hostvars"] = {}
-
- for node in self.list_nodes():
-
- # This check filters on the desired instance states defined in the
- # config file with the instance_states config option.
- #
- # If the instance_states list is _empty_ then _ALL_ states are returned.
- #
- # If the instance_states list is _populated_ then check the current
- # state against the instance_states list
- if self.instance_states and not node.extra['status'] in self.instance_states:
- continue
-
- name = node.name
-
- meta["hostvars"][name] = self.node_to_dict(node)
-
- zone = node.extra['zone'].name
-
- # To avoid making multiple requests per zone
- # we list all nodes and then filter the results
- if zones and zone not in zones:
- continue
-
- if zone in groups: groups[zone].append(name)
- else: groups[zone] = [name]
-
- tags = node.extra['tags']
- for t in tags:
- if t.startswith('group-'):
- tag = t[6:]
- else:
- tag = 'tag_%s' % t
- if tag in groups: groups[tag].append(name)
- else: groups[tag] = [name]
-
- net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
- net = 'network_%s' % net
- if net in groups: groups[net].append(name)
- else: groups[net] = [name]
-
- machine_type = node.size
- if machine_type in groups: groups[machine_type].append(name)
- else: groups[machine_type] = [name]
-
-            image = node.image or 'persistent_disk'
- if image in groups: groups[image].append(name)
- else: groups[image] = [name]
-
- status = node.extra['status']
- stat = 'status_%s' % status.lower()
- if stat in groups: groups[stat].append(name)
- else: groups[stat] = [name]
-
- groups["_meta"] = meta
-
- return groups
-
- def json_format_dict(self, data, pretty=False):
- ''' Converts a dict to a JSON object and dumps it as a formatted
- string '''
-
- if pretty:
- return json.dumps(data, sort_keys=True, indent=2)
- else:
- return json.dumps(data)
-
-# Run the script
-if __name__ == '__main__':
- GceInventory()
diff --git a/inventory/gce/hosts/hosts b/inventory/gce/hosts/hosts
deleted file mode 100644
index 3996e577e..000000000
--- a/inventory/gce/hosts/hosts
+++ /dev/null
@@ -1 +0,0 @@
-localhost ansible_connection=local ansible_become=no ansible_python_interpreter='/usr/bin/env python2'
diff --git a/inventory/libvirt/hosts/hosts b/inventory/libvirt/hosts/hosts
deleted file mode 100644
index 3996e577e..000000000
--- a/inventory/libvirt/hosts/hosts
+++ /dev/null
@@ -1 +0,0 @@
-localhost ansible_connection=local ansible_become=no ansible_python_interpreter='/usr/bin/env python2'
diff --git a/inventory/libvirt/hosts/libvirt.ini b/inventory/libvirt/hosts/libvirt.ini
deleted file mode 100644
index 62ff204dd..000000000
--- a/inventory/libvirt/hosts/libvirt.ini
+++ /dev/null
@@ -1,20 +0,0 @@
-# Ansible libvirt external inventory script settings
-#
-
-[libvirt]
-
-uri = qemu:///system
-
-# API calls to libvirt can be slow. For this reason, we cache the results of an API
-# call. Set this to the path you want cache files to be written to. Two files
-# will be written to this directory:
-# - ansible-libvirt.cache
-# - ansible-libvirt.index
-cache_path = /tmp
-
-# The number of seconds a cache file is considered valid. After this many
-# seconds, a new API call will be made, and the cache file will be updated.
-cache_max_age = 900
-
-
-
diff --git a/inventory/libvirt/hosts/libvirt_generic.py b/inventory/libvirt/hosts/libvirt_generic.py
deleted file mode 100755
index d63e07b64..000000000
--- a/inventory/libvirt/hosts/libvirt_generic.py
+++ /dev/null
@@ -1,191 +0,0 @@
-#!/usr/bin/env python2
-# pylint: skip-file
-
-'''
-libvirt external inventory script
-=================================
-
-Ansible has a feature where instead of reading from /etc/ansible/hosts
-as a text file, it can query external programs to obtain the list
-of hosts, groups the hosts are in, and even variables to assign to each host.
-
-To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
-This, more or less, allows you to keep one central database containing
-info about all of your managed instances.
-
-'''
-
-# (c) 2015, Jason DeTiberus <jdetiber@redhat.com>
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-######################################################################
-
-import argparse
-import ConfigParser
-import os
-import sys
-import libvirt
-import xml.etree.ElementTree as ET
-
-try:
- import json
-except ImportError:
- import simplejson as json
-
-
-class LibvirtInventory(object):
- ''' libvirt dynamic inventory '''
-
- def __init__(self):
- ''' Main execution path '''
-
- self.inventory = dict() # A list of groups and the hosts in that group
- self.cache = dict() # Details about hosts in the inventory
-
- # Read settings and parse CLI arguments
- self.read_settings()
- self.parse_cli_args()
-
- if self.args.host:
- print(_json_format_dict(self.get_host_info(), self.args.pretty))
- elif self.args.list:
- print(_json_format_dict(self.get_inventory(), self.args.pretty))
- else: # default action with no options
- print(_json_format_dict(self.get_inventory(), self.args.pretty))
-
- def read_settings(self):
- ''' Reads the settings from the libvirt.ini file '''
-
- config = ConfigParser.SafeConfigParser()
- config.read(
- os.path.dirname(os.path.realpath(__file__)) + '/libvirt.ini'
- )
- self.libvirt_uri = config.get('libvirt', 'uri')
-
- def parse_cli_args(self):
- ''' Command line argument processing '''
-
- parser = argparse.ArgumentParser(
- description='Produce an Ansible Inventory file based on libvirt'
- )
- parser.add_argument(
- '--list',
- action='store_true',
- default=True,
- help='List instances (default: True)'
- )
- parser.add_argument(
- '--host',
- action='store',
- help='Get all the variables about a specific instance'
- )
- parser.add_argument(
- '--pretty',
- action='store_true',
- default=False,
- help='Pretty format (default: False)'
- )
- self.args = parser.parse_args()
-
- def get_host_info(self):
- ''' Get variables about a specific host '''
-
- inventory = self.get_inventory()
- if self.args.host in inventory['_meta']['hostvars']:
- return inventory['_meta']['hostvars'][self.args.host]
-
- def get_inventory(self):
- ''' Construct the inventory '''
-
- inventory = dict(_meta=dict(hostvars=dict()))
-
- conn = libvirt.openReadOnly(self.libvirt_uri)
- if conn is None:
- print("Failed to open connection to %s" % self.libvirt_uri)
- sys.exit(1)
-
- domains = conn.listAllDomains()
- if domains is None:
- print("Failed to list domains for connection %s" % self.libvirt_uri)
- sys.exit(1)
-
- for domain in domains:
- hostvars = dict(libvirt_name=domain.name(),
- libvirt_id=domain.ID(),
- libvirt_uuid=domain.UUIDString())
- domain_name = domain.name()
-
- # TODO: add support for guests that are not in a running state
- state, _ = domain.state()
-            # 1 (VIR_DOMAIN_RUNNING) is the state for a running guest
- if state != 1:
- continue
-
- hostvars['libvirt_status'] = 'running'
-
- root = ET.fromstring(domain.XMLDesc())
- ansible_ns = {'ansible': 'https://github.com/ansible/ansible'}
- for tag_elem in root.findall('./metadata/ansible:tags/ansible:tag', ansible_ns):
- tag = tag_elem.text
- _push(inventory, "tag_%s" % tag, domain_name)
- _push(hostvars, 'libvirt_tags', tag)
-
- # TODO: support more than one network interface, also support
- # interface types other than 'network'
- interface = root.find("./devices/interface[@type='network']")
- if interface is not None:
- source_elem = interface.find('source')
- mac_elem = interface.find('mac')
- if source_elem is not None and \
- mac_elem is not None:
- # Adding this to disable pylint check specifically
- # ignoring libvirt-python versions that
- # do not include DHCPLeases
- # This is needed until we upgrade the build bot to
- # RHEL7 (>= 1.2.6 libvirt)
- # pylint: disable=no-member
- dhcp_leases = conn.networkLookupByName(source_elem.get('network')) \
- .DHCPLeases(mac_elem.get('address'))
- if len(dhcp_leases) > 0:
- ip_address = dhcp_leases[0]['ipaddr']
- hostvars['ansible_ssh_host'] = ip_address
- hostvars['libvirt_ip_address'] = ip_address
-
- inventory['_meta']['hostvars'][domain_name] = hostvars
-
- return inventory
-
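-
-# --- Illustrative sketch (editor's addition, not part of the original
-# script). The tag discovery in get_inventory reads a custom Ansible XML
-# namespace from the domain description. Parsing the same structure standalone:
-import xml.etree.ElementTree as ET
-
-xml = ('<domain><metadata>'
-       '<tags xmlns="https://github.com/ansible/ansible"><tag>web</tag></tags>'
-       '</metadata></domain>')
-ns = {'ansible': 'https://github.com/ansible/ansible'}
-tags = [e.text for e in
-        ET.fromstring(xml).findall('./metadata/ansible:tags/ansible:tag', ns)]
-assert tags == ['web']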
-def _push(my_dict, key, element):
-    '''
-    Push element onto the my_dict[key] list,
-    initializing my_dict[key] if it doesn't exist.
-    '''
-
- if key in my_dict:
- my_dict[key].append(element)
- else:
- my_dict[key] = [element]
-
-def _json_format_dict(data, pretty=False):
-    ''' Serialize data to a JSON formatted str '''
-
- if pretty:
- return json.dumps(data, sort_keys=True, indent=2)
- else:
- return json.dumps(data)
-
-LibvirtInventory()
diff --git a/inventory/openstack/hosts/hosts b/inventory/openstack/hosts/hosts
deleted file mode 100644
index 9b63e98f4..000000000
--- a/inventory/openstack/hosts/hosts
+++ /dev/null
@@ -1 +0,0 @@
-localhost ansible_become=no ansible_python_interpreter='/usr/bin/env python2' connection=local
diff --git a/inventory/openstack/hosts/openstack.py b/inventory/openstack/hosts/openstack.py
deleted file mode 100755
index deefd3b5d..000000000
--- a/inventory/openstack/hosts/openstack.py
+++ /dev/null
@@ -1,247 +0,0 @@
-#!/usr/bin/env python
-# pylint: skip-file
-
-# Copyright (c) 2012, Marco Vito Moscaritolo <marco@agavee.com>
-# Copyright (c) 2013, Jesse Keating <jesse.keating@rackspace.com>
-# Copyright (c) 2015, Hewlett-Packard Development Company, L.P.
-# Copyright (c) 2016, Rackspace Australia
-#
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <http://www.gnu.org/licenses/>.
-
-# The OpenStack Inventory module uses os-client-config for configuration.
-# https://github.com/stackforge/os-client-config
-# This means it will either:
-# - Respect normal OS_* environment variables like other OpenStack tools
-# - Read values from a clouds.yaml file.
-# If you want to configure via clouds.yaml, you can put the file in:
-# - Current directory
-# - ~/.config/openstack/clouds.yaml
-# - /etc/openstack/clouds.yaml
-# - /etc/ansible/openstack.yml
-# The clouds.yaml file can contain entries for multiple clouds and multiple
-# regions of those clouds. If it does, this inventory module will connect to
-# all of them and present them as one contiguous inventory.
-#
-# See the adjacent openstack.yml file for an example config file
-# There are two ansible inventory specific options that can be set in
-# the inventory section.
-# expand_hostvars controls whether or not the inventory will make extra API
-# calls to fill out additional information about each server
-# use_hostnames changes the behavior from registering every host with its UUID
-# and making a group of its hostname to only doing this if the
-# hostname in question has more than one server
-# fail_on_errors causes the inventory to fail and return no hosts if one cloud
-# has failed (for example, bad credentials or being offline).
-# When set to False, the inventory will return hosts from
-# whichever other clouds it can contact. (Default: True)
-
-import argparse
-import collections
-import os
-import sys
-import time
-from distutils.version import StrictVersion
-
-try:
- import json
-except:
- import simplejson as json
-
-import os_client_config
-import shade
-import shade.inventory
-
-CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml']
-
-
-def get_groups_from_server(server_vars, namegroup=True):
- groups = []
-
- region = server_vars['region']
- cloud = server_vars['cloud']
- metadata = server_vars.get('metadata', {})
-
- # Create a group for the cloud
- groups.append(cloud)
-
- # Create a group on region
- groups.append(region)
-
- # And one by cloud_region
- groups.append("%s_%s" % (cloud, region))
-
- # Check if group metadata key in servers' metadata
- if 'group' in metadata:
- groups.append(metadata['group'])
-
- for extra_group in metadata.get('groups', '').split(','):
- if extra_group:
- groups.append(extra_group.strip())
-
- groups.append('instance-%s' % server_vars['id'])
- if namegroup:
- groups.append(server_vars['name'])
-
- for key in ('flavor', 'image'):
- if 'name' in server_vars[key]:
- groups.append('%s-%s' % (key, server_vars[key]['name']))
-
- for key, value in iter(metadata.items()):
- groups.append('meta-%s_%s' % (key, value))
-
- az = server_vars.get('az', None)
- if az:
- # Make groups for az, region_az and cloud_region_az
- groups.append(az)
- groups.append('%s_%s' % (region, az))
- groups.append('%s_%s_%s' % (cloud, region, az))
- return groups
-
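-
-# --- Illustrative sketch (editor's addition, hypothetical helper, not part of
-# the original script). get_groups_from_server fans one server out into many
-# overlapping groups; the cloud/region/az portion of that fan-out looks like:
-def _location_groups(cloud, region, az=None):
-    groups = [cloud, region, '%s_%s' % (cloud, region)]
-    if az:
-        # az, region_az and cloud_region_az, mirroring the function above
-        groups += [az, '%s_%s' % (region, az), '%s_%s_%s' % (cloud, region, az)]
-    return groups
-
-assert _location_groups('mycloud', 'RegionOne', 'nova') == [
-    'mycloud', 'RegionOne', 'mycloud_RegionOne',
-    'nova', 'RegionOne_nova', 'mycloud_RegionOne_nova']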
-
-def get_host_groups(inventory, refresh=False):
- (cache_file, cache_expiration_time) = get_cache_settings()
- if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh):
- groups = to_json(get_host_groups_from_cloud(inventory))
- open(cache_file, 'w').write(groups)
- else:
- groups = open(cache_file, 'r').read()
- return groups
-
-
-def append_hostvars(hostvars, groups, key, server, namegroup=False):
- hostvars[key] = dict(
- ansible_ssh_host=server['interface_ip'],
- openstack=server)
- for group in get_groups_from_server(server, namegroup=namegroup):
- groups[group].append(key)
-
-
-def get_host_groups_from_cloud(inventory):
- groups = collections.defaultdict(list)
- firstpass = collections.defaultdict(list)
- hostvars = {}
- list_args = {}
- if hasattr(inventory, 'extra_config'):
- use_hostnames = inventory.extra_config['use_hostnames']
- list_args['expand'] = inventory.extra_config['expand_hostvars']
- if StrictVersion(shade.__version__) >= StrictVersion("1.6.0"):
- list_args['fail_on_cloud_config'] = \
- inventory.extra_config['fail_on_errors']
- else:
- use_hostnames = False
-
- for server in inventory.list_hosts(**list_args):
-
- if 'interface_ip' not in server:
- continue
- firstpass[server['name']].append(server)
- for name, servers in firstpass.items():
- if len(servers) == 1 and use_hostnames:
- append_hostvars(hostvars, groups, name, servers[0])
- else:
- server_ids = set()
- # Trap for duplicate results
- for server in servers:
- server_ids.add(server['id'])
- if len(server_ids) == 1 and use_hostnames:
- append_hostvars(hostvars, groups, name, servers[0])
- else:
- for server in servers:
- append_hostvars(
- hostvars, groups, server['id'], server,
- namegroup=True)
- groups['_meta'] = {'hostvars': hostvars}
- return groups
-
-
-def is_cache_stale(cache_file, cache_expiration_time, refresh=False):
- ''' Determines if cache file has expired, or if it is still valid '''
- if refresh:
- return True
- if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0:
- mod_time = os.path.getmtime(cache_file)
- current_time = time.time()
- if (mod_time + cache_expiration_time) > current_time:
- return False
- return True
-
-
-def get_cache_settings():
- config = os_client_config.config.OpenStackConfig(
- config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES)
- # For inventory-wide caching
- cache_expiration_time = config.get_cache_expiration_time()
- cache_path = config.get_cache_path()
- if not os.path.exists(cache_path):
- os.makedirs(cache_path)
- cache_file = os.path.join(cache_path, 'ansible-inventory.cache')
- return (cache_file, cache_expiration_time)
-
-
-def to_json(in_dict):
- return json.dumps(in_dict, sort_keys=True, indent=2)
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description='OpenStack Inventory Module')
- parser.add_argument('--private',
- action='store_true',
- help='Use private address for ansible host')
- parser.add_argument('--refresh', action='store_true',
- help='Refresh cached information')
- parser.add_argument('--debug', action='store_true', default=False,
- help='Enable debug output')
- group = parser.add_mutually_exclusive_group(required=True)
- group.add_argument('--list', action='store_true',
- help='List active servers')
- group.add_argument('--host', help='List details about the specific host')
-
- return parser.parse_args()
-
-
-def main():
- args = parse_args()
- try:
- config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES
- shade.simple_logging(debug=args.debug)
- inventory_args = dict(
- refresh=args.refresh,
- config_files=config_files,
- private=args.private,
- )
- if hasattr(shade.inventory.OpenStackInventory, 'extra_config'):
- inventory_args.update(dict(
- config_key='ansible',
- config_defaults={
- 'use_hostnames': False,
- 'expand_hostvars': True,
- 'fail_on_errors': True,
- }
- ))
-
- inventory = shade.inventory.OpenStackInventory(**inventory_args)
-
- if args.list:
- output = get_host_groups(inventory, refresh=args.refresh)
- elif args.host:
- output = to_json(inventory.get_host(args.host))
- print(output)
- except shade.OpenStackCloudException as e:
- sys.stderr.write('%s\n' % e.message)
- sys.exit(1)
- sys.exit(0)
-
-
-if __name__ == '__main__':
- main()
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 1c3b1757c..095f43dd8 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,14 +10,14 @@
Name: openshift-ansible
Version: 3.7.0
-Release: 0.104.0%{?dist}
+Release: 0.125.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
Source0: https://github.com/openshift/openshift-ansible/archive/%{commit}/%{name}-%{version}.tar.gz
BuildArch: noarch
-Requires: ansible >= 2.2.2.0
+Requires: ansible >= 2.3
Requires: python2
Requires: python-six
Requires: tar
@@ -280,6 +280,155 @@ Atomic OpenShift Utilities includes
%changelog
+* Tue Sep 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.125.0
+-
+
+* Tue Sep 05 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.124.0
+- Fix ansible_syntax check (rteague@redhat.com)
+- Standardize etcd entry point playbooks (rteague@redhat.com)
+- Adding deprecation checks to ansible_syntax (rteague@redhat.com)
+- Break out master config into stand-alone playbook (rteague@redhat.com)
+- Move all-in-one fail check to evaluate_groups.yml (rteague@redhat.com)
+- Break out node config into stand-alone playbook (rteague@redhat.com)
+- Adding another default to protect against missing name/desc
+ (kwoodson@redhat.com)
+- Removed dns role (mgugino@redhat.com)
+- Fix typo in variable names for glusterfs firewall configuration
+ (bacek@bacek.com)
+- disk_availability: fix bug where msg is overwritten (lmeyer@redhat.com)
+- Added firwall defaults to etcd role. (kwoodson@redhat.com)
+- Remove meta depends from clock (mgugino@redhat.com)
+- Only run migrate auth for < 3.7 (rteague@redhat.com)
+- Fix openshift_master upgrade (mgugino@redhat.com)
+- Merging openshift_node with openshift bootstrap. (kwoodson@redhat.com)
+- Test: Fail on entry point playbooks in common (rteague@redhat.com)
+- Bug 1467265 - logging: add 'purge' option with uninstall
+ (jwozniak@redhat.com)
+- openshift_checks: ignore hidden files in checks dir
+ (miciah.masters@gmail.com)
+
+* Wed Aug 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.123.0
+-
+
+* Wed Aug 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.122.0
+- Update openshift_hosted_routers example to be in ini format.
+ (abutcher@redhat.com)
+- Update calico to v2.5 (djosborne10@gmail.com)
+
+* Wed Aug 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.121.0
+- Revert "logging set memory request to limit" (sdodson@redhat.com)
+- Move firewall install and fix scaleup playbooks (rteague@redhat.com)
+- Fix group conditional requirements (rteague@redhat.com)
+- Updating openshift_service_catalog to use oc_service over oc_obj to resolve
+ idempotency issues being seen from rerunning role (ewolinet@redhat.com)
+- annotate the infra projects for logging to fix bz1480988
+ (jcantril@redhat.com)
+- docker_image_availability: timeout skopeo inspect (lmeyer@redhat.com)
+- Fix scaleup on containerized installations (sdodson@redhat.com)
+- bug 1480878. Default pvc for logging (jcantril@redhat.com)
+- logging set memory request to limit (jcantril@redhat.com)
+- openshift_cfme: add nfs directory support (fsimonce@redhat.com)
+
+* Tue Aug 29 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.120.0
+- Nuage changes to add custom mounts for atomic-openshift-node service
+ (rohan.s.parulekar@nuagenetworks.net)
+- Add independent registry auth support (mgugino@redhat.com)
+- roles: use openshift_use_crio (gscrivan@redhat.com)
+- cri-o: change to system runc (gscrivan@redhat.com)
+- cri-o: rename openshift_docker_use_crio to openshift_use_crio
+ (gscrivan@redhat.com)
+- Remove unsupported playbooks and utilities (rteague@redhat.com)
+- Updating default tag for enterprise installation for ASB
+ (ewolinet@redhat.com)
+- Only validate certificates that are passed to oc_route (zgalor@redhat.com)
+
+* Mon Aug 28 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.118.0
+- Fix origin metrics and logging container version
+ (gevorg15@users.noreply.github.com)
+- Removing deprecation warnings for when conditions. (kwoodson@redhat.com)
+- Default to global setting for firewall. (kwoodson@redhat.com)
+- system-containers: Fallback for system_images_registry (smilner@redhat.com)
+- inventory: Add system_images_registry example (smilner@redhat.com)
+- Remove near-meta role openshift_cli_facts (mgugino@redhat.com)
+- Update error message: s/non-unique/duplicate (rhcarvalho@gmail.com)
+- Make pylint disables more specific (rhcarvalho@gmail.com)
+- Handle exceptions in failure summary cb plugin (rhcarvalho@gmail.com)
+- Rewrite failure summary callback plugin (rhcarvalho@gmail.com)
+- Handle more exceptions when running checks (rhcarvalho@gmail.com)
+- List known checks/tags when check name is invalid (rhcarvalho@gmail.com)
+- List existing health checks when none is requested (rhcarvalho@gmail.com)
+- Add playbook for running arbitrary health checks (rhcarvalho@gmail.com)
+- Update health check README (rhcarvalho@gmail.com)
+- Standardize openshift_provisioners entry point (rteague@redhat.com)
+- Remove unused upgrade playbook (rteague@redhat.com)
+- Bug 1471322: logging roles based image versions (jwozniak@redhat.com)
+
+* Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.117.0
+- Standardize openshift-checks code paths (rteague@redhat.com)
+
+* Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.116.0
+- Add missing hostnames to registry cert (sdodson@redhat.com)
+
+* Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.115.0
+-
+
+* Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.114.0
+-
+
+* Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.113.0
+- openshift_version: enterprise accepts new style pre-release
+ (smilner@redhat.com)
+- Nuage changes for Atomic hosts OSE Integration
+ (rohan.s.parulekar@nuagenetworks.net)
+
+* Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.112.0
+- fix #5206. Default ES cpu limit (jcantril@redhat.com)
+
+* Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.111.0
+- Upgrade check for OpenShift authorization objects (rteague@redhat.com)
+
+* Fri Aug 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.110.0
+- Setup tuned profiles in /etc/tuned (jmencak@redhat.com)
+
+* Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.109.0
+-
+
+* Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.108.0
+-
+
+* Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.107.0
+-
+
+* Thu Aug 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.106.0
+- Add dotnet 2.0 to v3.6 (sdodson@redhat.com)
+- Add dotnet 2.0 to v3.7 (sdodson@redhat.com)
+- Update v3.6 content (sdodson@redhat.com)
+- Update all image streams and templates (sdodson@redhat.com)
+- Passing memory and cpu limit for ops ES install (ewolinet@redhat.com)
+- If IP4_NAMESERVERS are unset then pull the value from /etc/resolv.conf
+ (sdodson@redhat.com)
+- New tuned profile hierarchy. (jmencak@redhat.com)
+- GlusterFS: add minor README note for #5071 (jarrpa@redhat.com)
+- Update cfme templates to auto-generate postgresql password
+ https://bugzilla.redhat.com/show_bug.cgi?id=1461973 (simaishi@redhat.com)
+
+* Wed Aug 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.105.0
+- Fix generated content (sdodson@redhat.com)
+- Switch to migrating one host and forming a new cluster (sdodson@redhat.com)
+- First attempt at provisioning. (kwoodson@redhat.com)
+- First attempt at creating the cert signer. (kwoodson@redhat.com)
+- remove out of scope variable from exception message
+ (maxamillion@fedoraproject.org)
+- raise AosVersionException if no expected packages found by dnf query
+ (maxamillion@fedoraproject.org)
+- Fix missing space in calico ansible roles (djosborne10@gmail.com)
+- Allow GCS object storage to be configured (ccoleman@redhat.com)
+- add dnf support to roles/openshift_health_checker/library/aos_version.py
+ (maxamillion@fedoraproject.org)
+- Add hostname/nodename length check (mgugino@redhat.com)
+- Refactor openshift_hosted's docker-registry route setup (dms@redhat.com)
+- bug 1468987: kibana_proxy OOM (jwozniak@redhat.com)
+
* Sun Aug 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.104.0
- Ensure that openshift_node_facts has been called for dns_ip
(sdodson@redhat.com)
diff --git a/playbooks/README.md b/playbooks/README.md
index 5857a9f59..290d4c082 100644
--- a/playbooks/README.md
+++ b/playbooks/README.md
@@ -12,8 +12,6 @@ And:
- [`adhoc`](adhoc) is a generic home for playbooks and tasks that are community
supported and not officially maintained.
-- [`aws`](aws), [`gce`](gce), [`libvirt`](libvirt) and [`openstack`](openstack)
- are related to the [`bin/cluster`](../bin) tool and its usage is deprecated.
Refer to the `README.md` file in each playbook directory for more information
about them.
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index 99698b4d0..0fb29ca06 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -1,4 +1,268 @@
# AWS playbooks
-This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
-which is community supported and most use is considered deprecated.
+## Provisioning
+
+With the recent demand for provisioning from customers and developers alike,
+the AWS playbook directory now supports a limited set of Ansible playbooks
+that achieve a complete cluster setup. These playbooks deploy highly scalable
+OpenShift clusters using AWS auto scaling groups and custom AMIs.
+
+### Where do I start?
+
+Before any provisioning may occur, AWS account credentials must be present in the environment. This can be done in two ways:
+
+- Create the file `~/.aws/credentials` with the following contents (substitute your access key and secret key):
+ ```
+ [myaccount]
+ aws_access_key_id = <Your access_key here>
+  aws_secret_access_key = <Your secret access key here>
+ ```
+ From the shell:
+ ```
+ $ export AWS_PROFILE=myaccount
+ ```
+ ---
+- Alternatively, instead of using a profile, you can export your AWS credentials as environment variables:
+ ```
+ $ export AWS_ACCESS_KEY_ID=AKIXXXXXX
+ $ export AWS_SECRET_ACCESS_KEY=XXXXXX
+ ```
+
+### Let's Provision!
+
+The newly added playbooks are the following:
+- build_ami.yml
+- provision.yml
+- provision_nodes.yml
+
+The expected workflow is to provide the `vars.yml` file with the desired
+settings for the cluster instances. These settings are AWS-specific and should
+be tailored to your own AWS account.
+
+```yaml
+clusterid: mycluster
+region: us-east-1
+
+provision:
+ clusterid: "{{ clusterid }}"
+ region: "{{ region }}"
+
+ build:
+ base_image: ami-bdd5d6ab # base image for AMI to build from
+ # when creating an encrypted AMI please specify use_encryption
+ use_encryption: False
+
+ # for s3 registry backend
+ openshift_registry_s3: True
+
+ # if using custom certificates these are required for the ELB
+ iam_cert_ca:
+ name: test_openshift
+ cert_path: '/path/to/wildcard.<clusterid>.example.com.crt'
+ key_path: '/path/to/wildcard.<clusterid>.example.com.key'
+ chain_path: '/path/to/cert.ca.crt'
+
+ instance_users:
+ - key_name: myuser_key
+ username: myuser
+ pub_key: |
+ ssh-rsa aaa<place public ssh key here>aaaaa user@<clusterid>
+
+ node_group_config:
+ tags:
+ clusterid: "{{ clusterid }}"
+ environment: stg
+ ssh_key_name: myuser_key # name of the ssh key from above
+
+ # configure master settings here
+ master:
+ instance_type: m4.xlarge
+ ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced
+ volumes:
+ - device_name: /dev/sdb
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: False
+ health_check:
+ period: 60
+ type: EC2
+      # Set min_size, max_size and desired_size to the same value for masters.
+ min_size: 3
+ max_size: 3
+ desired_size: 3
+ tags:
+ host-type: master
+ sub-host-type: default
+ wait_for_instances: True
+...
+ vpc:
+    # name: mycluster # if missing, defaults to clusterid
+ cidr: 172.31.0.0/16
+ subnets:
+ us-east-1: # These are us-east-1 region defaults. Ensure this matches your region
+ - cidr: 172.31.48.0/20
+ az: "us-east-1c"
+ - cidr: 172.31.32.0/20
+ az: "us-east-1e"
+ - cidr: 172.31.16.0/20
+ az: "us-east-1a"
+
+```
+
+Repeat the preceding `master` setup for the infra and compute node groups. In
+most cases these will not need editing, but the parameters can be updated if
+the install requires further customization; a sketch of an infra group is
+shown below.
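+
+As an illustration only, an infra group can mirror the `master` block above.
+The values here are placeholders, not required settings:
+
+```yaml
+  # hypothetical infra node group, patterned on the master block above
+  infra:
+    instance_type: m4.xlarge
+    ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced
+    volumes:
+    - device_name: /dev/sdb
+      volume_size: 100
+      device_type: gp2
+      delete_on_termination: False
+    health_check:
+      period: 60
+      type: EC2
+    min_size: 2
+    max_size: 20
+    desired_size: 2
+    tags:
+      host-type: node
+      sub-host-type: infra
+    wait_for_instances: False
+```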
+
+#### Step 1
+
+Create an openshift-ansible inventory file to use for a byo installation. Unlike a typical byo installation, no hosts are specified in the inventory file. Here is an example:
+
+```ini
+[OSEv3:children]
+masters
+nodes
+etcd
+
+[OSEv3:vars]
+################################################################################
+# Ensure these variables are set for bootstrap
+################################################################################
+openshift_master_bootstrap_enabled=True
+
+openshift_hosted_router_wait=False
+openshift_hosted_registry_wait=False
+
+# Repository for installation
+openshift_additional_repos=[{'name': 'openshift-repo', 'id': 'openshift-repo', 'baseurl': 'https://mirror.openshift.com/enterprise/enterprise-3.6/latest/x86_64/os/', 'enabled': 'yes', 'gpgcheck': 0, 'sslverify': 'no', 'sslclientcert': '/var/lib/yum/client-cert.pem', 'sslclientkey': '/var/lib/yum/client-key.pem', 'gpgkey': 'https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-release https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-beta https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-openshifthosted'}]
+
+################################################################################
+# cluster-specific settings may be placed here
+
+[masters]
+
+[etcd]
+
+[nodes]
+```
+
+More examples of cluster inventory settings can be found [`here`](../../inventory/byo/).
+
+This inventory file is required in order to create the bootstrappable AMI: it allows the AMI to be built using the openshift-ansible node roles.
+
+
+#### Step 2
+
+Once the `vars.yml` file has been updated with the correct settings for the desired AWS account, we are ready to build an AMI:
+
+```
+$ ansible-playbook -i inventory.yml build_ami.yml
+```
+
+This playbook will:
+1. Build a VPC, named after the clusterid unless otherwise specified.
+2. Create the ssh key required for the instance.
+3. Launch a temporary instance.
+4. Run setup roles to ensure packages and services are correctly configured.
+5. Create the AMI.
+6. If encryption is desired:
+   - Create a KMS key named after $clusterid.
+   - Produce an encrypted AMI using the $clusterid KMS key.
+7. Terminate the instance used to configure the AMI.
+
+
+#### Step 3
+
+Now that we have created an AMI for our OpenShift installation, the AMI id needs to be placed in the `vars.yml` file. To do so, update the following fields (the AMI id can be captured from the output of the previous step or found in the EC2 console under AMIs):
+
+```
+ # when creating an encrypted AMI please specify use_encryption
+ use_encryption: False # defaults to false
+```
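+
+One way to record the id is under the `master` node group, as sketched here
+(the AMI id below is a placeholder, not a real image):
+
+```yaml
+  master:
+    ami: ami-0123456789abcdef0 # the id reported by build_ami.yml
+```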
+
+**Note**: To use encryption, set `use_encryption: True`. This takes the most recently created AMI and encrypts it for later use. If encryption is not desired, leave the value set to false. The AMI id will be fetched and used according to its most recent creation date.
+
+
+#### Step 4
+
+We are now ready to create the master instances and install OpenShift.
+
+```
+$ ansible-playbook -i <inventory from step 1> provision.yml
+```
+
+This playbook runs through the following steps:
+1. Ensures a VPC is created.
+2. Ensures an SSH key exists.
+3. Creates an s3 bucket for the registry named $clusterid.
+4. Creates the master security groups.
+5. Creates a master launch config.
+6. Creates the master auto scaling groups.
+7. Uploads certificates to the ELB, if they are desired.
+8. Creates the internal and external master ELBs.
+9. Adds the newly created masters to the correct groups.
+10. Sets a couple of important facts for the masters.
+11. Runs the [`byo` cluster config](../../common/openshift-cluster/config.yml).
+
+At this point we have a working cluster consisting of only the master nodes.
+
+
+#### Step 5
+
+Now that the cluster is deployed, the next step is to create the node groups. This can be done easily with the following playbook:
+
+```
+$ ansible-playbook provision_nodes.yml
+```
+
+Once this playbook completes, it creates the compute and infra node scale groups. The new nodes will attempt to register themselves with the cluster; these requests must be approved by an administrator.
+
+#### Step 6
+
+The registration of our nodes can be automated by running the `accept.yml` playbook. This playbook can handle the registration in a few different ways:
+- approve_all - **Note**: this option is for development and test environments only, as it bypasses security.
+- nodes - a list of node names that will be accepted into the cluster.
+
+```yaml
+ oc_adm_csr:
+ #approve_all: True
+ nodes: < list of nodes here >
+ timeout: 0
+```
+Once the desired accept method is chosen, run the `accept.yml` playbook:
+```
+$ ansible-playbook accept.yml
+```
+
+Log in to a master and run the following command:
+```
+ssh root@<master ip address>
+$ oc --config=/etc/origin/master/admin.kubeconfig get csr
+node-bootstrapper-client-ip-172-31-49-148-ec2-internal 1h system:serviceaccount:openshift-infra:node-bootstrapper Approved,Issued
+node-bootstrapper-server-ip-172-31-49-148-ec2-internal 1h system:node:ip-172-31-49-148.ec2.internal Approved,Issued
+```
+
+Verify that the `CONDITION` is `Approved,Issued` on the `csr` objects. Two are required for each node:
+1. `node-bootstrapper-client` is a request to access the api/controllers.
+2. `node-bootstrapper-server` is a request to join the cluster.
+
+Once this is complete, verify that the nodes have joined the cluster and are in the `Ready` state.
+
+```
+$ oc --config=/etc/origin/master/admin.kubeconfig get nodes
+NAME STATUS AGE VERSION
+ip-172-31-49-148.ec2.internal Ready 1h v1.6.1+5115d708d7
+```
+
+### Ready To Work!
+
+At this point your cluster should be ready for workloads. Proceed to deploy applications on your cluster.
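+
+As an illustration, a trivial application can be deployed from a master; the
+project name and image here are examples only, not part of the provisioning
+flow:
+
+```
+$ oc --config=/etc/origin/master/admin.kubeconfig new-project demo
+$ oc --config=/etc/origin/master/admin.kubeconfig new-app openshift/hello-openshift
+```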
+
+### Still to compute
+
+More enhancements to provisioning are on the way, including additional playbooks that extend these capabilities.
diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml
new file mode 100755
index 000000000..d43c84205
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/accept.yml
@@ -0,0 +1,48 @@
+---
+- name: Setup the vpc and the master node group
+ #hosts: oo_first_master
+ hosts: localhost
+ remote_user: root
+ gather_facts: no
+ tasks:
+ - name: get provisioning vars
+ include_vars: vars.yml
+
+ - name: bring lib_openshift into scope
+ include_role:
+ name: lib_openshift
+
+ - name: fetch masters
+ ec2_remote_facts:
+ region: "{{ provision.region }}"
+ filters:
+ "tag:clusterid": "{{ provision.clusterid }}"
+ "tag:host-type": master
+ instance-state-name: running
+ register: mastersout
+ retries: 20
+ delay: 3
+ until: "'instances' in mastersout and mastersout.instances|length > 0"
+
+ - name: fetch new node instances
+ ec2_remote_facts:
+ region: "{{ provision.region }}"
+ filters:
+ "tag:clusterid": "{{ provision.clusterid }}"
+ "tag:host-type": node
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: "'instances' in instancesout and instancesout.instances|length > 0"
+
+ - debug:
+ msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list | regex_replace('.ec2.internal') }}"
+
+ - name: approve nodes
+ oc_adm_csr:
+ #approve_all: True
+ nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list | regex_replace('.ec2.internal') }}"
+ timeout: 0
+ register: nodeout
+ delegate_to: "{{ mastersout.instances[0].public_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/add_nodes.yml b/playbooks/aws/openshift-cluster/add_nodes.yml
deleted file mode 100644
index 0e8eb90c1..000000000
--- a/playbooks/aws/openshift-cluster/add_nodes.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-- name: Launch instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- vars:
- oo_extend_env: True
- tasks:
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "compute"
- count: "{{ num_nodes }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "infra"
- count: "{{ num_infra }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
-
-- include: scaleup.yml
-- include: list.yml
diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml
new file mode 100644
index 000000000..d27874200
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/build_ami.yml
@@ -0,0 +1,150 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: get the necessary vars for ami building
+ include_vars: vars.yml
+
+ - name: create a vpc with the name <clusterid>
+ include_role:
+ name: openshift_aws_vpc
+ vars:
+ r_openshift_aws_vpc_clusterid: "{{ provision.clusterid }}"
+ r_openshift_aws_vpc_cidr: "{{ provision.vpc.cidr }}"
+ r_openshift_aws_vpc_subnets: "{{ provision.vpc.subnets }}"
+ r_openshift_aws_vpc_region: "{{ provision.region }}"
+ r_openshift_aws_vpc_tags: "{{ provision.vpc.tags }}"
+ r_openshift_aws_vpc_name: "{{ provision.vpc.name | default(provision.clusterid) }}"
+
+ - name: create aws ssh keypair
+ include_role:
+ name: openshift_aws_ssh_keys
+ vars:
+ r_openshift_aws_ssh_keys_users: "{{ provision.instance_users }}"
+ r_openshift_aws_ssh_keys_region: "{{ provision.region }}"
+
+ - name: fetch the default subnet id
+ ec2_vpc_subnet_facts:
+ region: "{{ provision.region }}"
+ filters:
+ "tag:Name": "{{ provision.vpc.subnets[provision.region][0].az }}"
+ register: subnetout
+
+ - name: create instance for ami creation
+ ec2:
+ assign_public_ip: yes
+ region: "{{ provision.region }}"
+ key_name: "{{ provision.node_group_config.ssh_key_name }}"
+ group: "{{ provision.clusterid }}"
+ instance_type: m4.xlarge
+ vpc_subnet_id: "{{ subnetout.subnets[0].id }}"
+ image: "{{ provision.build.base_image }}"
+ volumes:
+ - device_name: /dev/sdb
+ volume_type: gp2
+ volume_size: 100
+ delete_on_termination: true
+ wait: yes
+ exact_count: 1
+ count_tag:
+ Name: ami_base
+ instance_tags:
+ Name: ami_base
+ register: amibase
+
+ - name: wait for ssh to become available
+ wait_for:
+ port: 22
+ host: "{{ amibase.tagged_instances.0.public_ip }}"
+ timeout: 300
+ search_regex: OpenSSH
+
+ - name: add host to nodes
+ add_host:
+ groups: nodes
+ name: "{{ amibase.tagged_instances.0.public_dns_name }}"
+
+ - name: set the user to perform installation
+ set_fact:
+ ansible_ssh_user: root
+
+- name: normalize groups
+ include: ../../byo/openshift-cluster/initialize_groups.yml
+
+- name: run the std_include
+ include: ../../common/openshift-cluster/evaluate_groups.yml
+
+- name: run the std_include
+ include: ../../common/openshift-cluster/initialize_facts.yml
+
+- name: run the std_include
+ include: ../../common/openshift-cluster/initialize_openshift_repos.yml
+
+- hosts: nodes
+ remote_user: root
+ tasks:
+ - name: get the necessary vars for ami building
+ include_vars: vars.yml
+
+ - set_fact:
+ openshift_node_bootstrap: True
+
+ - name: run openshift image preparation
+ include_role:
+ name: openshift_node
+
+- hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - name: bundle ami
+ ec2_ami:
+ instance_id: "{{ amibase.tagged_instances.0.id }}"
+ region: "{{ provision.region }}"
+ state: present
+ description: "This was provisioned {{ ansible_date_time.iso8601 }}"
+ name: "{{ provision.build.ami_name }}{{ lookup('pipe', 'date +%Y%m%d%H%M')}}"
+ tags: "{{ provision.build.openshift_ami_tags }}"
+ wait: yes
+ register: amioutput
+
+ - debug: var=amioutput
+
+ - when: provision.build.use_encryption | default(False)
+ block:
+ - name: setup kms key for encryption
+ include_role:
+ name: openshift_aws_iam_kms
+ vars:
+ r_openshift_aws_iam_kms_region: "{{ provision.region }}"
+ r_openshift_aws_iam_kms_alias: "alias/{{ provision.clusterid }}_kms"
+
+ - name: augment the encrypted ami tags with source-ami
+ set_fact:
+ source_tag:
+ source-ami: "{{ amioutput.image_id }}"
+
+ - name: copy the ami for encrypted disks
+ include_role:
+ name: openshift_aws_ami_copy
+ vars:
+ r_openshift_aws_ami_copy_region: "{{ provision.region }}"
+ r_openshift_aws_ami_copy_name: "{{ provision.build.ami_name }}{{ lookup('pipe', 'date +%Y%m%d%H%M')}}-encrypted"
+ r_openshift_aws_ami_copy_src_ami: "{{ amioutput.image_id }}"
+ r_openshift_aws_ami_copy_kms_alias: "alias/{{ provision.clusterid }}_kms"
+ r_openshift_aws_ami_copy_tags: "{{ source_tag | combine(provision.build.openshift_ami_tags) }}"
+ r_openshift_aws_ami_copy_encrypt: "{{ provision.build.use_encryption }}"
+ # this option currently fails due to boto waiters
+ # when supported this need to be reapplied
+ #r_openshift_aws_ami_copy_wait: True
+
+ - name: Display newly created encrypted ami id
+ debug:
+ msg: "{{ r_openshift_aws_ami_copy_retval_custom_ami }}"
+
+ - name: terminate temporary instance
+ ec2:
+ state: absent
+ region: "{{ provision.region }}"
+ instance_ids: "{{ amibase.tagged_instances.0.id }}"
diff --git a/playbooks/aws/openshift-cluster/build_node_group.yml b/playbooks/aws/openshift-cluster/build_node_group.yml
new file mode 100644
index 000000000..3ef492238
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/build_node_group.yml
@@ -0,0 +1,47 @@
+---
+- name: fetch recently created AMI
+ ec2_ami_find:
+ region: "{{ provision.region }}"
+ sort: creationDate
+ sort_order: descending
+ name: "{{ provision.build.ami_name }}*"
+ ami_tags: "{{ provision.build.openshift_ami_tags }}"
+ #no_result_action: fail
+ register: amiout
+
+- block:
+ - name: "Create {{ openshift_build_node_type }} sgs"
+ include_role:
+ name: openshift_aws_sg
+ vars:
+ r_openshift_aws_sg_clusterid: "{{ provision.clusterid }}"
+ r_openshift_aws_sg_region: "{{ provision.region }}"
+ r_openshift_aws_sg_type: "{{ openshift_build_node_type }}"
+
+ - name: "generate a launch config name for {{ openshift_build_node_type }}"
+ set_fact:
+ launch_config_name: "{{ provision.clusterid }}-{{ openshift_build_node_type }}-{{ ansible_date_time.epoch }}"
+
+ - name: create "{{ openshift_build_node_type }} launch config"
+ include_role:
+ name: openshift_aws_launch_config
+ vars:
+ r_openshift_aws_launch_config_name: "{{ launch_config_name }}"
+ r_openshift_aws_launch_config_clusterid: "{{ provision.clusterid }}"
+ r_openshift_aws_launch_config_region: "{{ provision.region }}"
+ r_openshift_aws_launch_config: "{{ provision.node_group_config }}"
+ r_openshift_aws_launch_config_type: "{{ openshift_build_node_type }}"
+ r_openshift_aws_launch_config_custom_image: "{{ '' if 'results' not in amiout else amiout.results[0].ami_id }}"
+ r_openshift_aws_launch_config_bootstrap_token: "{{ (local_bootstrap['content'] |b64decode) if local_bootstrap is defined else '' }}"
+
+ - name: "create {{ openshift_build_node_type }} node groups"
+ include_role:
+ name: openshift_aws_node_group
+ vars:
+ r_openshift_aws_node_group_name: "{{ provision.clusterid }} openshift {{ openshift_build_node_type }}"
+ r_openshift_aws_node_group_lc_name: "{{ launch_config_name }}"
+ r_openshift_aws_node_group_clusterid: "{{ provision.clusterid }}"
+ r_openshift_aws_node_group_region: "{{ provision.region }}"
+ r_openshift_aws_node_group_config: "{{ provision.node_group_config }}"
+ r_openshift_aws_node_group_type: "{{ openshift_build_node_type }}"
+ r_openshift_aws_node_group_subnet_name: "{{ provision.vpc.subnets[provision.region][0].az }}"
diff --git a/playbooks/aws/openshift-cluster/cluster_hosts.yml b/playbooks/aws/openshift-cluster/cluster_hosts.yml
deleted file mode 100644
index c2f4dfedc..000000000
--- a/playbooks/aws/openshift-cluster/cluster_hosts.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-g_all_hosts: "{{ groups['tag_clusterid_' ~ cluster_id] | default([])
- | intersect(groups['tag_environment_' ~ cluster_env] | default([])) }}"
-
-g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_etcd'] | default([])) }}"
-
-g_new_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_etcd'] | default([])) }}"
-
-g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])) }}"
-
-g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}"
-
-g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}"
-
-g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}"
-
-g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_master'] | default([])) }}"
-
-g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | default([])) }}"
-
-g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_node'] | default([])) }}"
-
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}"
-
-g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute'] | default([])) }}"
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
deleted file mode 100644
index 821a0f30e..000000000
--- a/playbooks/aws/openshift-cluster/config.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- include: ../../common/openshift-cluster/config.yml
- vars:
- g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].become }}"
- g_nodeonmaster: true
- openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: "{{ debug_level }}"
- openshift_deployment_type: "{{ deployment_type }}"
- openshift_public_hostname: "{{ ec2_ip_address }}"
- openshift_hosted_registry_selector: 'type=infra'
- openshift_hosted_router_selector: 'type=infra'
- openshift_node_labels:
- region: "{{ deployment_vars[deployment_type].region }}"
- type: "{{ hostvars[inventory_hostname]['ec2_tag_sub-host-type'] }}"
- openshift_master_cluster_method: 'native'
- openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
- os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
- openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
- openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
- openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
deleted file mode 100644
index 3edace493..000000000
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ /dev/null
@@ -1,54 +0,0 @@
----
-- name: Launch instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ etcd_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "default"
-
- - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ master_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "default"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "compute"
- count: "{{ num_nodes }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "infra"
- count: "{{ num_infra }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
-
- - add_host:
- name: "{{ master_names.0 }}"
- groups: service_master
- when: master_names is defined and master_names.0 is defined
-
-- include: update.yml
-- include: list.yml
diff --git a/playbooks/aws/openshift-cluster/library/ec2_ami_find.py b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
deleted file mode 100644
index 99d0f44f0..000000000
--- a/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
+++ /dev/null
@@ -1,303 +0,0 @@
-#!/usr/bin/python
-#pylint: skip-file
-# flake8: noqa
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-DOCUMENTATION = '''
----
-module: ec2_ami_find
-version_added: 2.0
-short_description: Searches for AMIs to obtain the AMI ID and other information
-description:
- - Returns list of matching AMIs with AMI ID, along with other useful information
- - Can search AMIs with different owners
- - Can search by matching tag(s), by AMI name and/or other criteria
- - Results can be sorted and sliced
-author: Tom Bamford
-notes:
- - This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com.
- - See the example below for a suggestion of how to search by distro/release.
-options:
- region:
- description:
- - The AWS region to use.
- required: true
- aliases: [ 'aws_region', 'ec2_region' ]
- owner:
- description:
- - Search AMIs owned by the specified owner
- - Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace'
- - If not specified, all EC2 AMIs in the specified region will be searched.
- - You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the literal string *amazon?\.
- required: false
- default: null
- ami_id:
- description:
- - An AMI ID to match.
- default: null
- required: false
- ami_tags:
- description:
- - A hash/dictionary of tags to match for the AMI.
- default: null
- required: false
- architecture:
- description:
- - An architecture type to match (e.g. x86_64).
- default: null
- required: false
- hypervisor:
- description:
- - A hypervisor type type to match (e.g. xen).
- default: null
- required: false
- is_public:
- description:
- - Whether or not the image(s) are public.
- choices: ['yes', 'no']
- default: null
- required: false
- name:
- description:
- - An AMI name to match.
- default: null
- required: false
- platform:
- description:
- - Platform type to match.
- default: null
- required: false
- sort:
- description:
- - Optional attribute which with to sort the results.
- - If specifying 'tag', the 'tag_name' parameter is required.
- choices: ['name', 'description', 'tag']
- default: null
- required: false
- sort_tag:
- description:
- - Tag name with which to sort results.
- - Required when specifying 'sort=tag'.
- default: null
- required: false
- sort_order:
- description:
- - Order in which to sort results.
- - Only used when the 'sort' parameter is specified.
- choices: ['ascending', 'descending']
- default: 'ascending'
- required: false
- sort_start:
- description:
- - Which result to start with (when sorting).
- - Corresponds to Python slice notation.
- default: null
- required: false
- sort_end:
- description:
- - Which result to end with (when sorting).
- - Corresponds to Python slice notation.
- default: null
- required: false
- state:
- description:
- - AMI state to match.
- default: 'available'
- required: false
- virtualization_type:
- description:
- - Virtualization type to match (e.g. hvm).
- default: null
- required: false
- no_result_action:
- description:
- - What to do when no results are found.
- - "'success' reports success and returns an empty array"
- - "'fail' causes the module to report failure"
- choices: ['success', 'fail']
- default: 'success'
- required: false
-requirements:
- - boto
-
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Search for the AMI tagged "project:website"
-- ec2_ami_find:
- owner: self
- tags:
- project: website
- no_result_action: fail
- register: ami_find
-
-# Search for the latest Ubuntu 14.04 AMI
-- ec2_ami_find:
- name: "ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*"
- owner: 099720109477
- sort: name
- sort_order: descending
- sort_end: 1
- register: ami_find
-
-# Launch an EC2 instance
-- ec2:
- image: "{{ ami_search.results[0].ami_id }}"
- instance_type: m4.medium
- key_name: mykey
- wait: yes
-'''
-
-try:
- import boto.ec2
- HAS_BOTO=True
-except ImportError:
- HAS_BOTO=False
-
-import json
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- region = dict(required=True,
- aliases = ['aws_region', 'ec2_region']),
- owner = dict(required=False, default=None),
- ami_id = dict(required=False),
- ami_tags = dict(required=False, type='dict',
- aliases = ['search_tags', 'image_tags']),
- architecture = dict(required=False),
- hypervisor = dict(required=False),
- is_public = dict(required=False),
- name = dict(required=False),
- platform = dict(required=False),
- sort = dict(required=False, default=None,
- choices=['name', 'description', 'tag']),
- sort_tag = dict(required=False),
- sort_order = dict(required=False, default='ascending',
- choices=['ascending', 'descending']),
- sort_start = dict(required=False),
- sort_end = dict(required=False),
- state = dict(required=False, default='available'),
- virtualization_type = dict(required=False),
- no_result_action = dict(required=False, default='success',
- choices = ['success', 'fail']),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- )
-
- if not HAS_BOTO:
- module.fail_json(msg='boto required for this module, install via pip or your package manager')
-
- ami_id = module.params.get('ami_id')
- ami_tags = module.params.get('ami_tags')
- architecture = module.params.get('architecture')
- hypervisor = module.params.get('hypervisor')
- is_public = module.params.get('is_public')
- name = module.params.get('name')
- owner = module.params.get('owner')
- platform = module.params.get('platform')
- sort = module.params.get('sort')
- sort_tag = module.params.get('sort_tag')
- sort_order = module.params.get('sort_order')
- sort_start = module.params.get('sort_start')
- sort_end = module.params.get('sort_end')
- state = module.params.get('state')
- virtualization_type = module.params.get('virtualization_type')
- no_result_action = module.params.get('no_result_action')
-
- filter = {'state': state}
-
- if ami_id:
- filter['image_id'] = ami_id
- if ami_tags:
- for tag in ami_tags:
- filter['tag:'+tag] = ami_tags[tag]
- if architecture:
- filter['architecture'] = architecture
- if hypervisor:
- filter['hypervisor'] = hypervisor
- if is_public:
- filter['is_public'] = is_public
- if name:
- filter['name'] = name
- if platform:
- filter['platform'] = platform
- if virtualization_type:
- filter['virtualization_type'] = virtualization_type
-
- ec2 = ec2_connect(module)
-
- images_result = ec2.get_all_images(owners=owner, filters=filter)
-
- if no_result_action == 'fail' and len(images_result) == 0:
- module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(filter))
-
- results = []
- for image in images_result:
- data = {
- 'ami_id': image.id,
- 'architecture': image.architecture,
- 'description': image.description,
- 'is_public': image.is_public,
- 'name': image.name,
- 'owner_id': image.owner_id,
- 'platform': image.platform,
- 'root_device_name': image.root_device_name,
- 'root_device_type': image.root_device_type,
- 'state': image.state,
- 'tags': image.tags,
- 'virtualization_type': image.virtualization_type,
- }
-
- if image.kernel_id:
- data['kernel_id'] = image.kernel_id
- if image.ramdisk_id:
- data['ramdisk_id'] = image.ramdisk_id
-
- results.append(data)
-
- if sort == 'tag':
- if not sort_tag:
- module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'")
- results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending'))
- elif sort:
- results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))
-
- try:
- if sort and sort_start and sort_end:
- results = results[int(sort_start):int(sort_end)]
- elif sort and sort_start:
- results = results[int(sort_start):]
- elif sort and sort_end:
- results = results[:int(sort_end)]
- except TypeError:
- module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end")
-
- module.exit_json(results=results)
-
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.ec2 import *
-
-if __name__ == '__main__':
- main()
-
diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml
deleted file mode 100644
index ed8aac398..000000000
--- a/playbooks/aws/openshift-cluster/list.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Generate oo_list_hosts group
- hosts: localhost
- gather_facts: no
- connection: local
- become: no
- vars_files:
- - vars.yml
- tasks:
- - set_fact: scratch_group=tag_clusterid_{{ cluster_id }}
- when: cluster_id != ''
- - set_fact: scratch_group=all
- when: cluster_id == ''
- - add_host:
- name: "{{ item }}"
- groups: oo_list_hosts
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- oo_public_ipv4: "{{ hostvars[item].ec2_ip_address }}"
- oo_private_ipv4: "{{ hostvars[item].ec2_private_ip_address }}"
- with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
- - debug:
- msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/aws/openshift-cluster/provision.yml b/playbooks/aws/openshift-cluster/provision.yml
new file mode 100644
index 000000000..dfbf61cc7
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision.yml
@@ -0,0 +1,157 @@
+---
+- name: Setup the vpc and the master node group
+ hosts: localhost
+ tasks:
+ - name: get provisioning vars
+ include_vars: vars.yml
+
+ - name: create default vpc
+ include_role:
+ name: openshift_aws_vpc
+ vars:
+ r_openshift_aws_vpc_clusterid: "{{ provision.clusterid }}"
+ r_openshift_aws_vpc_cidr: "{{ provision.vpc.cidr }}"
+ r_openshift_aws_vpc_subnets: "{{ provision.vpc.subnets }}"
+ r_openshift_aws_vpc_region: "{{ provision.region }}"
+ r_openshift_aws_vpc_tags: "{{ provision.vpc.tags }}"
+ r_openshift_aws_vpc_name: "{{ provision.vpc.name | default(provision.clusterid) }}"
+
+ - name: create aws ssh keypair
+ include_role:
+ name: openshift_aws_ssh_keys
+ vars:
+ r_openshift_aws_ssh_keys_users: "{{ provision.instance_users }}"
+ r_openshift_aws_ssh_keys_region: "{{ provision.region }}"
+
+ - when: provision.openshift_registry_s3 | default(false)
+ name: create s3 bucket for registry
+ include_role:
+ name: openshift_aws_s3
+ vars:
+ r_openshift_aws_s3_clusterid: "{{ provision.clusterid }}-docker-registry"
+ r_openshift_aws_s3_region: "{{ provision.region }}"
+ r_openshift_aws_s3_mode: create
+
+ - name: include scale group creation for master
+ include: build_node_group.yml
+ vars:
+ openshift_build_node_type: master
+
+ - name: fetch new master instances
+ ec2_remote_facts:
+ region: "{{ provision.region }}"
+ filters:
+ "tag:clusterid": "{{ provision.clusterid }}"
+ "tag:host-type": master
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
+
+ - name: bring iam_cert23 into scope
+ include_role:
+ name: lib_utils
+
+ - name: upload certificates to AWS IAM
+ iam_cert23:
+ state: present
+ name: "{{ provision.clusterid }}-master-external"
+ cert: "{{ provision.iam_cert_ca.cert_path }}"
+ key: "{{ provision.iam_cert_ca.key_path }}"
+ cert_chain: "{{ provision.iam_cert_ca.chain_path | default(omit) }}"
+ register: elb_cert_chain
+ failed_when:
+ - "'failed' in elb_cert_chain"
+ - elb_cert_chain.failed
+ - "'msg' in elb_cert_chain"
+ - "'already exists' not in elb_cert_chain.msg"
+ when: provision.iam_cert_ca is defined
+
+ - debug: var=elb_cert_chain
+
+ - name: create our master external and internal load balancers
+ include_role:
+ name: openshift_aws_elb
+ vars:
+ r_openshift_aws_elb_clusterid: "{{ provision.clusterid }}"
+ r_openshift_aws_elb_region: "{{ provision.region }}"
+ r_openshift_aws_elb_instance_filter:
+ "tag:clusterid": "{{ provision.clusterid }}"
+ "tag:host-type": master
+ instance-state-name: running
+ r_openshift_aws_elb_type: master
+ r_openshift_aws_elb_direction: "{{ elb_item }}"
+ r_openshift_aws_elb_idle_timout: 400
+ r_openshift_aws_elb_scheme: internet-facing
+ r_openshift_aws_elb_security_groups:
+ - "{{ provision.clusterid }}"
+ - "{{ provision.clusterid }}_master"
+ r_openshift_aws_elb_subnet_name: "{{ provision.vpc.subnets[provision.region][0].az }}"
+ r_openshift_aws_elb_name: "{{ provision.clusterid }}-master-{{ elb_item }}"
+ r_openshift_aws_elb_cert_arn: "{{ elb_cert_chain.arn }}"
+ with_items:
+ - internal
+ - external
+ loop_control:
+ loop_var: elb_item
+
+ - name: add new master to masters group
+ add_host:
+ groups: "masters,etcd,nodes"
+ name: "{{ item.public_ip_address }}"
+ hostname: "{{ provision.clusterid }}-master-{{ item.id[:-5] }}"
+ with_items: "{{ instancesout.instances }}"
+
+ - name: set facts for group normalization
+ set_fact:
+ cluster_id: "{{ provision.clusterid }}"
+ cluster_env: "{{ provision.node_group_config.tags.environment | default('dev') }}"
+
+ - name: wait for ssh to become available
+ wait_for:
+ port: 22
+ host: "{{ item.public_ip_address }}"
+ timeout: 300
+ search_regex: OpenSSH
+ with_items: "{{ instancesout.instances }}"
+
+
+- name: set the master facts for hostname to elb
+ hosts: masters
+ gather_facts: no
+ remote_user: root
+ tasks:
+ - name: include vars
+ include_vars: vars.yml
+
+ - name: fetch elbs
+ ec2_elb_facts:
+ region: "{{ provision.region }}"
+ names:
+ - "{{ item }}"
+ with_items:
+ - "{{ provision.clusterid }}-master-external"
+ - "{{ provision.clusterid }}-master-internal"
+ delegate_to: localhost
+ register: elbs
+
+ - debug: var=elbs
+
+ - name: set fact
+ set_fact:
+ openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}"
+ osm_custom_cors_origins:
+ - "{{ elbs.results[1].elbs[0].dns_name }}"
+ - "console.{{ provision.clusterid }}.openshift.com"
+ - "api.{{ provision.clusterid }}.openshift.com"
+ with_items: "{{ groups['masters'] }}"
+
+- name: normalize groups
+ include: ../../byo/openshift-cluster/initialize_groups.yml
+
+- name: run the std_include
+ include: ../../common/openshift-cluster/std_include.yml
+
+- name: run the config
+ include: ../../common/openshift-cluster/config.yml
diff --git a/playbooks/aws/openshift-cluster/provision_nodes.yml b/playbooks/aws/openshift-cluster/provision_nodes.yml
new file mode 100644
index 000000000..5428fb307
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_nodes.yml
@@ -0,0 +1,47 @@
+---
+# Get bootstrap config token
+# bootstrap should be created on first master
+# need to fetch it and shove it into cloud data
+- name: create the node scale groups
+ hosts: localhost
+ connection: local
+ gather_facts: yes
+ tasks:
+ - name: get provisioning vars
+ include_vars: vars.yml
+
+ - name: fetch master instances
+ ec2_remote_facts:
+ region: "{{ provision.region }}"
+ filters:
+ "tag:clusterid": "{{ provision.clusterid }}"
+ "tag:host-type": master
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
+
+ - name: slurp down the bootstrap.kubeconfig
+ slurp:
+ src: /etc/origin/master/bootstrap.kubeconfig
+ delegate_to: "{{ instancesout.instances[0].public_ip_address }}"
+ remote_user: root
+ register: bootstrap
+
+ - name: set_fact on localhost for kubeconfig
+ set_fact:
+ local_bootstrap: "{{ bootstrap }}"
+ launch_config_name:
+ infra: "infra-{{ ansible_date_time.epoch }}"
+ compute: "compute-{{ ansible_date_time.epoch }}"
+
+ - name: include build node group
+ include: build_node_group.yml
+ vars:
+ openshift_build_node_type: infra
+
+ - name: include build node group
+ include: build_node_group.yml
+ vars:
+ openshift_build_node_type: compute
diff --git a/playbooks/aws/openshift-cluster/scaleup.yml b/playbooks/aws/openshift-cluster/scaleup.yml
deleted file mode 100644
index 6fa9142a0..000000000
--- a/playbooks/aws/openshift-cluster/scaleup.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-
-- hosts: localhost
- gather_facts: no
- connection: local
- become: no
- vars_files:
- - vars.yml
- tasks:
- - name: Evaluate oo_hosts_to_update
- add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_update
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ groups.nodes_to_add }}"
-
-- include: ../../common/openshift-cluster/update_repos_and_packages.yml
-
-- include: ../../common/openshift-cluster/scaleup.yml
- vars_files:
- - ../../aws/openshift-cluster/vars.yml
- - ../../aws/openshift-cluster/cluster_hosts.yml
- vars:
- g_new_node_hosts: "{{ groups.nodes_to_add }}"
- g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].become }}"
- g_nodeonmaster: true
- openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: "{{ debug_level }}"
- openshift_deployment_type: "{{ deployment_type }}"
- openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/service.yml b/playbooks/aws/openshift-cluster/service.yml
deleted file mode 100644
index f7f4812bb..000000000
--- a/playbooks/aws/openshift-cluster/service.yml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-- name: Call same systemctl command for openshift on all instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- - cluster_hosts.yml
- tasks:
- - fail: msg="cluster_id is required to be injected in this playbook"
- when: cluster_id is not defined
-
- - name: Evaluate g_service_masters
- add_host:
- name: "{{ item }}"
- groups: g_service_masters
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ master_hosts | default([]) }}"
-
- - name: Evaluate g_service_nodes
- add_host:
- name: "{{ item }}"
- groups: g_service_nodes
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ node_hosts | default([]) }}"
-
-- include: ../../common/openshift-node/service.yml
-- include: ../../common/openshift-master/service.yml
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
deleted file mode 100644
index 608512b79..000000000
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ /dev/null
@@ -1,188 +0,0 @@
----
-- set_fact:
- created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
- docker_vol_ephemeral: "{{ lookup('env', 'os_docker_vol_ephemeral') | default(false, true) }}"
- cluster: "{{ cluster_id }}"
- env: "{{ cluster_env }}"
- host_type: "{{ type }}"
- sub_host_type: "{{ g_sub_host_type }}"
-
-- set_fact:
- ec2_instance_type: "{{ lookup('env', 'ec2_master_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
- ec2_security_groups: "{{ lookup('env', 'ec2_master_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
- when: host_type == "master" and sub_host_type == "default"
-
-- set_fact:
- ec2_instance_type: "{{ lookup('env', 'ec2_etcd_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
- ec2_security_groups: "{{ lookup('env', 'ec2_etcd_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
- when: host_type == "etcd" and sub_host_type == "default"
-
-- set_fact:
- ec2_instance_type: "{{ lookup('env', 'ec2_infra_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
- ec2_security_groups: "{{ lookup('env', 'ec2_infra_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
- when: host_type == "node" and sub_host_type == "infra"
-
-- set_fact:
- ec2_instance_type: "{{ lookup('env', 'ec2_node_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
- ec2_security_groups: "{{ lookup('env', 'ec2_node_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
- when: host_type == "node" and sub_host_type == "compute"
-
-- set_fact:
- ec2_instance_type: "{{ deployment_vars[deployment_type].type }}"
- when: ec2_instance_type is not defined
-- set_fact:
- ec2_security_groups: "{{ deployment_vars[deployment_type].security_groups }}"
- when: ec2_security_groups is not defined
-
-- name: Find amis for deployment_type
- ec2_ami_find:
- region: "{{ deployment_vars[deployment_type].region }}"
- ami_id: "{{ deployment_vars[deployment_type].image }}"
- name: "{{ deployment_vars[deployment_type].image_name }}"
- register: ami_result
-
-- fail: msg="Could not find requested ami"
- when: not ami_result.results
-
-- set_fact:
- latest_ami: "{{ ami_result.results | oo_ami_selector(deployment_vars[deployment_type].image_name) }}"
- volume_defs:
- etcd:
- root:
- volume_size: "{{ lookup('env', 'os_etcd_root_vol_size') | default(25, true) }}"
- device_type: "{{ lookup('env', 'os_etcd_root_vol_type') | default('gp2', true) }}"
- iops: "{{ lookup('env', 'os_etcd_root_vol_iops') | default(500, true) }}"
- master:
- root:
- volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}"
- device_type: "{{ lookup('env', 'os_master_root_vol_type') | default('gp2', true) }}"
- iops: "{{ lookup('env', 'os_master_root_vol_iops') | default(500, true) }}"
- docker:
- volume_size: "{{ lookup('env', 'os_docker_vol_size') | default(10, true) }}"
- device_type: "{{ lookup('env', 'os_docker_vol_type') | default('gp2', true) }}"
- iops: "{{ lookup('env', 'os_docker_vol_iops') | default(500, true) }}"
- node:
- root:
- volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(85, true) }}"
- device_type: "{{ lookup('env', 'os_node_root_vol_type') | default('gp2', true) }}"
- iops: "{{ lookup('env', 'os_node_root_vol_iops') | default(500, true) }}"
- docker:
- volume_size: "{{ lookup('env', 'os_docker_vol_size') | default(32, true) }}"
- device_type: "{{ lookup('env', 'os_docker_vol_type') | default('gp2', true) }}"
- iops: "{{ lookup('env', 'os_docker_vol_iops') | default(500, true) }}"
-
-- set_fact:
- volumes: "{{ volume_defs | oo_ec2_volume_definition(host_type, docker_vol_ephemeral | bool) }}"
-
-- name: Launch instance(s)
- ec2:
- state: present
- region: "{{ deployment_vars[deployment_type].region }}"
- keypair: "{{ deployment_vars[deployment_type].keypair }}"
- group: "{{ deployment_vars[deployment_type].security_groups }}"
- instance_type: "{{ ec2_instance_type }}"
- image: "{{ deployment_vars[deployment_type].image }}"
- count: "{{ instances | length }}"
- vpc_subnet_id: "{{ deployment_vars[deployment_type].vpc_subnet }}"
- assign_public_ip: "{{ deployment_vars[deployment_type].assign_public_ip }}"
- user_data: "{{ lookup('template', '../templates/user_data.j2') }}"
- wait: yes
- instance_tags:
- created-by: "{{ created_by }}"
- clusterid: "{{ cluster }}"
- environment: "{{ cluster_env }}"
- host-type: "{{ host_type }}"
- sub-host-type: "{{ sub_host_type }}"
- volumes: "{{ volumes }}"
- register: ec2
-
-- name: Add Name tag to instances
- ec2_tag: resource={{ item.1.id }} region={{ deployment_vars[deployment_type].region }} state=present
- with_together:
- - "{{ instances }}"
- - "{{ ec2.instances }}"
- args:
- tags:
- Name: "{{ item.0 }}"
-
-- set_fact:
- instance_groups: >
- tag_created-by_{{ created_by }}, tag_clusterid_{{ cluster }},
- tag_environment_{{ cluster_env }}, tag_host-type_{{ host_type }},
- tag_sub-host-type_{{ sub_host_type }}
-
-- set_fact:
- node_label:
- region: "{{ deployment_vars[deployment_type].region }}"
- type: "{{sub_host_type}}"
- when: host_type == "node"
-
-- set_fact:
- node_label:
- region: "{{ deployment_vars[deployment_type].region }}"
- type: "{{host_type}}"
- when: host_type != "node"
-
-- set_fact:
- logrotate:
- - name: syslog
- path: |
- /var/log/cron
- /var/log/maillog
- /var/log/messages
- /var/log/secure
- /var/log/spooler"
- options:
- - daily
- - rotate 7
- - compress
- - sharedscripts
- - missingok
- scripts:
- postrotate: "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"
-
-- name: Add new instances groups and variables
- add_host:
- hostname: "{{ item.0 }}"
- ansible_ssh_host: "{{ item.1.dns_name }}"
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: "{{ instance_groups }}"
- ec2_private_ip_address: "{{ item.1.private_ip }}"
- ec2_ip_address: "{{ item.1.public_ip }}"
- ec2_tag_sub-host-type: "{{ sub_host_type }}"
- openshift_node_labels: "{{ node_label }}"
- logrotate_scripts: "{{ logrotate }}"
- with_together:
- - "{{ instances }}"
- - "{{ ec2.instances }}"
-
-- name: Add new instances to nodes_to_add group if needed
- add_host:
- hostname: "{{ item.0 }}"
- ansible_ssh_host: "{{ item.1.dns_name }}"
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: nodes_to_add
- ec2_private_ip_address: "{{ item.1.private_ip }}"
- ec2_ip_address: "{{ item.1.public_ip }}"
- openshift_node_labels: "{{ node_label }}"
- logrotate_scripts: "{{ logrotate }}"
- with_together:
- - "{{ instances }}"
- - "{{ ec2.instances }}"
- when: oo_extend_env is defined and oo_extend_env | bool
-
-- name: Wait for ssh
- wait_for: "port=22 host={{ item.dns_name }}"
- with_items: "{{ ec2.instances }}"
-
-- name: Wait for user setup
- command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.0].ansible_ssh_user }}@{{ item.1.dns_name }} echo {{ hostvars[item.0].ansible_ssh_user }} user is setup"
- register: result
- until: result.rc == 0
- retries: 20
- delay: 10
- with_together:
- - "{{ instances }}"
- - "{{ ec2.instances }}"
diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2
deleted file mode 100644
index b1087f9c4..000000000
--- a/playbooks/aws/openshift-cluster/templates/user_data.j2
+++ /dev/null
@@ -1,22 +0,0 @@
-#cloud-config
-{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}
-mounts:
-- [ xvdb ]
-- [ ephemeral0 ]
-{% endif %}
-
-write_files:
-{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}
-- content: |
- DEVS=/dev/xvdb
- VG=docker_vg
- path: /etc/sysconfig/docker-storage-setup
- owner: root:root
- permissions: '0644'
-{% endif %}
-{% if deployment_vars[deployment_type].become | bool %}
-- path: /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}-cloud-init-requiretty
- permissions: 440
- content: |
- Defaults:{{ deployment_vars[deployment_type].ssh_user }} !requiretty
-{% endif %}
diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml
deleted file mode 100644
index 1f15aa4bf..000000000
--- a/playbooks/aws/openshift-cluster/terminate.yml
+++ /dev/null
@@ -1,77 +0,0 @@
----
-- name: Terminate instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_terminate
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ (groups['tag_clusterid_' ~ cluster_id] | default([])) | difference(['localhost']) }}"
-
-- name: Unsubscribe VMs
- hosts: oo_hosts_to_terminate
- roles:
- - role: rhel_unsubscribe
- when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
-
-- name: Terminate instances
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Remove tags from instances
- ec2_tag:
- resource: "{{ hostvars[item]['ec2_id'] }}"
- region: "{{ hostvars[item]['ec2_region'] }}"
- state: absent
- tags:
- environment: "{{ hostvars[item]['ec2_tag_environment'] }}"
- clusterid: "{{ hostvars[item]['ec2_tag_clusterid'] }}"
- host-type: "{{ hostvars[item]['ec2_tag_host-type'] }}"
- sub_host_type: "{{ hostvars[item]['ec2_tag_sub-host-type'] }}"
- with_items: "{{ groups.oo_hosts_to_terminate }}"
- when: "'oo_hosts_to_terminate' in groups"
-
- - name: Terminate instances
- ec2:
- state: absent
- instance_ids: ["{{ hostvars[item].ec2_id }}"]
- region: "{{ hostvars[item].ec2_region }}"
- ignore_errors: yes
- register: ec2_term
- with_items: "{{ groups.oo_hosts_to_terminate }}"
- when: "'oo_hosts_to_terminate' in groups"
-
- # Fail if any of the instances failed to terminate with an error other
- # than 403 Forbidden
- - fail:
- msg: "Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}"
- when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
- with_items: "{{ ec2_term.results }}"
-
- - name: Stop instance if termination failed
- ec2:
- state: stopped
- instance_ids: ["{{ item.item.ec2_id }}"]
- region: "{{ item.item.ec2_region }}"
- register: ec2_stop
- when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
- with_items: "{{ ec2_term.results }}"
-
- - name: Rename stopped instances
- ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
- args:
- tags:
- Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
- with_items: "{{ ec2_stop.results }}"
- when: ec2_stop | changed
diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml
deleted file mode 100644
index ed05d61ed..000000000
--- a/playbooks/aws/openshift-cluster/update.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts }}"
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- name: Update - Populate oo_hosts_to_update group
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Update - Evaluate oo_hosts_to_update
- add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_update
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ g_all_hosts | default([]) }}"
-
-- include: ../../common/openshift-cluster/update_repos_and_packages.yml
-
-- include: config.yml
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
index d774187f0..47da03cb7 100644
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ b/playbooks/aws/openshift-cluster/vars.yml
@@ -1,33 +1,113 @@
---
-debug_level: 2
-
-deployment_rhel7_ent_base:
- # rhel-7.1, requires cloud access subscription
- image: "{{ lookup('oo_option', 'ec2_image') | default('ami-10251c7a', True) }}"
- image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}"
- region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}"
- ssh_user: ec2-user
- become: yes
- keypair: "{{ lookup('oo_option', 'ec2_keypair') | default('libra', True) }}"
- type: "{{ lookup('oo_option', 'ec2_instance_type') | default('m4.large', True) }}"
- security_groups: "{{ lookup('oo_option', 'ec2_security_groups') | default([ 'public' ], True) }}"
- vpc_subnet: "{{ lookup('oo_option', 'ec2_vpc_subnet') | default(omit, True) }}"
- assign_public_ip: "{{ lookup('oo_option', 'ec2_assign_public_ip') | default(omit, True) }}"
-
-deployment_vars:
- origin:
- # centos-7, requires marketplace
- image: "{{ lookup('oo_option', 'ec2_image') | default('ami-6d1c2007', True) }}"
- image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}"
- region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}"
- ssh_user: centos
- become: yes
- keypair: "{{ lookup('oo_option', 'ec2_keypair') | default('libra', True) }}"
- type: "{{ lookup('oo_option', 'ec2_instance_type') | default('m4.large', True) }}"
- security_groups: "{{ lookup('oo_option', 'ec2_security_groups') | default([ 'public' ], True) }}"
- vpc_subnet: "{{ lookup('oo_option', 'ec2_vpc_subnet') | default(omit, True) }}"
- assign_public_ip: "{{ lookup('oo_option', 'ec2_assign_public_ip') | default(omit, True) }}"
-
- enterprise: "{{ deployment_rhel7_ent_base }}"
- openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
- atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
+
+clusterid: mycluster
+region: us-east-1
+
+provision:
+ clusterid: "{{ clusterid }}"
+ region: "{{ region }}"
+
+  build: # build-specific variables here
+ ami_name: "openshift-gi-"
+ base_image: ami-bdd5d6ab # base image for AMI to build from
+
+ # when creating an encrypted AMI please specify use_encryption
+ use_encryption: False
+
+ openshift_ami_tags:
+ bootstrap: "true"
+ openshift-created: "true"
+ clusterid: "{{ clusterid }}"
+
+    # Use S3-backed registry storage
+ openshift_registry_s3: True
+
+    # if using custom certificates, these are required for the ELB
+ iam_cert_ca:
+ name: "{{ clusterid }}_openshift"
+ cert_path: '/path/to/wildcard.<clusterid>.example.com.crt'
+ key_path: '/path/to/wildcard.<clusterid>.example.com.key'
+ chain_path: '/path/to/cert.ca.crt'
+
+ instance_users:
+ - key_name: myuser_key
+ username: myuser
+ pub_key: |
+ ssh-rsa AAAA== myuser@system
+
+ node_group_config:
+ tags:
+ clusterid: "{{ clusterid }}"
+ environment: stg
+
+ ssh_key_name: myuser_key
+
+    # master-specific cluster node settings
+ master:
+ instance_type: m4.xlarge
+      ami: ami-cdeec8b6 # if using an encrypted AMI, this will be replaced
+ volumes:
+ - device_name: /dev/sdb
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: False
+ health_check:
+ period: 60
+ type: EC2
+ min_size: 3
+ max_size: 3
+ desired_size: 3
+ tags:
+ host-type: master
+ sub-host-type: default
+ wait_for_instances: True
+
+    # compute-specific cluster node settings
+ compute:
+ instance_type: m4.xlarge
+ ami: ami-cdeec8b6
+ volumes:
+ - device_name: /dev/sdb
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: True
+ health_check:
+ period: 60
+ type: EC2
+ min_size: 3
+ max_size: 100
+ desired_size: 3
+ tags:
+ host-type: node
+ sub-host-type: compute
+
+    # infra-specific cluster node settings
+ infra:
+ instance_type: m4.xlarge
+ ami: ami-cdeec8b6
+ volumes:
+ - device_name: /dev/sdb
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: True
+ health_check:
+ period: 60
+ type: EC2
+ min_size: 2
+ max_size: 20
+ desired_size: 2
+ tags:
+ host-type: node
+ sub-host-type: infra
+
+ # vpc settings
+ vpc:
+ cidr: 172.31.0.0/16
+ subnets:
+ us-east-1: # These are us-east-1 region defaults. Ensure this matches your region
+ - cidr: 172.31.48.0/20
+ az: "us-east-1c"
+ - cidr: 172.31.32.0/20
+ az: "us-east-1e"
+ - cidr: 172.31.16.0/20
+ az: "us-east-1a"
diff --git a/playbooks/byo/openshift-checks/README.md b/playbooks/byo/openshift-checks/README.md
index f0f14b268..b26e7d7ed 100644
--- a/playbooks/byo/openshift-checks/README.md
+++ b/playbooks/byo/openshift-checks/README.md
@@ -7,15 +7,14 @@ Ansible's default operation mode is to fail fast, on the first error. However,
when performing checks, it is useful to gather as much information about
problems as possible in a single run.
-Thus, the playbooks run a battery of checks against the inventory hosts and have
-Ansible gather intermediate errors, giving a more complete diagnostic of the
-state of each host. If any check failed, the playbook run will be marked as
-failed.
+Thus, the playbooks run a battery of checks against the inventory hosts and
+gather intermediate errors, giving a more complete diagnostic of the state of
+each host. If any check fails, the playbook run is marked as failed.
To facilitate understanding the problems that were encountered, a custom
callback plugin summarizes execution errors at the end of a playbook run.
-# Available playbooks
+## Available playbooks
1. Pre-install playbook ([pre-install.yml](pre-install.yml)) - verifies system
requirements and looks for common problems that can prevent a successful
@@ -27,6 +26,10 @@ callback plugin summarizes execution errors at the end of a playbook run.
3. Certificate expiry playbooks ([certificate_expiry](certificate_expiry)) -
check that certificates in use are valid and not expiring soon.
+4. Adhoc playbook ([adhoc.yml](adhoc.yml)) - use it to run adhoc checks or to
+ list existing checks.
+ See the [next section](#the-adhoc-playbook) for a usage example.
+
## Running
With a [recent installation of Ansible](../../../README.md#setup), run the playbook
@@ -59,6 +62,41 @@ against your inventory file. Here is the step-by-step:
$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/certificate_expiry/default.yaml -v
```
+### The adhoc playbook
+
+The adhoc playbook gives flexibility to run any check or a custom group of
+checks. What will be run is determined by the `openshift_checks` variable,
+which, among other ways supported by Ansible, can be set on the command line
+using the `-e` flag.
+
+For example, to run the `docker_storage` check:
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=docker_storage
+```
+
+To run more checks, use a comma-separated list of check names:
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=docker_storage,disk_availability
+```
+
+To run an entire class of checks, use the name of a check group tag, prefixed by `@`. This will run all checks tagged `preflight`:
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=@preflight
+```
+
+It is valid to specify multiple check tags and individual check names together
+in a comma-separated list.
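+For example, to combine the `@preflight` tag with the `docker_storage` check
+(an illustrative combination; any valid tag and check names work):
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=@preflight,docker_storage
+```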
+
+To list all of the available checks and tags, run the adhoc playbook without
+setting the `openshift_checks` variable:
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml
+```
+
## Running in a container
This repository is built into a Docker image including Ansible so that it can
diff --git a/playbooks/byo/openshift-checks/adhoc.yml b/playbooks/byo/openshift-checks/adhoc.yml
new file mode 100644
index 000000000..226bed732
--- /dev/null
+++ b/playbooks/byo/openshift-checks/adhoc.yml
@@ -0,0 +1,27 @@
+---
+# NOTE: ideally this would be just part of a single play in
+# common/openshift-checks/adhoc.yml that lists the existing checks when
+# openshift_checks is not set, or runs the requested checks otherwise. However,
+# actually running the checks requires the included dependencies to run first,
+# and that takes time. To speed up listing checks, this separate play runs
+# before the dependencies are included, improving the UX.
+- name: OpenShift health checks
+  # NOTE: though the openshift_checks variable could potentially be defined on
+  # individual hosts while not defined for localhost, we do not support that
+  # usage. Running this play only on localhost speeds up execution.
+ hosts: localhost
+ connection: local
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: adhoc
+ pre_tasks:
+ - name: List known health checks
+ action: openshift_health_check
+ when: openshift_checks is undefined or not openshift_checks
+
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-checks/adhoc.yml
diff --git a/playbooks/byo/openshift-checks/health.yml b/playbooks/byo/openshift-checks/health.yml
index dfc1a7db0..96a71e4dc 100644
--- a/playbooks/byo/openshift-checks/health.yml
+++ b/playbooks/byo/openshift-checks/health.yml
@@ -1,3 +1,6 @@
---
- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
- include: ../../common/openshift-checks/health.yml
diff --git a/playbooks/byo/openshift-checks/pre-install.yml b/playbooks/byo/openshift-checks/pre-install.yml
index 5e8c3ab9b..dd93df0bb 100644
--- a/playbooks/byo/openshift-checks/pre-install.yml
+++ b/playbooks/byo/openshift-checks/pre-install.yml
@@ -1,3 +1,6 @@
---
- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
- include: ../../common/openshift-checks/pre-install.yml
diff --git a/playbooks/byo/openshift-cluster/openshift-prometheus.yml b/playbooks/byo/openshift-cluster/openshift-prometheus.yml
new file mode 100644
index 000000000..15917078d
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/openshift-prometheus.yml
@@ -0,0 +1,4 @@
+---
+- include: initialize_groups.yml
+
+- include: ../../common/openshift-cluster/openshift_prometheus.yml
diff --git a/playbooks/byo/openshift-cluster/openshift-provisioners.yml b/playbooks/byo/openshift-cluster/openshift-provisioners.yml
new file mode 100644
index 000000000..8e80f158b
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/openshift-provisioners.yml
@@ -0,0 +1,6 @@
+---
+- include: initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-cluster/openshift_provisioners.yml
diff --git a/playbooks/byo/openshift-etcd/config.yml b/playbooks/byo/openshift-etcd/config.yml
index dd3f47a4d..1342bd60c 100644
--- a/playbooks/byo/openshift-etcd/config.yml
+++ b/playbooks/byo/openshift-etcd/config.yml
@@ -1,14 +1,6 @@
---
- include: ../openshift-cluster/initialize_groups.yml
- tags:
- - always
- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
- include: ../../common/openshift-etcd/config.yml
- vars:
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: "{{ debug_level | default(2) }}"
- openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}"
diff --git a/playbooks/byo/openshift-etcd/migrate.yml b/playbooks/byo/openshift-etcd/migrate.yml
index 143016159..2dec2bef6 100644
--- a/playbooks/byo/openshift-etcd/migrate.yml
+++ b/playbooks/byo/openshift-etcd/migrate.yml
@@ -1,8 +1,6 @@
---
- include: ../openshift-cluster/initialize_groups.yml
- tags:
- - always
+
+- include: ../../common/openshift-cluster/std_include.yml
- include: ../../common/openshift-etcd/migrate.yml
- tags:
- - always
diff --git a/playbooks/byo/openshift-etcd/restart.yml b/playbooks/byo/openshift-etcd/restart.yml
index d43533641..034bba4b4 100644
--- a/playbooks/byo/openshift-etcd/restart.yml
+++ b/playbooks/byo/openshift-etcd/restart.yml
@@ -1,10 +1,6 @@
---
- include: ../openshift-cluster/initialize_groups.yml
- tags:
- - always
- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
- include: ../../common/openshift-etcd/restart.yml
diff --git a/playbooks/byo/openshift-etcd/scaleup.yml b/playbooks/byo/openshift-etcd/scaleup.yml
index c35fd9f37..a2a5856a9 100644
--- a/playbooks/byo/openshift-etcd/scaleup.yml
+++ b/playbooks/byo/openshift-etcd/scaleup.yml
@@ -1,8 +1,6 @@
---
- include: ../openshift-cluster/initialize_groups.yml
-- include: ../../common/openshift-cluster/evaluate_groups.yml
+- include: ../../common/openshift-cluster/std_include.yml
+
- include: ../../common/openshift-etcd/scaleup.yml
- vars:
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-master/config.yml b/playbooks/byo/openshift-master/config.yml
new file mode 100644
index 000000000..98be0c448
--- /dev/null
+++ b/playbooks/byo/openshift-master/config.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-master/config.yml
diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml
index 7988863f3..8950efd00 100644
--- a/playbooks/byo/openshift-master/restart.yml
+++ b/playbooks/byo/openshift-master/restart.yml
@@ -1,10 +1,6 @@
---
- include: ../openshift-cluster/initialize_groups.yml
- tags:
- - always
- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
- include: ../../common/openshift-master/restart.yml
diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml
index 64811e80d..e3ef704e5 100644
--- a/playbooks/byo/openshift-master/scaleup.yml
+++ b/playbooks/byo/openshift-master/scaleup.yml
@@ -15,6 +15,8 @@
when:
- (g_new_master_hosts | default([]) | length == 0) or (g_new_node_hosts | default([]) | length == 0)
+- include: ../../common/openshift-cluster/std_include.yml
+
- include: ../../common/openshift-master/scaleup.yml
vars:
openshift_cluster_id: "{{ cluster_id | default('default') }}"
diff --git a/playbooks/byo/openshift-node/config.yml b/playbooks/byo/openshift-node/config.yml
new file mode 100644
index 000000000..839dc36ff
--- /dev/null
+++ b/playbooks/byo/openshift-node/config.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-node/config.yml
diff --git a/playbooks/byo/openshift-node/restart.yml b/playbooks/byo/openshift-node/restart.yml
index 92665d71d..ccf9e82da 100644
--- a/playbooks/byo/openshift-node/restart.yml
+++ b/playbooks/byo/openshift-node/restart.yml
@@ -1,10 +1,6 @@
---
- include: ../openshift-cluster/initialize_groups.yml
- tags:
- - always
- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
- include: ../../common/openshift-node/restart.yml
diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml
index fda89b1ea..e0c36fb69 100644
--- a/playbooks/byo/openshift-node/scaleup.yml
+++ b/playbooks/byo/openshift-node/scaleup.yml
@@ -14,9 +14,6 @@
when:
- g_new_node_hosts | default([]) | length == 0
-- include: ../../common/openshift-node/scaleup.yml
- vars:
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: "{{ debug_level | default(2) }}"
- openshift_master_etcd_hosts: "{{ groups.etcd | default([]) }}"
- openshift_master_etcd_port: 2379
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-node/config.yml
diff --git a/playbooks/byo/vagrant.yml b/playbooks/byo/vagrant.yml
deleted file mode 100644
index 76246e7b0..000000000
--- a/playbooks/byo/vagrant.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: rhel_subscribe.yml
-
-- include: config.yml
diff --git a/playbooks/common/README.md b/playbooks/common/README.md
index 0b5e26989..968bd99cb 100644
--- a/playbooks/common/README.md
+++ b/playbooks/common/README.md
@@ -1,9 +1,8 @@
# Common playbooks
This directory has a generic set of playbooks that are included by playbooks in
-[`byo`](../byo), as well as other playbooks related to the
-[`bin/cluster`](../../bin) tool.
+[`byo`](../byo).
Note: playbooks in this directory use generic group names that do not line up
-with the groups used by the `byo` playbooks or `bin/cluster` derived playbooks,
-requiring an explicit remapping of groups.
+with the groups used by the `byo` playbooks, requiring an explicit remapping of
+groups.
diff --git a/playbooks/common/openshift-checks/adhoc.yml b/playbooks/common/openshift-checks/adhoc.yml
new file mode 100644
index 000000000..dfcef8435
--- /dev/null
+++ b/playbooks/common/openshift-checks/adhoc.yml
@@ -0,0 +1,12 @@
+---
+- name: OpenShift health checks
+ hosts: oo_all_hosts
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: adhoc
+ post_tasks:
+ - name: Run health checks
+ action: openshift_health_check
+ args:
+ checks: '{{ openshift_checks | default([]) }}'
diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml
index ff5b5af67..21ea785ef 100644
--- a/playbooks/common/openshift-checks/health.yml
+++ b/playbooks/common/openshift-checks/health.yml
@@ -1,10 +1,6 @@
---
-- include: ../openshift-cluster/std_include.yml
- tags:
- - always
-
- name: Run OpenShift health checks
- hosts: OSEv3
+ hosts: oo_all_hosts
roles:
- openshift_health_checker
vars:
diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml
index 861229f21..88e6f9120 100644
--- a/playbooks/common/openshift-checks/pre-install.yml
+++ b/playbooks/common/openshift-checks/pre-install.yml
@@ -1,10 +1,6 @@
---
-- include: ../openshift-cluster/std_include.yml
- tags:
- - always
-
-- hosts: OSEv3
- name: run OpenShift pre-install checks
+- name: Run OpenShift pre-install checks
+ hosts: oo_all_hosts
roles:
- openshift_health_checker
vars:
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index e1df71112..5f420a76c 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -18,33 +18,11 @@
- docker_image_availability
- docker_storage
-- include: initialize_firewall.yml
- tags:
- - always
-
-- hosts: localhost
- tasks:
- - fail:
- msg: No etcd hosts defined. Running an all-in-one master is deprecated and will no longer be supported in a future upgrade.
- when: groups.oo_etcd_to_config | default([]) | length == 0 and not openshift_master_unsupported_all_in_one | default(False)
-
- include: initialize_oo_option_facts.yml
tags:
- always
-- name: Disable excluders
- hosts: oo_masters_to_config:oo_nodes_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
-
- include: ../openshift-etcd/config.yml
- tags:
- - etcd
- include: ../openshift-nfs/config.yml
tags:
@@ -55,12 +33,6 @@
- loadbalancer
- include: ../openshift-master/config.yml
- tags:
- - master
-
-- include: additional_config.yml
- tags:
- - master
- include: ../openshift-node/config.yml
tags:
@@ -79,13 +51,3 @@
- openshift_enable_service_catalog | default(false) | bool
tags:
- servicecatalog
-
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_masters_to_config:oo_nodes_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index c56b07037..c9f37109b 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -13,12 +13,12 @@
- name: Evaluate groups - g_master_hosts or g_new_master_hosts required
fail:
msg: This playbook requires g_master_hosts or g_new_master_hosts to be set
- when: g_master_hosts is not defined or g_new_master_hosts is not defined
+ when: g_master_hosts is not defined and g_new_master_hosts is not defined
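+    # either variable alone is sufficient; fail only when both are missing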
- name: Evaluate groups - g_node_hosts or g_new_node_hosts required
fail:
msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
- when: g_node_hosts is not defined or g_new_node_hosts is not defined
+ when: g_node_hosts is not defined and g_new_node_hosts is not defined
- name: Evaluate groups - g_lb_hosts required
fail:
@@ -33,13 +33,22 @@
- name: Evaluate groups - g_nfs_hosts is single host
fail:
msg: The nfs group must be limited to one host
- when: (groups[g_nfs_hosts] | default([])) | length > 1
+ when: g_nfs_hosts | default([]) | length > 1
- name: Evaluate groups - g_glusterfs_hosts required
fail:
msg: This playbook requires g_glusterfs_hosts to be set
when: g_glusterfs_hosts is not defined
+ - name: Evaluate groups - Fail if no etcd hosts group is defined
+ fail:
+ msg: >
+ No etcd hosts defined. Running an all-in-one master is deprecated and
+ will no longer be supported in a future upgrade.
+ when:
+ - g_etcd_hosts | default([]) | length == 0
+ - not openshift_master_unsupported_all_in_one | default(False)
+
- name: Evaluate oo_all_hosts
add_host:
name: "{{ item }}"
@@ -117,7 +126,7 @@
add_host:
name: "{{ item }}"
groups: oo_etcd_hosts_to_backup
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else (groups.oo_first_master | default([])) }}"
changed_when: False
- name: Evaluate oo_nodes_to_config
@@ -173,5 +182,5 @@
groups: oo_etcd_to_migrate
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else groups.oo_first_master }}"
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else (groups.oo_first_master |default([]))}}"
changed_when: no
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 4bf5d33b1..9eaf3bc34 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -108,6 +108,20 @@
when:
- l_any_system_container | bool
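+  # Pick a default registry for system container images based on the
+  # deployment type; setting system_images_registry in the inventory
+  # overrides both defaults.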
+  - name: Default system_images_registry to an enterprise registry
+ set_fact:
+ system_images_registry: "registry.access.redhat.com"
+ when:
+ - system_images_registry is not defined
+ - openshift_deployment_type == "openshift-enterprise"
+
+  - name: Default system_images_registry to the community registry
+ set_fact:
+ system_images_registry: "docker.io"
+ when:
+ - system_images_registry is not defined
+ - openshift_deployment_type == "origin"
+
- name: Gather Cluster facts and set is_containerized if needed
openshift_facts:
role: common
@@ -115,6 +129,7 @@
debug_level: "{{ openshift_debug_level | default(2) }}"
deployment_type: "{{ openshift_deployment_type }}"
deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
+ cli_image: "{{ osm_image | default(None) }}"
cluster_id: "{{ openshift_cluster_id | default('default') }}"
hostname: "{{ openshift_hostname | default(None) }}"
ip: "{{ openshift_ip | default(None) }}"
@@ -124,7 +139,7 @@
is_master_system_container: "{{ l_is_master_system_container | default(false) }}"
is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}"
etcd_runtime: "{{ l_etcd_runtime }}"
- system_images_registry: "{{ system_images_registry | default('') }}"
+ system_images_registry: "{{ system_images_registry }}"
public_hostname: "{{ openshift_public_hostname | default(None) }}"
public_ip: "{{ openshift_public_ip | default(None) }}"
portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
@@ -142,4 +157,4 @@
- name: initialize_facts set_fact on openshift_docker_hosted_registry_network
set_fact:
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ openshift_docker_hosted_registry_network: "{{ '' if 'oo_first_master' not in groups else hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
diff --git a/playbooks/common/openshift-cluster/initialize_firewall.yml b/playbooks/common/openshift-cluster/initialize_firewall.yml
deleted file mode 100644
index f0374fbc7..000000000
--- a/playbooks/common/openshift-cluster/initialize_firewall.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Initialize host firewall
- hosts: oo_all_hosts
- tasks:
- - name: Install and configure the proper firewall settings
- include_role:
- name: os_firewall
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 99a634970..a391b963a 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -49,6 +49,9 @@
- role: cockpit-ui
when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
+ - role: openshift_prometheus
+ when: openshift_hosted_prometheus_deploy | default(false) | bool
+
- name: Update master-config for publicLoggingURL
hosts: oo_masters_to_config:!oo_first_master
tags:
diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml
new file mode 100644
index 000000000..a979c0c00
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_prometheus.yml
@@ -0,0 +1,9 @@
+---
+- include: std_include.yml
+
+- name: OpenShift Prometheus
+ hosts: oo_first_master
+ roles:
+ - openshift_prometheus
+ vars:
+ openshift_prometheus_state: present
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
deleted file mode 100644
index be956fca5..000000000
--- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- include: evaluate_groups.yml
-
-- name: Subscribe hosts, update repos and update OS packages
- hosts: oo_hosts_to_update
- roles:
- # Explicitly calling openshift_facts because it appears that when
- # rhel_subscribe is skipped that the openshift_facts dependency for
- # openshift_repos is also skipped (this is the case at least for Ansible
- # 2.0.2)
- - openshift_facts
- - role: rhel_subscribe
- when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
- - openshift_repos
- - os_update_latest
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 02b8a9d3c..7cc13137f 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -60,7 +60,7 @@
retries: 60
delay: 60
- - include: upgrade.yml
+ - include: tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
index 83f16ac0d..83f16ac0d 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
index 808cc562c..808cc562c 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
index 9d8b73cff..9d8b73cff 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
deleted file mode 100644
index 354af3cde..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Verify node processes
- hosts: oo_nodes_to_config
- roles:
- - openshift_facts
- - openshift_docker_facts
- tasks:
- - name: Ensure Node is running
- service:
- name: "{{ openshift.common.service_type }}-node"
- state: started
- enabled: yes
- when: openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index abcd21c90..18f10437d 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -91,10 +91,7 @@
- include_vars: ../../../../roles/openshift_master/vars/main.yml
- - name: Remove any legacy systemd units
- include: ../../../../roles/openshift_master/tasks/clean_systemd_units.yml
-
- - name: Update systemd units
+ - name: Remove any legacy systemd units and update systemd units
include: ../../../../roles/openshift_master/tasks/systemd_units.yml
- name: Check for ca-bundle.crt
@@ -284,7 +281,7 @@
roles:
- openshift_facts
tasks:
- - include: docker/upgrade.yml
+ - include: docker/tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
- name: Drain and upgrade master nodes
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
index 0f6fb46a4..a241ef039 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -89,7 +89,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index cfba788a8..54c85f0fb 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -93,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
index 1054f430e..cee4e9087 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -90,7 +90,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
index 783289c87..ae217ba2e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -89,7 +89,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
index 8aa443c3c..d7cb38d03 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -93,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
index 436795694..8531e6045 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -90,7 +90,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
index 9a000265e..a3d0d6305 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -89,7 +89,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
index 2dd9676c7..5fee56615 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -93,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
index d5fe8285e..e29d0f8e6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
@@ -90,7 +90,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 8ceab09f4..51acd17da 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -93,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index f765e9064..9fe059ac9 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -93,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 8bed6a8c2..1b10d4e37 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -90,7 +90,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 4f05d0c64..9ec40723a 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -93,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 2ef95e778..f97f34c3b 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -93,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
index abc4c245b..e95b90cd5 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -90,7 +90,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
index 90e95422b..f76fc68d1 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -7,6 +7,17 @@
hosts: oo_first_master
roles:
- { role: lib_openshift }
+
tasks:
- name: Check for invalid namespaces and SDN errors
oc_objectvalidator:
+
+ - name: Confirm OpenShift authorization objects are in sync
+ command: >
+ {{ openshift.common.client_binary }} adm migrate authorization
+ when: not openshift.common.version_gte_3_7 | bool
+ changed_when: false
+ register: l_oc_result
+ until: l_oc_result.rc == 0
+ retries: 4
+ delay: 15
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 2cb6197d1..f2b85eea1 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -3,6 +3,7 @@
hosts: oo_etcd_to_config
any_errors_fatal: true
roles:
+ - role: os_firewall
- role: openshift_etcd
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml
index 3e7a48669..a2af7bb21 100644
--- a/playbooks/common/openshift-etcd/migrate.yml
+++ b/playbooks/common/openshift-etcd/migrate.yml
@@ -1,34 +1,20 @@
---
-- include: ../openshift-cluster/evaluate_groups.yml
- tags:
- - always
-
- name: Run pre-checks
hosts: oo_etcd_to_migrate
- tags:
- - always
roles:
- role: etcd_migrate
r_etcd_migrate_action: check
r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
etcd_peer: "{{ ansible_default_ipv4.address }}"
-- include: ../openshift-cluster/initialize_facts.yml
- tags:
- - always
-
+# TODO: This will be different for the release-3.6 branch
- name: Prepare masters for etcd data migration
hosts: oo_masters_to_config
tasks:
- set_fact:
master_services:
- - "{{ openshift.common.service_type + '-master' }}"
- - set_fact:
- master_services:
- "{{ openshift.common.service_type + '-master-controllers' }}"
- "{{ openshift.common.service_type + '-master-api' }}"
- when:
- - (openshift_master_cluster_method is defined and openshift_master_cluster_method == "native") or openshift.common.is_master_system_container | bool
- debug:
msg: "master service name: {{ master_services }}"
- name: Stop masters
@@ -40,8 +26,6 @@
- name: Backup v2 data
hosts: oo_etcd_to_migrate
gather_facts: no
- tags:
- - always
roles:
- role: openshift_facts
- role: etcd_common
@@ -67,16 +51,53 @@
when:
- etcd_backup_failed | length > 0
-- name: Migrate etcd data from v2 to v3
+- name: Stop etcd
hosts: oo_etcd_to_migrate
gather_facts: no
- tags:
- - always
+ pre_tasks:
+ - set_fact:
+ l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}"
+ - name: Disable etcd members
+ service:
+ name: "{{ l_etcd_service }}"
+ state: stopped
+
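+# Migration flow: with all members stopped, v2 data is migrated on the first
+# etcd host only; the remaining hosts have their data stores cleaned and are
+# re-added to the new cluster via the scaleup include below.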
+- name: Migrate data on first etcd
+ hosts: oo_etcd_to_migrate[0]
+ gather_facts: no
roles:
- role: etcd_migrate
r_etcd_migrate_action: migrate
r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
etcd_peer: "{{ ansible_default_ipv4.address }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
+
+- name: Clean data stores on remaining etcd hosts
+ hosts: oo_etcd_to_migrate[1:]
+ gather_facts: no
+ roles:
+ - role: etcd_migrate
+ r_etcd_migrate_action: clean_data
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ ansible_default_ipv4.address }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
+ post_tasks:
+ - name: Add etcd hosts
+ delegate_to: localhost
+ add_host:
+ name: "{{ item }}"
+ groups: oo_new_etcd_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_etcd_to_migrate[1:] | default([]) }}"
+ changed_when: no
+ - name: Set success
+ set_fact:
+ r_etcd_migrate_success: true
+
+- include: ./scaleup.yml
- name: Gate on etcd migration
hosts: oo_masters_to_config
@@ -89,6 +110,16 @@
- set_fact:
etcd_migration_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_migration_completed) }}"
+- name: Add TTLs on the first master
+ hosts: oo_first_master[0]
+ roles:
+ - role: etcd_migrate
+ r_etcd_migrate_action: add_ttls
+ etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].ansible_default_ipv4.address }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
+ when: etcd_migration_failed | length == 0
+
- name: Configure masters if etcd data migration is successful
hosts: oo_masters_to_config
roles:
@@ -100,10 +131,6 @@
msg: "Skipping master re-configuration since migration failed."
when:
- etcd_migration_failed | length > 0
-
-- name: Start masters after etcd data migration
- hosts: oo_masters_to_config
- tasks:
- name: Start master services
service:
name: "{{ item }}"
diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml
index 192305bc8..5f8bb1c7a 100644
--- a/playbooks/common/openshift-etcd/scaleup.yml
+++ b/playbooks/common/openshift-etcd/scaleup.yml
@@ -24,15 +24,32 @@
member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}
delegate_to: "{{ etcd_ca_host }}"
register: etcd_add_check
+ retries: 3
+ delay: 10
+ until: etcd_add_check.rc == 0
roles:
+ - role: os_firewall
+ when: etcd_add_check.rc == 0
- role: openshift_etcd
when: etcd_add_check.rc == 0
etcd_peers: "{{ groups.oo_etcd_to_config | union(groups.oo_new_etcd_to_config)| default([], true) }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_initial_cluster_state: "existing"
- initial_etcd_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') }}"
+ initial_etcd_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"
etcd_ca_setup: False
r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- role: nickhammond.logrotate
when: etcd_add_check.rc == 0
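+  # After the new member is added, poll cluster-health until the cluster
+  # reports healthy (up to 3 tries, 30s apart).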
+ post_tasks:
+ - name: Verify cluster is stable
+ command: >
+ /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }}
+ --key-file {{ etcd_peer_key_file }}
+ --ca-file {{ etcd_peer_ca_file }}
+ -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }}
+ cluster-health
+ register: scaleup_health
+ retries: 3
+ delay: 30
+ until: scaleup_health.rc == 0
diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml
deleted file mode 100644
index ced4bddc5..000000000
--- a/playbooks/common/openshift-etcd/service.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Populate g_service_masters host group if needed
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_etcd
- add_host:
- name: "{{ item }}"
- groups: g_service_etcd
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change etcd state on etcd instance(s)
- hosts: g_service_etcd
- connection: ssh
- gather_facts: no
- tasks:
- - service: name=etcd state="{{ new_cluster_state }}"
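The playbook deleted here only bootstrapped a throwaway `g_service_etcd` group and ran a single `service` task against it. With a static inventory group, the same effect fits in one short play; a hedged sketch, assuming your inventory defines an `etcd` group:

    # Sketch: set a desired etcd service state across an inventory group.
    - hosts: etcd
      gather_facts: no
      tasks:
        - name: Change etcd service state
          service:
            name: etcd
            state: "{{ new_cluster_state | default('restarted') }}"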
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml
index 2dacc1218..09ed81a83 100644
--- a/playbooks/common/openshift-loadbalancer/config.yml
+++ b/playbooks/common/openshift-loadbalancer/config.yml
@@ -14,4 +14,5 @@
+ openshift_loadbalancer_additional_backends | default([]) }}"
openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
roles:
+ - role: os_firewall
- role: openshift_loadbalancer
diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml
deleted file mode 100644
index d3762c961..000000000
--- a/playbooks/common/openshift-loadbalancer/service.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Populate g_service_nodes host group if needed
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_lb
- add_host:
- name: "{{ item }}"
- groups: g_service_lb
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on lb instance(s)
- hosts: g_service_lb
- connection: ssh
- gather_facts: no
- tasks:
- - service: name=haproxy state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml
index c0ea93d2c..c0ea93d2c 100644
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ b/playbooks/common/openshift-master/additional_config.yml
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index b30450def..b29b9ef4f 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -1,4 +1,12 @@
---
+- name: Disable excluders
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+
- name: Gather and set facts for master hosts
hosts: oo_masters_to_config
vars:
@@ -180,6 +188,7 @@
| oo_collect('openshift.common.ip') | default([]) | join(',')
}}"
roles:
+ - role: os_firewall
- role: openshift_master
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_etcd_hosts: "{{ hostvars
@@ -207,3 +216,14 @@
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
changed_when: False
+
+- include: additional_config.yml
+ when: not g_openshift_master_is_scaleup
+
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
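The net effect of this hunk: config.yml now brackets master configuration with the excluder role itself — disable before any packages are touched, re-enable once configuration finishes — which is why the duplicate excluder plays are deleted from scaleup.yml in the next hunk. Reduced to a skeleton (the service type value is illustrative):

    # Sketch: disable the excluder, run configuration, then re-enable it.
    - hosts: oo_masters_to_config
      gather_facts: no
      roles:
        - role: openshift_excluder
          r_openshift_excluder_action: disable
          r_openshift_excluder_service_type: origin  # illustrative value

    # ... configuration plays run here ...

    - hosts: oo_masters_to_config
      gather_facts: no
      roles:
        - role: openshift_excluder
          r_openshift_excluder_action: enable
          r_openshift_excluder_service_type: origin  # illustrative value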
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index bc61ee9bb..17f9ef4bc 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -1,11 +1,4 @@
---
-- include: ../openshift-cluster/evaluate_groups.yml
-
-- name: Gather facts
- hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config
- roles:
- - openshift_facts
-
- name: Update master count
hosts: oo_masters:!oo_masters_to_config
serial: 1
@@ -50,38 +43,8 @@
delay: 1
changed_when: false
-- name: Configure docker hosts
- hosts: oo_masters_to_config:oo_nodes_to_config
- vars:
- docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
- docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
- docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}"
- roles:
- - openshift_facts
- - openshift_docker
-
-- name: Disable excluders
- hosts: oo_masters_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
-
- include: ../openshift-master/config.yml
- include: ../openshift-loadbalancer/config.yml
- include: ../openshift-node/config.yml
-
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_masters_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml
deleted file mode 100644
index 48a2731aa..000000000
--- a/playbooks/common/openshift-master/service.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Populate g_service_masters host group if needed
- hosts: localhost
- gather_facts: no
- connection: local
- become: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_masters
- add_host:
- name: "{{ item }}"
- groups: g_service_masters
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on master instance(s)
- hosts: g_service_masters
- connection: ssh
- gather_facts: no
- tasks:
- - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-nfs/service.yml b/playbooks/common/openshift-nfs/service.yml
deleted file mode 100644
index b1e35e4b1..000000000
--- a/playbooks/common/openshift-nfs/service.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Populate g_service_nfs host group if needed
- hosts: localhost
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_nfs
- add_host:
- name: "{{ item }}"
- groups: g_service_nfs
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on nfs instance(s)
- hosts: g_service_nfs
- connection: ssh
- gather_facts: no
- tasks:
- - service: name=nfs-server state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index ef7d54f9f..c13417714 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -1,4 +1,12 @@
---
+- name: Disable excluders
+ hosts: oo_nodes_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+
- name: Evaluate node groups
hosts: localhost
become: no
@@ -32,6 +40,7 @@
}}"
roles:
+ - role: os_firewall
- role: openshift_node
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
@@ -47,6 +56,7 @@
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
roles:
+ - role: os_firewall
- role: openshift_node
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
@@ -76,3 +86,11 @@
- name: Create group for deployment type
group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
changed_when: False
+
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_nodes_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml
deleted file mode 100644
index 40da8990d..000000000
--- a/playbooks/common/openshift-node/scaleup.yml
+++ /dev/null
@@ -1,50 +0,0 @@
----
-- include: ../openshift-cluster/evaluate_groups.yml
-
-- name: Gather facts
- hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config
- roles:
- - openshift_facts
-
-- name: Gather and set facts for first master
- hosts: oo_first_master
- vars:
- openshift_master_count: "{{ groups.oo_masters | length }}"
- pre_tasks:
- - set_fact:
- openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}"
- when: openshift_master_default_subdomain is not defined
- roles:
- - openshift_master_facts
-
-- name: Configure docker hosts
- hosts: oo_nodes_to_config
- vars:
- docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
- docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
- docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}"
- roles:
- - openshift_facts
- - openshift_docker
-
-- name: Disable excluders
- hosts: oo_nodes_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
-
-- include: ../openshift-node/config.yml
-
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_nodes_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml
deleted file mode 100644
index 130a5416f..000000000
--- a/playbooks/common/openshift-node/service.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Populate g_service_nodes host group if needed
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_nodes
- add_host:
- name: "{{ item }}"
- groups: g_service_nodes
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on node instance(s)
- hosts: g_service_nodes
- connection: ssh
- gather_facts: no
- tasks:
- - name: Change state on node instance(s)
- service:
- name: "{{ service_type }}-node"
- state: "{{ new_cluster_state }}"
diff --git a/playbooks/gce/README.md b/playbooks/gce/README.md
deleted file mode 100644
index 0514d6f50..000000000
--- a/playbooks/gce/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# GCE playbooks
-
-This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
-which is community supported and most use is considered deprecated.
diff --git a/playbooks/gce/openshift-cluster/add_nodes.yml b/playbooks/gce/openshift-cluster/add_nodes.yml
deleted file mode 100644
index 765e03fdc..000000000
--- a/playbooks/gce/openshift-cluster/add_nodes.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-- name: Launch instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- vars:
- oo_extend_env: True
- tasks:
- - fail:
- msg: Deployment type not supported for gce provider yet
- when: deployment_type == 'enterprise'
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "compute"
- count: "{{ num_nodes }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
- gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}"
- gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "infra"
- count: "{{ num_infra }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
- gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}"
- gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}"
-
-- include: scaleup.yml
-- include: list.yml
diff --git a/playbooks/gce/openshift-cluster/cluster_hosts.yml b/playbooks/gce/openshift-cluster/cluster_hosts.yml
deleted file mode 100644
index e5f41382b..000000000
--- a/playbooks/gce/openshift-cluster/cluster_hosts.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
- | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"
-
-g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"
-
-g_new_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new_etcd'] | default([])) }}"
-
-g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"
-
-g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
-
-g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}"
-
-g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
-
-g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}"
-
-g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"
-
-g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}"
-
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
-
-g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}"
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
deleted file mode 100644
index 2625d4d05..000000000
--- a/playbooks/gce/openshift-cluster/config.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ g_all_hosts | default([]) }}"
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- include: ../../common/openshift-cluster/config.yml
- vars:
- g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].become }}"
- g_nodeonmaster: true
- openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: "{{ debug_level }}"
- openshift_deployment_type: "{{ deployment_type }}"
- openshift_hostname: "{{ gce_private_ip }}"
- openshift_hosted_registry_selector: 'type=infra'
- openshift_hosted_router_selector: 'type=infra'
- openshift_master_cluster_method: 'native'
- openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
- os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
- openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
- openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
- openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
diff --git a/playbooks/gce/openshift-cluster/filter_plugins b/playbooks/gce/openshift-cluster/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/gce/openshift-cluster/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml
deleted file mode 100644
index 7532a678b..000000000
--- a/playbooks/gce/openshift-cluster/launch.yml
+++ /dev/null
@@ -1,67 +0,0 @@
----
-- name: Launch instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - fail: msg="Deployment type not supported for gce provider yet"
- when: deployment_type == 'enterprise'
-
- - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ etcd_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "default"
- gce_machine_type: "{{ lookup('env', 'gce_machine_etcd_type') | default(lookup('env', 'gce_machine_type'), true) }}"
- gce_machine_image: "{{ lookup('env', 'gce_machine_etcd_image') | default(lookup('env', 'gce_machine_image'), true) }}"
-
-
- - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ master_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "default"
- gce_machine_type: "{{ lookup('env', 'gce_machine_master_type') | default(lookup('env', 'gce_machine_type'), true) }}"
- gce_machine_image: "{{ lookup('env', 'gce_machine_master_image') | default(lookup('env', 'gce_machine_image'), true) }}"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "compute"
- count: "{{ num_nodes }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
- gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}"
- gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "infra"
- count: "{{ num_infra }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
- gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}"
- gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}"
-
- - add_host:
- name: "{{ master_names.0 }}"
- groups: service_master
- when: master_names is defined and master_names.0 is defined
-
-- include: update.yml
-
-- include: list.yml
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
deleted file mode 100644
index 34ab09533..000000000
--- a/playbooks/gce/openshift-cluster/list.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Generate oo_list_hosts group
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - set_fact: scratch_group=tag_clusterid-{{ cluster_id }}
- when: cluster_id != ''
- - set_fact: scratch_group=all
- when: cluster_id == ''
- - add_host:
- name: "{{ item }}"
- groups: oo_list_hosts
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- oo_public_ipv4: "{{ hostvars[item].gce_public_ip }}"
- oo_private_ipv4: "{{ hostvars[item].gce_private_ip }}"
- with_items: "{{ groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true)) }}"
- - debug:
- msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/gce/openshift-cluster/lookup_plugins b/playbooks/gce/openshift-cluster/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/gce/openshift-cluster/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/gce/openshift-cluster/roles b/playbooks/gce/openshift-cluster/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/gce/openshift-cluster/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles \ No newline at end of file
diff --git a/playbooks/gce/openshift-cluster/service.yml b/playbooks/gce/openshift-cluster/service.yml
deleted file mode 100644
index 13b267976..000000000
--- a/playbooks/gce/openshift-cluster/service.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-- name: Call same systemctl command for openshift on all instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- - cluster_hosts.yml
- tasks:
- - fail: msg="cluster_id is required to be injected in this playbook"
- when: cluster_id is not defined
-
- - add_host:
- name: "{{ item }}"
- groups: g_service_nodes
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ node_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}"
-
- - add_host:
- name: "{{ item }}"
- groups: g_service_masters
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ master_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}"
-
-- include: ../../common/openshift-node/service.yml
-- include: ../../common/openshift-master/service.yml
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
deleted file mode 100644
index 65dd2b71e..000000000
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ /dev/null
@@ -1,65 +0,0 @@
----
-- name: Launch instance(s)
- gce:
- instance_names: "{{ instances|join(',') }}"
- machine_type: "{{ gce_machine_type | default(deployment_vars[deployment_type].machine_type, true) }}"
- image: "{{ gce_machine_image | default(deployment_vars[deployment_type].image, true) }}"
- service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
- pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
- project_id: "{{ lookup('env', 'gce_project_id') }}"
- zone: "{{ lookup('env', 'zone') }}"
- network: "{{ lookup('env', 'network') }}"
- subnetwork: "{{ lookup('env', 'subnetwork') | default(omit, True) }}"
- # unsupported in 1.9.+
- #service_account_permissions: "datastore,logging-write"
- tags:
- - created-by-{{ lookup('env', 'LOGNAME') | regex_replace('[^a-z0-9]+', '') | default(cluster, true) }}
- - environment-{{ cluster_env }}
- - clusterid-{{ cluster_id }}
- - host-type-{{ type }}
- - sub-host-type-{{ g_sub_host_type }}
- metadata:
- startup-script: |
- #!/bin/bash
- echo "Defaults:{{ deployment_vars[deployment_type].ssh_user }} !requiretty" > /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}
-
- when: instances |length > 0
- register: gce
-
-- set_fact:
- node_label:
- # There doesn't seem to be a way to get the region directly, so parse it out of the zone.
- region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}"
- type: "{{ g_sub_host_type }}"
- when: instances |length > 0 and type == "node"
-
-- set_fact:
- node_label:
- # There doesn't seem to be a way to get the region directly, so parse it out of the zone.
- region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}"
- type: "{{ type }}"
- when: instances |length > 0 and type != "node"
-
-- name: Add new instances to groups and set variables needed
- add_host:
- hostname: "{{ item.name }}"
- ansible_ssh_host: "{{ item.public_ip }}"
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
- gce_public_ip: "{{ item.public_ip }}"
- gce_private_ip: "{{ item.private_ip }}"
- openshift_node_labels: "{{ node_label }}"
- with_items: "{{ gce.instance_data | default([], true) }}"
-
-- name: Wait for ssh
- wait_for: port=22 host={{ item.public_ip }}
- with_items: "{{ gce.instance_data | default([], true) }}"
-
-- name: Wait for user setup
- command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup"
- register: result
- until: result.rc == 0
- retries: 30
- delay: 5
- with_items: "{{ gce.instance_data | default([], true) }}"
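The two deleted wait tasks are the standard two-phase readiness check for freshly launched instances: first block on TCP port 22, then retry a real login until cloud-init has finished creating the user. The same idiom in generic form (variable names are placeholders):

    # Sketch: wait for SSH to come up, then confirm the login user exists.
    - name: Wait for ssh
      wait_for:
        host: "{{ instance_ip }}"
        port: 22
        timeout: 300

    - name: Wait for user setup
      command: >
        ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no
        -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null
        {{ ssh_user }}@{{ instance_ip }} true
      register: ssh_probe
      retries: 30
      delay: 5
      until: ssh_probe.rc == 0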
diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml
deleted file mode 100644
index afe269b7c..000000000
--- a/playbooks/gce/openshift-cluster/terminate.yml
+++ /dev/null
@@ -1,58 +0,0 @@
----
-- name: Terminate instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_terminate
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ (groups['tag_clusterid-' ~ cluster_id] | default([])) | difference(['localhost']) }}"
-
-- name: Unsubscribe VMs
- hosts: oo_hosts_to_terminate
- vars_files:
- - vars.yml
- roles:
- - role: rhel_unsubscribe
- when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
-
-- name: Terminate instances(s)
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - name: Terminate instances that were previously launched
- local_action:
- module: gce
- state: 'absent'
- name: "{{ item }}"
- service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
- pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
- project_id: "{{ lookup('env', 'gce_project_id') }}"
- zone: "{{ lookup('env', 'zone') }}"
- with_items: "{{ groups['oo_hosts_to_terminate'] | default([], true) }}"
- when: item is defined
-
-#- include: ../openshift-node/terminate.yml
-# vars:
-# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
-# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
-# gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
-#
-#- include: ../openshift-master/terminate.yml
-# vars:
-# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
-# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
-# gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml
deleted file mode 100644
index 6d2af3d26..000000000
--- a/playbooks/gce/openshift-cluster/update.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts }}"
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- name: Populate oo_hosts_to_update group
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Evaluate oo_hosts_to_update
- add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_update
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ g_all_hosts | default([]) }}"
-
-- include: ../../common/openshift-cluster/update_repos_and_packages.yml
-
-- include: config.yml
diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml
deleted file mode 100644
index 13c754c1e..000000000
--- a/playbooks/gce/openshift-cluster/vars.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-debug_level: 2
-
-deployment_rhel7_ent_base:
- image: "{{ lookup('oo_option', 'image_name') | default('rhel-7', True) }}"
- machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}"
- ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}"
- become: yes
-
-deployment_vars:
- origin:
- image: "{{ lookup('oo_option', 'image_name') | default('centos-7', True) }}"
- machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}"
- ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}"
- become: yes
- enterprise: "{{ deployment_rhel7_ent_base }}"
- openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
- atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
diff --git a/playbooks/libvirt/README.md b/playbooks/libvirt/README.md
deleted file mode 100644
index 3ce46a76f..000000000
--- a/playbooks/libvirt/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# libvirt playbooks
-
-This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
-which is community supported and most use is considered deprecated.
diff --git a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
deleted file mode 100644
index e5f41382b..000000000
--- a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
- | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"
-
-g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"
-
-g_new_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new_etcd'] | default([])) }}"
-
-g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"
-
-g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
-
-g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}"
-
-g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
-
-g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}"
-
-g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"
-
-g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}"
-
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
-
-g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}"
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
deleted file mode 100644
index 569e00da2..000000000
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-# TODO: need to figure out a plan for setting hostname, currently the default
-# is localhost, so no hostname value (or public_hostname) value is getting
-# assigned
-
-- include: ../../common/openshift-cluster/std_include.yml
-
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- include: ../../common/openshift-cluster/config.yml
- vars:
- g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].become }}"
- g_nodeonmaster: true
- openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: "{{ debug_level }}"
- openshift_deployment_type: "{{ deployment_type }}"
- openshift_hosted_registry_selector: 'type=infra'
- openshift_hosted_router_selector: 'type=infra'
- openshift_master_cluster_method: 'native'
- openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
- os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
- openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
- openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
- openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
diff --git a/playbooks/libvirt/openshift-cluster/filter_plugins b/playbooks/libvirt/openshift-cluster/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/libvirt/openshift-cluster/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml
deleted file mode 100644
index 2475b9d6b..000000000
--- a/playbooks/libvirt/openshift-cluster/launch.yml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-- name: Launch instance(s)
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- vars:
- image_url: "{{ deployment_vars[deployment_type].image.url }}"
- image_sha256: "{{ deployment_vars[deployment_type].image.sha256 }}"
- image_name: "{{ deployment_vars[deployment_type].image.name }}"
- image_compression: "{{ deployment_vars[deployment_type].image.compression }}"
- tasks:
- - include: tasks/configure_libvirt.yml
-
- - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ etcd_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "default"
-
- - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ master_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "default"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "compute"
- count: "{{ num_nodes }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
-
- - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
- vars:
- type: "infra"
- count: "{{ num_infra }}"
- - include: tasks/launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
- g_sub_host_type: "{{ sub_host_type }}"
-
-- include: update.yml
-
-- include: list.yml
diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml
deleted file mode 100644
index 579cd7ac6..000000000
--- a/playbooks/libvirt/openshift-cluster/list.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Generate oo_list_hosts group
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - set_fact: scratch_group=tag_clusterid-{{ cluster_id }}
- when: cluster_id != ''
- - set_fact: scratch_group=all
- when: cluster_id == ''
- - add_host:
- name: "{{ item }}"
- groups: oo_list_hosts
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- oo_public_ipv4: ""
- oo_private_ipv4: "{{ hostvars[item].libvirt_ip_address }}"
- with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
- - debug:
- msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/libvirt/openshift-cluster/lookup_plugins b/playbooks/libvirt/openshift-cluster/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/libvirt/openshift-cluster/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/libvirt/openshift-cluster/roles b/playbooks/libvirt/openshift-cluster/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/libvirt/openshift-cluster/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles \ No newline at end of file
diff --git a/playbooks/libvirt/openshift-cluster/service.yml b/playbooks/libvirt/openshift-cluster/service.yml
deleted file mode 100644
index 8bd24a8cf..000000000
--- a/playbooks/libvirt/openshift-cluster/service.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-# TODO: need to figure out a plan for setting hostname, currently the default
-# is localhost, so no hostname value (or public_hostname) value is getting
-# assigned
-
-- name: Call same systemctl command for openshift on all instance(s)
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - fail: msg="cluster_id is required to be injected in this playbook"
- when: cluster_id is not defined
-
- - name: Evaluate g_service_masters
- add_host:
- name: "{{ item }}"
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: g_service_masters
- with_items: "{{ g_master_hosts | default([]) }}"
-
- - name: Evaluate g_service_nodes
- add_host:
- name: "{{ item }}"
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: g_service_nodes
- with_items: "{{ g_node_hosts | default([]) }}"
-
-- include: ../../common/openshift-node/service.yml
-- include: ../../common/openshift-master/service.yml
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml
deleted file mode 100644
index f237c1a60..000000000
--- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- include: configure_libvirt_storage_pool.yml
- when: libvirt_storage_pool is defined and libvirt_storage_pool_path is defined
-
-- include: configure_libvirt_network.yml
- when: libvirt_network is defined
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
deleted file mode 100644
index b42ca83af..000000000
--- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: Create the libvirt network for OpenShift
- virt_net:
- name: '{{ libvirt_network }}'
- state: '{{ item }}'
- autostart: 'yes'
- xml: "{{ lookup('template', 'network.xml') }}"
- uri: '{{ libvirt_uri }}'
- with_items:
- - present
- - active
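The looped state in the deleted task is a small but useful trick: running virt_net first with `present` and then with `active` defines the network and then starts it, the module equivalent of `virsh net-define` followed by `virsh net-start`. In isolation (network name is illustrative):

    # Sketch: define, autostart, and start a libvirt network in one looped task.
    - name: Ensure the libvirt network exists and is running
      virt_net:
        name: mynet
        state: "{{ item }}"
        autostart: 'yes'
        xml: "{{ lookup('template', 'network.xml') }}"
        uri: qemu:///system
      with_items:
        - present
        - active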
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
deleted file mode 100644
index 8685624ec..000000000
--- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-- name: Create libvirt storage directory for openshift
- file:
- dest: "{{ libvirt_storage_pool_path }}"
- state: directory
-
-# We need to set permissions on the directory and any items created under the directory, so we need to call the acl module with and without default set.
-- acl:
- default: '{{ item.default }}'
- entity: kvm
- etype: group
- name: "{{ libvirt_storage_pool_path }}"
- permissions: '{{ item.permissions }}'
- state: present
- with_items:
- - default: no
- permissions: x
- - default: yes
- permissions: rwx
-
-- name: Create the libvirt storage pool for OpenShift
- virt_pool:
- name: '{{ libvirt_storage_pool }}'
- state: '{{ item }}'
- autostart: 'yes'
- xml: "{{ lookup('template', 'storage-pool.xml') }}"
- uri: '{{ libvirt_uri }}'
- with_items:
- - present
- - active
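The paired acl items implement the comment above them: the non-default entry (`permissions: x`) lets the kvm group traverse the existing directory, while the default entry (`default: yes`, `rwx`) is inherited by volumes created inside it later. Roughly the same thing by hand, with an illustrative pool path:

    # Sketch: shell equivalents of the two acl items above.
    - name: Let the kvm group traverse the pool directory
      command: setfacl -m g:kvm:x /var/lib/libvirt/openshift-pool
    - name: Make files created in the pool inherit group rwx
      command: setfacl -d -m g:kvm:rwx /var/lib/libvirt/openshift-pool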
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
deleted file mode 100644
index 4df86effa..000000000
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ /dev/null
@@ -1,142 +0,0 @@
----
-# TODO: Add support for choosing base image based on deployment_type and os
-# wanted (os wanted needs support added in bin/cluster with sane defaults:
-# fedora/centos for origin, rhel for enterprise)
-
-# TODO: create a role to encapsulate some of this complexity, possibly also
-# create a module to manage the storage tasks, network tasks, and possibly
-# even handle the libvirt tasks to set metadata in the domain xml and be able
-# to create/query data about vms without having to use xml the python libvirt
-# bindings look like a good candidate for this
-
-- name: Download Base Cloud image
- get_url:
- url: '{{ image_url }}'
- sha256sum: '{{ image_sha256 }}'
- dest: '{{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | difference([""]) | join(".") }}'
- when: ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"]
- register: downloaded_image
-
-- name: Uncompress xz compressed base cloud image
- command: 'unxz -kf {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
- args:
- creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}'
- when: image_compression in ["xz"] and downloaded_image.changed
-
-- name: Uncompress tgz compressed base cloud image
- command: 'tar zxvf {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
- args:
- creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}'
- when: image_compression in ["tgz"] and downloaded_image.changed
-
-- name: Uncompress gzip compressed base cloud image
- command: 'gunzip {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
- args:
- creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}'
- when: image_compression in ["gz"] and downloaded_image.changed
-
-- name: Create the cloud-init config drive path
- file:
- dest: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
- state: directory
- with_items: '{{ instances }}'
-
-- name: Create the cloud-init config drive files
- template:
- src: '{{ item[1] }}'
- dest: '{{ libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}'
- with_nested:
- - '{{ instances }}'
- - [ user-data, meta-data ]
-
-- name: Check for genisoimage
- command: which genisoimage
- register: which_genisoimage
-
-- name: Create the cloud-init config drive
- command: "{{ 'genisoimage' if which_genisoimage.rc == 0 else 'mkisofs' }} -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data"
- args:
- chdir: "{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/"
- creates: "{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso"
- with_items: '{{ instances }}'
-
-- name: Refresh the libvirt storage pool for openshift
- command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'
-
-- name: Create VM drives
- command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2'
- with_items: '{{ instances }}'
-
-- name: Create VM docker drives
- command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}-docker.qcow2 10G --format qcow2 --allocation 0'
- with_items: '{{ instances }}'
-
-- name: Create VMs
- virt:
- name: '{{ item }}'
- command: define
- xml: "{{ lookup('template', '../templates/domain.xml') }}"
- uri: '{{ libvirt_uri }}'
- with_items: '{{ instances }}'
-
-- name: Start VMs
- virt:
- name: '{{ item }}'
- state: running
- uri: '{{ libvirt_uri }}'
- with_items: '{{ instances }}'
-
-- name: Wait for the VMs to get an IP
- shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | egrep -c ''{{ instances | join("|") }}'''
- register: nb_allocated_ips
- until: nb_allocated_ips.stdout == '{{ instances | length }}'
- retries: 60
- delay: 3
- when: instances | length != 0
-
-- name: Collect IP addresses of the VMs
- shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}'''
- register: scratch_ip
- with_items: '{{ instances }}'
-
-- set_fact:
- ips: "{{ scratch_ip.results | default([]) | oo_collect('stdout') }}"
-
-- set_fact:
- node_label:
- type: "{{ g_sub_host_type }}"
- when: instances | length > 0 and type == "node"
-
-- set_fact:
- node_label:
- type: "{{ type }}"
- when: instances | length > 0 and type != "node"
-
-- name: Add new instances
- add_host:
- hostname: '{{ item.0 }}'
- ansible_ssh_host: '{{ item.1 }}'
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: "tag_environment-{{ cluster_env }}, tag_host-type-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}, tag_clusterid-{{ cluster_id }}"
- openshift_node_labels: "{{ node_label }}"
- libvirt_ip_address: "{{ item.1 }}"
- with_together:
- - '{{ instances }}'
- - '{{ ips }}'
-
-- name: Wait for ssh
- wait_for:
- host: '{{ item }}'
- port: 22
- with_items: '{{ ips }}'
-
-- name: Wait for openshift user setup
- command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null openshift@{{ item.1 }} echo openshift user is setup'
- register: result
- until: result.rc == 0
- retries: 30
- delay: 1
- with_together:
- - '{{ instances }}'
- - '{{ ips }}'
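One detail worth flagging in the deleted lease-wait task: its `until` embeds a literal Jinja template inside an already-templated conditional (`until: nb_allocated_ips.stdout == '{{ instances | length }}'`). That worked on the Ansible versions of this era, but the comparison is more robust with an explicit integer cast; a sketch of the safer form:

    # Sketch: wait until the DHCP lease count matches the number of instances.
    - name: Wait for the VMs to get an IP
      shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | egrep -c ''{{ instances | join("|") }}'''
      register: nb_allocated_ips
      until: nb_allocated_ips.stdout | int == instances | length
      retries: 60
      delay: 3
      when: instances | length != 0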
diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml
deleted file mode 100644
index 88504a5f6..000000000
--- a/playbooks/libvirt/openshift-cluster/templates/domain.xml
+++ /dev/null
@@ -1,65 +0,0 @@
-<domain type='kvm' id='8'>
- <name>{{ item }}</name>
- <memory unit='MiB'>{{ libvirt_instance_memory_mib }}</memory>
- <metadata xmlns:ansible="https://github.com/ansible/ansible">
- <ansible:tags>
- <ansible:tag>environment-{{ cluster_env }}</ansible:tag>
- <ansible:tag>clusterid-{{ cluster }}</ansible:tag>
- <ansible:tag>host-type-{{ type }}</ansible:tag>
- <ansible:tag>sub-host-type-{{ g_sub_host_type }}</ansible:tag>
- </ansible:tags>
- </metadata>
- <vcpu placement='static'>{{ libvirt_instance_vcpu }}</vcpu>
- <os>
- <type arch='x86_64' machine='pc'>hvm</type>
- <boot dev='hd'/>
- </os>
- <features>
- <acpi/>
- <apic/>
- <pae/>
- </features>
- <cpu mode='host-model'>
- <model fallback='allow'/>
- </cpu>
- <clock offset='utc'>
- <timer name='rtc' tickpolicy='catchup'/>
- <timer name='pit' tickpolicy='delay'/>
- <timer name='hpet' present='no'/>
- </clock>
- <on_poweroff>destroy</on_poweroff>
- <on_reboot>restart</on_reboot>
- <on_crash>restart</on_crash>
- <devices>
- <emulator>/usr/bin/qemu-system-x86_64</emulator>
- <disk type='file' device='disk'>
- <driver name='qemu' type='qcow2' discard='unmap'/>
- <source file='{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'/>
- <target dev='sda' bus='scsi'/>
- </disk>
- <disk type='file' device='disk'>
- <driver name='qemu' type='qcow2' discard='unmap'/>
- <source file='{{ libvirt_storage_pool_path }}/{{ item }}-docker.qcow2'/>
- <target dev='sdb' bus='scsi'/>
- </disk>
- <disk type='file' device='cdrom'>
- <driver name='qemu' type='raw'/>
- <source file='{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/>
- <target dev='sdc' bus='scsi'/>
- <readonly/>
- </disk>
- <controller type='scsi' model='virtio-scsi' />
- <interface type='network'>
- <source network='{{ libvirt_network }}'/>
- <model type='virtio'/>
- </interface>
- <serial type='pty'>
- <target port='0'/>
- </serial>
- <console type='pty'>
- <target type='serial' port='0'/>
- </console>
- <memballoon model='virtio'>
- </memballoon>
- </devices>
-</domain>
diff --git a/playbooks/libvirt/openshift-cluster/templates/meta-data b/playbooks/libvirt/openshift-cluster/templates/meta-data
deleted file mode 100644
index 6b421770d..000000000
--- a/playbooks/libvirt/openshift-cluster/templates/meta-data
+++ /dev/null
@@ -1,3 +0,0 @@
-instance-id: {{ item[0] }}
-hostname: {{ item[0] }}
-local-hostname: {{ item[0] }}.example.com
diff --git a/playbooks/libvirt/openshift-cluster/templates/network.xml b/playbooks/libvirt/openshift-cluster/templates/network.xml
deleted file mode 100644
index 0ce2a8342..000000000
--- a/playbooks/libvirt/openshift-cluster/templates/network.xml
+++ /dev/null
@@ -1,23 +0,0 @@
-<network>
- <name>{{ libvirt_network }}</name>
- <forward mode='nat'>
- <nat>
- <port start='1024' end='65535'/>
- </nat>
- </forward>
- <!-- TODO: query for first available virbr interface available -->
- <bridge name='virbr3' stp='on' delay='0'/>
- <!-- TODO: make overridable -->
- <domain name='example.com' localOnly='yes' />
- <dns>
- <!-- TODO: automatically add host entries -->
- </dns>
- <!-- TODO: query for available address space -->
- <ip address='192.168.55.1' netmask='255.255.255.0'>
- <dhcp>
- <range start='192.168.55.2' end='192.168.55.254'/>
- <!-- TODO: add static entries addresses for the hosts to be created -->
- </dhcp>
- </ip>
-</network>
-
diff --git a/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml b/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml
deleted file mode 100644
index da139afd0..000000000
--- a/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-<pool type='dir'>
- <name>{{ libvirt_storage_pool }}</name>
- <target>
- <path>{{ libvirt_storage_pool_path }}</path>
- </target>
-</pool>
diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data
deleted file mode 100644
index fbcf7c886..000000000
--- a/playbooks/libvirt/openshift-cluster/templates/user-data
+++ /dev/null
@@ -1,43 +0,0 @@
-#cloud-config
-disable_root: true
-
-hostname: {{ item[0] }}
-fqdn: {{ item[0] }}.example.com
-
-mounts:
-- [ sdb ]
-
-users:
- - default
- - name: root
- ssh_authorized_keys:
- - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
-
-system_info:
- default_user:
- name: openshift
- sudo: ["ALL=(ALL) NOPASSWD: ALL"]
-
-ssh_authorized_keys:
- - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
-
-write_files:
- - path: /etc/sudoers.d/00-openshift-no-requiretty
- permissions: 440
- content: |
- Defaults:openshift !requiretty
- - path: /etc/sysconfig/docker-storage-setup
- owner: root:root
- permissions: '0644'
- content: |
- DEVS=/dev/sdb
- VG=docker_vg
- EXTRA_DOCKER_STORAGE_OPTIONS='--storage-opt dm.blkdiscard=true'
- - path: /etc/systemd/system/fstrim.timer.d/hourly.conf
- content: |
- [Timer]
- OnCalendar=hourly
-
-runcmd:
- - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart
- - systemctl enable --now fstrim.timer
diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml
deleted file mode 100644
index 8a63d11a5..000000000
--- a/playbooks/libvirt/openshift-cluster/terminate.yml
+++ /dev/null
@@ -1,70 +0,0 @@
----
-# TODO: does not handle a non-existent cluster gracefully
-
-- name: Terminate instance(s)
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - set_fact: cluster_group=tag_clusterid-{{ cluster_id }}
- - add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_terminate
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: '{{ groups[cluster_group] | default([]) }}'
-
-- name: Unsubscribe VMs
- hosts: oo_hosts_to_terminate
- vars_files:
- - vars.yml
- roles:
- - role: rhel_unsubscribe
- when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
-
-- name: Terminate instance(s)
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - name: Destroy VMs
- virt:
- name: '{{ item[0] }}'
- command: '{{ item[1] }}'
- uri: '{{ libvirt_uri }}'
- with_nested:
- - "{{ groups['oo_hosts_to_terminate'] }}"
- - [ destroy, undefine ]
-
- - name: Delete VM drives
- command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}.qcow2'
- args:
- removes: '{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'
- with_items: "{{ groups['oo_hosts_to_terminate'] }}"
-
- - name: Delete VM docker drives
- command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}-docker.qcow2'
- args:
- removes: '{{ libvirt_storage_pool_path }}/{{ item }}-docker.qcow2'
- with_items: "{{ groups['oo_hosts_to_terminate'] }}"
-
- - name: Delete the VM cloud-init image
- file:
- path: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
- state: absent
- with_items: "{{ groups['oo_hosts_to_terminate'] }}"
-
- - name: Remove the cloud-init config directory
- file:
- path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
- state: absent
- with_items: "{{ groups['oo_hosts_to_terminate'] }}"
diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml
deleted file mode 100644
index a152135fc..000000000
--- a/playbooks/libvirt/openshift-cluster/update.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: '{{ g_all_hosts }}'
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- name: Populate oo_hosts_to_update group
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- - cluster_hosts.yml
- tasks:
- - name: Evaluate oo_hosts_to_update
- add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_update
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: '{{ g_all_hosts | default([]) }}'
-
-- include: ../../common/openshift-cluster/update_repos_and_packages.yml
-
-- include: config.yml
diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml
deleted file mode 100644
index 5156789e7..000000000
--- a/playbooks/libvirt/openshift-cluster/vars.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-default_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift-ansible"
-libvirt_storage_pool_path: "{{ lookup('oo_option', 'libvirt_storage_pool_path') | default(default_pool_path, True) }}"
-libvirt_storage_pool: "{{ lookup('oo_option', 'libvirt_storage_pool') | default('openshift-ansible', True) }}"
-libvirt_network: "{{ lookup('oo_option', 'libvirt_network') | default('openshift-ansible', True) }}"
-libvirt_instance_memory_mib: "{{ lookup('oo_option', 'libvirt_instance_memory_mib') | default(1024, True) }}"
-libvirt_instance_vcpu: "{{ lookup('oo_option', 'libvirt_instance_vcpu') | default(2, True) }}"
-libvirt_uri: "{{ lookup('oo_option', 'libvirt_uri') | default('qemu:///system', True) }}"
-debug_level: 2
-
-# Automatic download of the qcow2 image for RHEL cannot be done directly from the RedHat portal because it requires authentication.
-# The default value of image_url for enterprise and openshift-enterprise deployment types below won't work.
-deployment_rhel7_ent_base:
- image:
- url: "{{ lookup('oo_option', 'image_url') |
- default('https://access.cdn.redhat.com//content/origin/files/sha256/25/25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0/rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
- name: "{{ lookup('oo_option', 'image_name') |
- default('rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
- sha256: "{{ lookup('oo_option', 'image_sha256') |
- default('25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0', True) }}"
- compression: ""
- ssh_user: openshift
- become: yes
-
-deployment_vars:
- origin:
- image:
- url: "{{ lookup('oo_option', 'image_url') |
- default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1602.qcow2.xz', True) }}"
- compression: "{{ lookup('oo_option', 'image_compression') |
- default('xz', True) }}"
- name: "{{ lookup('oo_option', 'image_name') |
- default('CentOS-7-x86_64-GenericCloud.qcow2', True) }}"
- sha256: "{{ lookup('oo_option', 'image_sha256') |
- default('dd0f5e610e7c5ffacaca35ed7a78a19142a588f4543da77b61c1fb0d74400471', True) }}"
- ssh_user: openshift
- become: yes
- enterprise: "{{ deployment_rhel7_ent_base }}"
- openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
- atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
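The deleted vars above all chain lookup('oo_option', ...) into Jinja2's two-argument default filter. Passing true as the second argument makes falsey values (such as the empty string an unset oo_option lookup falls back to) use the default as well, not only undefined variables. A minimal sketch of that behavior, assuming an option variable that may be empty:

    from jinja2 import Environment

    env = Environment()
    # default(value, true) treats falsey results (e.g. an empty string from
    # an unset oo_option lookup) as missing, not just undefined ones
    tpl = env.from_string("{{ opt | default('openshift-ansible', true) }}")
    print(tpl.render(opt=""))        # -> openshift-ansible
    print(tpl.render(opt="custom"))  # -> custom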
diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md
deleted file mode 100644
index a6d8d6995..000000000
--- a/playbooks/openstack/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# OpenStack playbooks
-
-This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
-which is community supported; most usage of it is considered deprecated.
diff --git a/playbooks/openstack/openshift-cluster/cluster_hosts.yml b/playbooks/openstack/openshift-cluster/cluster_hosts.yml
deleted file mode 100644
index 12c9fd442..000000000
--- a/playbooks/openstack/openshift-cluster/cluster_hosts.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-g_all_hosts: "{{ groups['meta-clusterid_' ~ cluster_id] | default([])
- | intersect(groups['meta-environment_' ~ cluster_env] | default([])) }}"
-
-g_etcd_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_etcd'] | default([])) }}"
-
-g_new_etcd_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_etcd'] | default([])) }}"
-
-g_lb_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_lb'] | default([])) }}"
-
-g_nfs_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_nfs'] | default([])) }}"
-
-g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}"
-
-g_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_master'] | default([])) }}"
-
-g_new_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_master'] | default([])) }}"
-
-g_node_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_node'] | default([])) }}"
-
-g_new_node_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_node'] | default([])) }}"
-
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['meta-sub-host-type_infra'] | default([])) }}"
-
-g_compute_hosts: "{{ g_node_hosts | intersect(groups['meta-sub-host-type_compute'] | default([])) }}"
diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml
deleted file mode 100644
index f9ddb9469..000000000
--- a/playbooks/openstack/openshift-cluster/config.yml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- include: ../../common/openshift-cluster/config.yml
- vars:
- g_nodeonmaster: true
- g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].become }}"
- openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: "{{ debug_level }}"
- openshift_deployment_type: "{{ deployment_type }}"
- openshift_hosted_registry_selector: 'type=infra'
- openshift_hosted_router_selector: 'type=infra'
- openshift_master_cluster_method: 'native'
- openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
- os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
- openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
- openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
- openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
deleted file mode 100644
index 82329eac1..000000000
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ /dev/null
@@ -1,508 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: OpenShift cluster
-
-parameters:
-
- cluster_env:
- type: string
- label: Cluster environment
- description: Environment of the cluster
-
- cluster_id:
- type: string
- label: Cluster ID
- description: Identifier of the cluster
-
- subnet_24_prefix:
- type: string
- label: subnet /24 prefix
- description: /24 subnet prefix of the network of the cluster (dot separated number triplet)
-
- dns_nameservers:
- type: comma_delimited_list
- label: DNS nameservers list
- description: List of DNS nameservers
-
- external_net:
- type: string
- label: External network
- description: Name of the external network
- default: external
-
- ssh_public_key:
- type: string
- label: SSH public key
- description: SSH public key
- hidden: true
-
- ssh_incoming:
- type: string
- label: Source of ssh connections
- description: Source of legitimate ssh connections
- default: 0.0.0.0/0
-
- node_port_incoming:
- type: string
- label: Source of node port connections
- description: Authorized sources targeting node ports
- default: 0.0.0.0/0
-
- num_etcd:
- type: number
- label: Number of etcd nodes
- description: Number of etcd nodes
-
- num_masters:
- type: number
- label: Number of masters
- description: Number of masters
-
- num_nodes:
- type: number
- label: Number of compute nodes
- description: Number of compute nodes
-
- num_infra:
- type: number
- label: Number of infrastructure nodes
- description: Number of infrastructure nodes
-
- etcd_image:
- type: string
- label: Etcd image
- description: Name of the image for the etcd servers
-
- master_image:
- type: string
- label: Master image
- description: Name of the image for the master servers
-
- node_image:
- type: string
- label: Node image
- description: Name of the image for the compute node servers
-
- infra_image:
- type: string
- label: Infra image
- description: Name of the image for the infra node servers
-
- etcd_flavor:
- type: string
- label: Etcd flavor
- description: Flavor of the etcd servers
-
- master_flavor:
- type: string
- label: Master flavor
- description: Flavor of the master servers
-
- node_flavor:
- type: string
- label: Node flavor
- description: Flavor of the compute node servers
-
- infra_flavor:
- type: string
- label: Infra flavor
- description: Flavor of the infra node servers
-
-outputs:
-
- etcd_names:
- description: Name of the etcds
- value: { get_attr: [ etcd, name ] }
-
- etcd_ips:
- description: IPs of the etcds
- value: { get_attr: [ etcd, private_ip ] }
-
- etcd_floating_ips:
- description: Floating IPs of the etcds
- value: { get_attr: [ etcd, floating_ip ] }
-
- master_names:
- description: Name of the masters
- value: { get_attr: [ masters, name ] }
-
- master_ips:
- description: IPs of the masters
- value: { get_attr: [ masters, private_ip ] }
-
- master_floating_ips:
- description: Floating IPs of the masters
- value: { get_attr: [ masters, floating_ip ] }
-
- node_names:
- description: Name of the nodes
- value: { get_attr: [ compute_nodes, name ] }
-
- node_ips:
- description: IPs of the nodes
- value: { get_attr: [ compute_nodes, private_ip ] }
-
- node_floating_ips:
- description: Floating IPs of the nodes
- value: { get_attr: [ compute_nodes, floating_ip ] }
-
- infra_names:
- description: Name of the nodes
- value: { get_attr: [ infra_nodes, name ] }
-
- infra_ips:
- description: IPs of the nodes
- value: { get_attr: [ infra_nodes, private_ip ] }
-
- infra_floating_ips:
- description: Floating IPs of the nodes
- value: { get_attr: [ infra_nodes, floating_ip ] }
-
-resources:
-
- net:
- type: OS::Neutron::Net
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-net
- params:
- cluster_id: { get_param: cluster_id }
-
- subnet:
- type: OS::Neutron::Subnet
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-subnet
- params:
- cluster_id: { get_param: cluster_id }
- network: { get_resource: net }
- cidr:
- str_replace:
- template: subnet_24_prefix.0/24
- params:
- subnet_24_prefix: { get_param: subnet_24_prefix }
- dns_nameservers: { get_param: dns_nameservers }
-
- router:
- type: OS::Neutron::Router
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-router
- params:
- cluster_id: { get_param: cluster_id }
- external_gateway_info:
- network: { get_param: external_net }
-
- interface:
- type: OS::Neutron::RouterInterface
- properties:
- router_id: { get_resource: router }
- subnet_id: { get_resource: subnet }
-
- keypair:
- type: OS::Nova::KeyPair
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-keypair
- params:
- cluster_id: { get_param: cluster_id }
- public_key: { get_param: ssh_public_key }
-
- master-secgrp:
- type: OS::Neutron::SecurityGroup
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-master-secgrp
- params:
- cluster_id: { get_param: cluster_id }
- description:
- str_replace:
- template: Security group for cluster_id OpenShift cluster master
- params:
- cluster_id: { get_param: cluster_id }
- rules:
- - direction: ingress
- protocol: tcp
- port_range_min: 22
- port_range_max: 22
- remote_ip_prefix: { get_param: ssh_incoming }
- - direction: ingress
- protocol: tcp
- port_range_min: 4001
- port_range_max: 4001
- - direction: ingress
- protocol: tcp
- port_range_min: 8443
- port_range_max: 8443
- - direction: ingress
- protocol: tcp
- port_range_min: 8444
- port_range_max: 8444
- - direction: ingress
- protocol: tcp
- port_range_min: 53
- port_range_max: 53
- - direction: ingress
- protocol: udp
- port_range_min: 53
- port_range_max: 53
- - direction: ingress
- protocol: tcp
- port_range_min: 8053
- port_range_max: 8053
- - direction: ingress
- protocol: udp
- port_range_min: 8053
- port_range_max: 8053
- - direction: ingress
- protocol: tcp
- port_range_min: 24224
- port_range_max: 24224
- - direction: ingress
- protocol: udp
- port_range_min: 24224
- port_range_max: 24224
- - direction: ingress
- protocol: tcp
- port_range_min: 2224
- port_range_max: 2224
- - direction: ingress
- protocol: udp
- port_range_min: 5404
- port_range_max: 5404
- - direction: ingress
- protocol: udp
- port_range_min: 5405
- port_range_max: 5405
- - direction: ingress
- protocol: tcp
- port_range_min: 9090
- port_range_max: 9090
-
- etcd-secgrp:
- type: OS::Neutron::SecurityGroup
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-etcd-secgrp
- params:
- cluster_id: { get_param: cluster_id }
- description:
- str_replace:
- template: Security group for cluster_id etcd cluster
- params:
- cluster_id: { get_param: cluster_id }
- rules:
- - direction: ingress
- protocol: tcp
- port_range_min: 22
- port_range_max: 22
- remote_ip_prefix: { get_param: ssh_incoming }
- - direction: ingress
- protocol: tcp
- port_range_min: 2379
- port_range_max: 2379
- remote_mode: remote_group_id
- remote_group_id: { get_resource: master-secgrp }
- - direction: ingress
- protocol: tcp
- port_range_min: 2380
- port_range_max: 2380
- remote_mode: remote_group_id
-
- node-secgrp:
- type: OS::Neutron::SecurityGroup
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-node-secgrp
- params:
- cluster_id: { get_param: cluster_id }
- description:
- str_replace:
- template: Security group for cluster_id OpenShift cluster nodes
- params:
- cluster_id: { get_param: cluster_id }
- rules:
- - direction: ingress
- protocol: tcp
- port_range_min: 22
- port_range_max: 22
- remote_ip_prefix: { get_param: ssh_incoming }
- - direction: ingress
- protocol: tcp
- port_range_min: 10250
- port_range_max: 10250
- remote_mode: remote_group_id
- - direction: ingress
- protocol: udp
- port_range_min: 4789
- port_range_max: 4789
- remote_mode: remote_group_id
- - direction: ingress
- protocol: tcp
- port_range_min: 30000
- port_range_max: 32767
- remote_ip_prefix: { get_param: node_port_incoming }
-
- infra-secgrp:
- type: OS::Neutron::SecurityGroup
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-infra-secgrp
- params:
- cluster_id: { get_param: cluster_id }
- description:
- str_replace:
- template: Security group for cluster_id OpenShift infrastructure cluster nodes
- params:
- cluster_id: { get_param: cluster_id }
- rules:
- - direction: ingress
- protocol: tcp
- port_range_min: 80
- port_range_max: 80
- - direction: ingress
- protocol: tcp
- port_range_min: 443
- port_range_max: 443
-
- etcd:
- type: OS::Heat::ResourceGroup
- properties:
- count: { get_param: num_etcd }
- resource_def:
- type: heat_stack_server.yaml
- properties:
- name:
- str_replace:
- template: cluster_id-k8s_type-%index%
- params:
- cluster_id: { get_param: cluster_id }
- k8s_type: etcd
- cluster_env: { get_param: cluster_env }
- cluster_id: { get_param: cluster_id }
- type: etcd
- image: { get_param: etcd_image }
- flavor: { get_param: etcd_flavor }
- key_name: { get_resource: keypair }
- net: { get_resource: net }
- subnet: { get_resource: subnet }
- secgrp:
- - { get_resource: etcd-secgrp }
- floating_network: { get_param: external_net }
- net_name:
- str_replace:
- template: openshift-ansible-cluster_id-net
- params:
- cluster_id: { get_param: cluster_id }
- depends_on:
- - interface
-
- masters:
- type: OS::Heat::ResourceGroup
- properties:
- count: { get_param: num_masters }
- resource_def:
- type: heat_stack_server.yaml
- properties:
- name:
- str_replace:
- template: cluster_id-k8s_type-%index%
- params:
- cluster_id: { get_param: cluster_id }
- k8s_type: master
- cluster_env: { get_param: cluster_env }
- cluster_id: { get_param: cluster_id }
- type: master
- image: { get_param: master_image }
- flavor: { get_param: master_flavor }
- key_name: { get_resource: keypair }
- net: { get_resource: net }
- subnet: { get_resource: subnet }
- secgrp:
- - { get_resource: master-secgrp }
- - { get_resource: node-secgrp }
- floating_network: { get_param: external_net }
- net_name:
- str_replace:
- template: openshift-ansible-cluster_id-net
- params:
- cluster_id: { get_param: cluster_id }
- depends_on:
- - interface
-
- compute_nodes:
- type: OS::Heat::ResourceGroup
- properties:
- count: { get_param: num_nodes }
- resource_def:
- type: heat_stack_server.yaml
- properties:
- name:
- str_replace:
- template: cluster_id-k8s_type-sub_host_type-%index%
- params:
- cluster_id: { get_param: cluster_id }
- k8s_type: node
- sub_host_type: compute
- cluster_env: { get_param: cluster_env }
- cluster_id: { get_param: cluster_id }
- type: node
- subtype: compute
- image: { get_param: node_image }
- flavor: { get_param: node_flavor }
- key_name: { get_resource: keypair }
- net: { get_resource: net }
- subnet: { get_resource: subnet }
- secgrp:
- - { get_resource: node-secgrp }
- floating_network: { get_param: external_net }
- net_name:
- str_replace:
- template: openshift-ansible-cluster_id-net
- params:
- cluster_id: { get_param: cluster_id }
- depends_on:
- - interface
-
- infra_nodes:
- type: OS::Heat::ResourceGroup
- properties:
- count: { get_param: num_infra }
- resource_def:
- type: heat_stack_server.yaml
- properties:
- name:
- str_replace:
- template: cluster_id-k8s_type-sub_host_type-%index%
- params:
- cluster_id: { get_param: cluster_id }
- k8s_type: node
- sub_host_type: infra
- cluster_env: { get_param: cluster_env }
- cluster_id: { get_param: cluster_id }
- type: node
- subtype: infra
- image: { get_param: infra_image }
- flavor: { get_param: infra_flavor }
- key_name: { get_resource: keypair }
- net: { get_resource: net }
- subnet: { get_resource: subnet }
- secgrp:
- - { get_resource: node-secgrp }
- - { get_resource: infra-secgrp }
- floating_network: { get_param: external_net }
- net_name:
- str_replace:
- template: openshift-ansible-cluster_id-net
- params:
- cluster_id: { get_param: cluster_id }
- depends_on:
- - interface
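In the ResourceGroup definitions above, Heat substitutes %index% with each member's ordinal while str_replace fills in cluster_id and k8s_type. A sketch of the resulting server names, with hypothetical parameter values:

    # hypothetical parameter values for illustration
    cluster_id, k8s_type, num_etcd = "mycluster", "etcd", 3

    # Heat replaces %index% with 0..count-1 for each ResourceGroup member,
    # so the etcd group above yields names like these:
    names = ["{}-{}-{}".format(cluster_id, k8s_type, i) for i in range(num_etcd)]
    print(names)  # -> ['mycluster-etcd-0', 'mycluster-etcd-1', 'mycluster-etcd-2']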
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
deleted file mode 100644
index 435139849..000000000
--- a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
+++ /dev/null
@@ -1,152 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: OpenShift cluster server
-
-parameters:
-
- name:
- type: string
- label: Name
- description: Name
-
- cluster_env:
- type: string
- label: Cluster environment
- description: Environment of the cluster
-
- cluster_id:
- type: string
- label: Cluster ID
- description: Identifier of the cluster
-
- type:
- type: string
- label: Type
- description: Type master or node
-
- subtype:
- type: string
- label: Sub-type
- description: Sub-type compute or infra for nodes, default otherwise
- default: default
-
- key_name:
- type: string
- label: Key name
- description: Key name of keypair
-
- image:
- type: string
- label: Image
- description: Name of the image
-
- flavor:
- type: string
- label: Flavor
- description: Name of the flavor
-
- net:
- type: string
- label: Net ID
- description: Net resource
-
- net_name:
- type: string
- label: Net name
- description: Net name
-
- subnet:
- type: string
- label: Subnet ID
- description: Subnet resource
-
- secgrp:
- type: comma_delimited_list
- label: Security groups
- description: Security group resources
-
- floating_network:
- type: string
- label: Floating network
- description: Network to allocate floating IP from
-
-outputs:
-
- name:
- description: Name of the server
- value: { get_attr: [ server, name ] }
-
- private_ip:
- description: Private IP of the server
- value:
- get_attr:
- - server
- - addresses
- - { get_param: net_name }
- - 0
- - addr
-
- floating_ip:
- description: Floating IP of the server
- value:
- get_attr:
- - server
- - addresses
- - { get_param: net_name }
- - 1
- - addr
-
-resources:
-
- server:
- type: OS::Nova::Server
- properties:
- name: { get_param: name }
- key_name: { get_param: key_name }
- image: { get_param: image }
- flavor: { get_param: flavor }
- networks:
- - port: { get_resource: port }
- user_data: { get_resource: config }
- user_data_format: RAW
- metadata:
- environment: { get_param: cluster_env }
- clusterid: { get_param: cluster_id }
- host-type: { get_param: type }
- sub-host-type: { get_param: subtype }
-
- port:
- type: OS::Neutron::Port
- properties:
- network: { get_param: net }
- fixed_ips:
- - subnet: { get_param: subnet }
- security_groups: { get_param: secgrp }
-
- floating-ip:
- type: OS::Neutron::FloatingIP
- properties:
- floating_network: { get_param: floating_network }
- port_id: { get_resource: port }
-
- config:
- type: OS::Heat::CloudConfig
- properties:
- cloud_config:
- disable_root: true
-
- hostname: { get_param: name }
-
- system_info:
- default_user:
- name: openshift
- sudo: ["ALL=(ALL) NOPASSWD: ALL"]
-
- write_files:
- - path: /etc/sudoers.d/00-openshift-no-requiretty
- permissions: 440
- # content: Defaults:openshift !requiretty
- # Encoded in base64 to be sure that we do not forget the trailing newline or
- # sudo will not be able to parse that file
- encoding: b64
- content: RGVmYXVsdHM6b3BlbnNoaWZ0ICFyZXF1aXJldHR5Cg==
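The write_files comment above explains why the sudoers drop-in is base64-encoded: sudo rejects the file if the trailing newline is lost. The encoded value can be reproduced or verified like this:

    import base64

    content = "Defaults:openshift !requiretty\n"  # trailing newline is required by sudo
    encoded = base64.b64encode(content.encode()).decode()
    print(encoded)  # -> RGVmYXVsdHM6b3BlbnNoaWZ0ICFyZXF1aXJldHR5Cg==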
diff --git a/playbooks/openstack/openshift-cluster/filter_plugins b/playbooks/openstack/openshift-cluster/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/openstack/openshift-cluster/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
deleted file mode 100644
index c0bc12f55..000000000
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ /dev/null
@@ -1,191 +0,0 @@
----
-- name: Launch instance(s)
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- # TODO: Write an Ansible module for dealing with HEAT stacks
- # Dealing with the outputs is currently terrible
-
- - name: Check OpenStack stack
- command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack'
- register: stack_show_result
- changed_when: false
- failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr
-
- - set_fact:
- heat_stack_action: 'stack-create'
- when: stack_show_result.rc == 1
- - set_fact:
- heat_stack_action: 'stack-update'
- when: stack_show_result.rc == 0
-
- - name: Create or Update OpenStack Stack
- command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }}
- --timeout {{ openstack_heat_timeout }}
- -P cluster_env={{ cluster_env }}
- -P cluster_id={{ cluster_id }}
- -P subnet_24_prefix={{ openstack_subnet_24_prefix }}
- -P dns_nameservers={{ openstack_network_dns | join(",") }}
- -P external_net={{ openstack_network_external_net }}
- -P ssh_public_key="{{ openstack_ssh_public_key }}"
- -P ssh_incoming={{ openstack_ssh_access_from }}
- -P node_port_incoming={{ openstack_node_port_access_from }}
- -P num_etcd={{ num_etcd }}
- -P num_masters={{ num_masters }}
- -P num_nodes={{ num_nodes }}
- -P num_infra={{ num_infra }}
- -P etcd_image={{ deployment_vars[deployment_type].image }}
- -P master_image={{ deployment_vars[deployment_type].image }}
- -P node_image={{ deployment_vars[deployment_type].image }}
- -P infra_image={{ deployment_vars[deployment_type].image }}
- -P etcd_flavor={{ openstack_flavor["etcd"] }}
- -P master_flavor={{ openstack_flavor["master"] }}
- -P node_flavor={{ openstack_flavor["node"] }}
- -P infra_flavor={{ openstack_flavor["infra"] }}
- openshift-ansible-{{ cluster_id }}-stack'
- args:
- chdir: '{{ playbook_dir }}'
-
- - name: Wait for OpenStack Stack readiness
- shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}'''
- register: stack_show_status_result
- until: stack_show_status_result.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS']
- retries: 30
- delay: 5
-
- - name: Display the stack resources
- command: 'heat resource-list openshift-ansible-{{ cluster_id }}-stack'
- register: stack_resource_list_result
- when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
-
- - name: Display the stack status
- command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack'
- register: stack_show_result
- when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
-
- - name: Delete the stack
- command: 'heat stack-delete openshift-ansible-{{ cluster_id }}-stack'
- when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
-
- - fail:
- msg: |
-
- +--------------------------------------+
- | ^ |
- | /!\ Failed to create the heat stack |
- | /___\ |
- +--------------------------------------+
-
- Here is the list of stack resources and their status:
- {{ stack_resource_list_result.stdout }}
-
- Here is the status of the stack:
- {{ stack_show_result.stdout }}
-
- ^ Failed to create the heat stack
- /!\
- /___\ Please check the `stack_status_reason` line in the table above to see why.
- when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
-
- - name: Read OpenStack Stack outputs
- command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack'
- register: stack_show_result
-
- - set_fact:
- parsed_outputs: "{{ stack_show_result | oo_parse_heat_stack_outputs }}"
-
- - name: Add new etcd instances groups and variables
- add_host:
- hostname: '{{ item[0] }}'
- ansible_ssh_host: '{{ item[2] }}'
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: 'meta-environment_{{ cluster_env }}, meta-host-type_etcd, meta-sub-host-type_default, meta-clusterid_{{ cluster_id }}'
- openshift_node_labels:
- type: "etcd"
- openstack:
- public_v4: '{{ item[2] }}'
- private_v4: '{{ item[1] }}'
- with_together:
- - '{{ parsed_outputs.etcd_names }}'
- - '{{ parsed_outputs.etcd_ips }}'
- - '{{ parsed_outputs.etcd_floating_ips }}'
-
- - name: Add new master instances groups and variables
- add_host:
- hostname: '{{ item[0] }}'
- ansible_ssh_host: '{{ item[2] }}'
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: 'meta-environment_{{ cluster_env }}, meta-host-type_master, meta-sub-host-type_default, meta-clusterid_{{ cluster_id }}'
- openshift_node_labels:
- type: "master"
- openstack:
- public_v4: '{{ item[2] }}'
- private_v4: '{{ item[1] }}'
- with_together:
- - '{{ parsed_outputs.master_names }}'
- - '{{ parsed_outputs.master_ips }}'
- - '{{ parsed_outputs.master_floating_ips }}'
-
- - name: Add new node instances groups and variables
- add_host:
- hostname: '{{ item[0] }}'
- ansible_ssh_host: '{{ item[2] }}'
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: 'meta-environment_{{ cluster_env }}, meta-host-type_node, meta-sub-host-type_compute, meta-clusterid_{{ cluster_id }}'
- openshift_node_labels:
- type: "compute"
- openstack:
- public_v4: '{{ item[2] }}'
- private_v4: '{{ item[1] }}'
- with_together:
- - '{{ parsed_outputs.node_names }}'
- - '{{ parsed_outputs.node_ips }}'
- - '{{ parsed_outputs.node_floating_ips }}'
-
- - name: Add new infra instances groups and variables
- add_host:
- hostname: '{{ item[0] }}'
- ansible_ssh_host: '{{ item[2] }}'
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: 'meta-environment_{{ cluster_env }}, meta-host-type_node, meta-sub-host-type_infra, meta-clusterid_{{ cluster_id }}'
- openshift_node_labels:
- type: "infra"
- openstack:
- public_v4: '{{ item[2] }}'
- private_v4: '{{ item[1] }}'
- with_together:
- - '{{ parsed_outputs.infra_names }}'
- - '{{ parsed_outputs.infra_ips }}'
- - '{{ parsed_outputs.infra_floating_ips }}'
-
- - name: Wait for ssh
- wait_for:
- host: '{{ item }}'
- port: 22
- with_flattened:
- - '{{ parsed_outputs.master_floating_ips }}'
- - '{{ parsed_outputs.node_floating_ips }}'
- - '{{ parsed_outputs.infra_floating_ips }}'
-
- - name: Wait for user setup
- command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ deployment_vars[deployment_type].ssh_user }}@{{ item }} echo {{ deployment_vars[deployment_type].ssh_user }} user is setup'
- register: result
- until: result.rc == 0
- retries: 30
- delay: 1
- with_flattened:
- - '{{ parsed_outputs.master_floating_ips }}'
- - '{{ parsed_outputs.node_floating_ips }}'
- - '{{ parsed_outputs.infra_floating_ips }}'
-
-- include: update.yml
-
-- include: list.yml
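The readiness checks in the deleted launch.yml shell out to heat stack-show and scrape the stack_status row with awk. An equivalent of that scrape in Python, assuming the CLI's usual pipe-delimited property table:

    def stack_status(table_output):
        """Return the stack_status value from `heat stack-show` table output."""
        for line in table_output.splitlines():
            cols = [c.strip() for c in line.split("|")]
            # a property row looks like: | stack_status | CREATE_COMPLETE |
            if len(cols) > 2 and cols[1] == "stack_status":
                return cols[2]
        return ""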
diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml
deleted file mode 100644
index 6c6f671be..000000000
--- a/playbooks/openstack/openshift-cluster/list.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: Generate oo_list_hosts group
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - set_fact: scratch_group=meta-clusterid_{{ cluster_id }}
- when: cluster_id != ''
- - set_fact: scratch_group=all
- when: cluster_id == ''
- - add_host:
- name: "{{ item }}"
- groups: oo_list_hosts
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_ssh_host: "{{ hostvars[item].ansible_ssh_host | default(item) }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- oo_public_ipv4: "{{ hostvars[item].openstack.public_v4 }}"
- oo_private_ipv4: "{{ hostvars[item].openstack.private_v4 }}"
- with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
- - debug:
- msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster('meta-') }}"
diff --git a/playbooks/openstack/openshift-cluster/lookup_plugins b/playbooks/openstack/openshift-cluster/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/openstack/openshift-cluster/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/openstack/openshift-cluster/roles b/playbooks/openstack/openshift-cluster/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/openstack/openshift-cluster/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles \ No newline at end of file
diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml
deleted file mode 100644
index affb57117..000000000
--- a/playbooks/openstack/openshift-cluster/terminate.yml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-- name: Terminate instance(s)
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_terminate
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ (groups['meta-environment_' ~ cluster_env]|default([])) | intersect(groups['meta-clusterid_' ~ cluster_id ]|default([])) }}"
-
-- name: Unsubscribe VMs
- hosts: oo_hosts_to_terminate
- vars_files:
- - vars.yml
- roles:
- - role: rhel_unsubscribe
- when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
-
-- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- - name: Delete the OpenStack Stack
- command: 'heat stack-delete openshift-ansible-{{ cluster_id }}-stack'
- register: stack_delete_result
- changed_when: stack_delete_result.rc == 0
- failed_when: stack_delete_result.rc != 0 and 'could not be found' not in stack_delete_result.stdout
-
- - name: Wait for the completion of the OpenStack Stack deletion
- shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}'''
- when: stack_delete_result.changed
- register: stack_show_result
- until: stack_show_result.stdout != 'DELETE_IN_PROGRESS'
- retries: 60
- delay: 5
- failed_when: '"Stack not found" not in stack_show_result.stderr and
- stack_show_result.stdout != "DELETE_COMPLETE"'
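The deletion play above polls heat stack-show until the stack either disappears or stops reporting DELETE_IN_PROGRESS, with 60 retries at 5-second intervals. The same loop, sketched directly in Python (the stack name is hypothetical):

    import subprocess
    import time

    def wait_for_stack_deletion(stack, retries=60, delay=5):
        for _ in range(retries):
            result = subprocess.run(["heat", "stack-show", stack],
                                    capture_output=True, text=True)
            if "Stack not found" in result.stderr:
                return  # stack is already gone
            if "DELETE_IN_PROGRESS" not in result.stdout:
                return  # e.g. DELETE_COMPLETE
            time.sleep(delay)
        raise TimeoutError("stack %s was still deleting after %d tries" % (stack, retries))

    wait_for_stack_deletion("openshift-ansible-mycluster-stack")  # hypothetical name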
diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml
deleted file mode 100644
index 6d2af3d26..000000000
--- a/playbooks/openstack/openshift-cluster/update.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts }}"
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: vars.yml
- - include_vars: cluster_hosts.yml
-
-- name: Populate oo_hosts_to_update group
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Evaluate oo_hosts_to_update
- add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_update
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ g_all_hosts | default([]) }}"
-
-- include: ../../common/openshift-cluster/update_repos_and_packages.yml
-
-- include: config.yml
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
deleted file mode 100644
index ba2855b73..000000000
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-# yamllint disable rule:colons
----
-debug_level: 2
-openstack_infra_heat_stack: "{{ lookup('oo_option', 'infra_heat_stack' ) |
- default('files/heat_stack.yaml', True) }}"
-openstack_subnet_24_prefix: "{{ lookup('oo_option', 'subnet_24_prefix' ) |
- default('192.168.' + ( ( 1048576 | random % 256 ) | string() ), True) }}"
-openstack_network_external_net: "{{ lookup('oo_option', 'external_net' ) |
- default('external', True) }}"
-openstack_network_dns: "{{ lookup('oo_option', 'dns' ) |
- default('8.8.8.8,8.8.4.4', True) | oo_split() }}"
-openstack_ssh_public_key: "{{ lookup('file', lookup('oo_option', 'public_key') |
- default('~/.ssh/id_rsa.pub', True)) }}"
-openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') |
- default('0.0.0.0/0', True) }}"
-openstack_node_port_access_from: "{{ lookup('oo_option', 'node_port_from') |
- default('0.0.0.0/0', True) }}"
-openstack_heat_timeout: "{{ lookup('oo_option', 'heat_timeout') |
- default('3', True) }}"
-openstack_flavor:
- etcd: "{{ lookup('oo_option', 'etcd_flavor' ) | default('m1.small', True) }}"
- master: "{{ lookup('oo_option', 'master_flavor' ) | default('m1.small', True) }}"
- infra: "{{ lookup('oo_option', 'infra_flavor' ) | default('m1.small', True) }}"
- node: "{{ lookup('oo_option', 'node_flavor' ) | default('m1.medium', True) }}"
-
-deployment_rhel7_ent_base:
- image: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.2-20151102.0.x86_64', True) }}"
- ssh_user: openshift
- become: yes
-
-deployment_vars:
- origin:
- image: "{{ lookup('oo_option', 'image_name') | default('centos-70-raw', True) }}"
- ssh_user: openshift
- become: yes
- enterprise: "{{ deployment_rhel7_ent_base }}"
- openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
- atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
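The subnet_24_prefix default above builds a random 192.168.x prefix in Jinja2: Ansible's random filter on an integer returns a value below it, and because filters bind tighter than arithmetic, the modulo then folds that into 0-255. The same default expressed in plain Python:

    import random

    # equivalent of: '192.168.' + ((1048576 | random % 256) | string)
    openstack_subnet_24_prefix = "192.168." + str(random.randrange(1048576) % 256)
    print(openstack_subnet_24_prefix)  # e.g. -> 192.168.137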
diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml
index 0b3a2a69d..ce2ae8365 100644
--- a/roles/ansible_service_broker/vars/openshift-enterprise.yml
+++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml
@@ -1,7 +1,7 @@
---
__ansible_service_broker_image_prefix: registry.access.redhat.com/openshift3/ose-
-__ansible_service_broker_image_tag: latest
+__ansible_service_broker_image_tag: v3.6
__ansible_service_broker_etcd_image_prefix: rhel7/
__ansible_service_broker_etcd_image_tag: latest
diff --git a/roles/calico/defaults/main.yaml b/roles/calico/defaults/main.yaml
index b1907f8cb..be73e8a73 100644
--- a/roles/calico/defaults/main.yaml
+++ b/roles/calico/defaults/main.yaml
@@ -11,4 +11,4 @@ calico_url_ipam: "https://github.com/projectcalico/cni-plugin/releases/download/
calico_ipv4pool_ipip: "always"
calico_log_dir: "/var/log/calico"
-calico_node_image: "calico/node:v2.4.1"
+calico_node_image: "calico/node:v2.5.0"
diff --git a/roles/calico_master/defaults/main.yaml b/roles/calico_master/defaults/main.yaml
index d40286aba..01a2b9529 100644
--- a/roles/calico_master/defaults/main.yaml
+++ b/roles/calico_master/defaults/main.yaml
@@ -3,5 +3,5 @@ kubeconfig: "{{ openshift.common.config_base }}/master/openshift-master.kubeconf
calicoctl_bin_dir: "/usr/local/bin/"
-calico_url_calicoctl: "https://github.com/projectcalico/calicoctl/releases/download/v1.4.0/calicoctl"
+calico_url_calicoctl: "https://github.com/projectcalico/calicoctl/releases/download/v1.5.0/calicoctl"
calico_url_policy_controller: "quay.io/calico/kube-policy-controller:v0.7.0"
diff --git a/roles/cockpit/defaults/main.yml b/roles/cockpit/defaults/main.yml
index cbe5bb92b..15c40e3b5 100644
--- a/roles/cockpit/defaults/main.yml
+++ b/roles/cockpit/defaults/main.yml
@@ -1,6 +1,6 @@
---
-r_cockpit_firewall_enabled: True
-r_cockpit_use_firewalld: False
+r_cockpit_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_cockpit_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
r_cockpit_os_firewall_deny: []
r_cockpit_os_firewall_allow:
diff --git a/roles/dns/README.md b/roles/dns/README.md
deleted file mode 100644
index 9a88ce97c..000000000
--- a/roles/dns/README.md
+++ /dev/null
@@ -1,45 +0,0 @@
-dns
-===
-
-Configure a DNS server serving IPs of all the nodes of the cluster
-
-Requirements
-------------
-
-Ansible 2.2
-
-Role Variables
---------------
-
-| Name | Mandatory / Optional | Description |
-|------|----------------------|-------------|
-| `dns_zones` | Mandatory | DNS zones in which we must find the hosts |
-| `dns_forwarders` | Optional | DNS forwarders to delegate requests for hosts outside of `dns_zones`; if unset, the DNS server is recursive and non-forwarding |
-| `dns_all_hosts` | Mandatory | Exhaustive list of hosts |
-| `base_docker_image` | Optional | Base docker image to build Bind image from, used only in containerized deployments |
-
-Dependencies
-------------
-
-None
-
-Example Playbook
-----------------
-
- - hosts: dns_hosts
- roles:
- - role: dns
- dns_forwarders: [ '8.8.8.8', '8.8.4.4' ]
- dns_zones: [ novalocal, openstacklocal ]
- dns_all_hosts: "{{ g_all_hosts }}"
- base_docker_image: 'centos:centos7'
-
-License
--------
-
-ASL 2.0
-
-Author Information
-------------------
-
-OpenShift operations, Red Hat, Inc
diff --git a/roles/dns/defaults/main.yml b/roles/dns/defaults/main.yml
deleted file mode 100644
index 82055c8cd..000000000
--- a/roles/dns/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-base_docker_image: "{{ 'centos:centos7' if openshift.common.deployment_type == 'origin' else 'rhel7' }}"
diff --git a/roles/dns/handlers/main.yml b/roles/dns/handlers/main.yml
deleted file mode 100644
index 61fd7a10e..000000000
--- a/roles/dns/handlers/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- name: restart bind
- systemd:
- name: named
- state: restarted
diff --git a/roles/dns/meta/main.yml b/roles/dns/meta/main.yml
deleted file mode 100644
index 64d56114e..000000000
--- a/roles/dns/meta/main.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-galaxy_info:
- author: Lénaïc Huard
- description: Deploy and configure a DNS server
- company: Amadeus SAS
- license: ASL 2.0
- min_ansible_version: 2.2
-dependencies:
-- { role: openshift_facts }
diff --git a/roles/dns/tasks/main.yml b/roles/dns/tasks/main.yml
deleted file mode 100644
index c5ab53b4d..000000000
--- a/roles/dns/tasks/main.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- name: Install Bind
- package: name=bind state=present
- when: not openshift.common.is_containerized | bool
-
-- name: Create docker build dir
- file: path=/tmp/dockerbuild state=directory
- when: openshift.common.is_containerized | bool
-
-- name: Install dockerfile
- template:
- dest: "/tmp/dockerbuild/Dockerfile"
- src: Dockerfile
- when: openshift.common.is_containerized | bool
-
-- name: Build Bind image
- docker_image: path="/tmp/dockerbuild" name="bind" state=present
- when: openshift.common.is_containerized | bool
-
-- name: Install bind service file
- template:
- dest: "/etc/systemd/system/named.service"
- src: named.service.j2
- when: openshift.common.is_containerized | bool
-
-- name: Create bind zone dir
- file: path=/var/named state=directory
- when: openshift.common.is_containerized | bool
-
-- name: Configure Bind
- template:
- src: "{{ item.src }}"
- dest: "{{ item.dest }}"
- with_items:
- - src: openshift-cluster.zone
- dest: /var/named/openshift-cluster.zone
- - src: named.conf
- dest: /etc/named.conf
- notify: restart bind
-
-- name: Enable Bind
- systemd:
- name: named
- state: started
- enabled: yes
- daemon_reload: yes
diff --git a/roles/dns/templates/Dockerfile b/roles/dns/templates/Dockerfile
deleted file mode 100644
index cdff0a228..000000000
--- a/roles/dns/templates/Dockerfile
+++ /dev/null
@@ -1,11 +0,0 @@
-FROM {{ base_docker_image }}
-MAINTAINER Jan Provaznik <jprovazn@redhat.com>
-
-# install main packages:
-RUN yum -y update; yum clean all;
-RUN yum -y install bind-utils bind
-
-EXPOSE 53
-
-# start services:
-CMD ["/usr/sbin/named", "-f"]
diff --git a/roles/dns/templates/named.conf b/roles/dns/templates/named.conf
deleted file mode 100644
index 22c1ff935..000000000
--- a/roles/dns/templates/named.conf
+++ /dev/null
@@ -1,23 +0,0 @@
-options
-{
- directory "/var/named";
-
- allow-query { {{ ansible_default_ipv4.network }}/24; };
-
- recursion yes;
-
-{% if dns_forwarders is defined %}
- forwarders {
- {% for dns in dns_forwarders %}
- {{ dns }};
- {% endfor %}
- };
-{% endif %}
-};
-{% for zone in dns_zones %}
-
-zone "{{ zone }}" IN {
- type master;
- file "openshift-cluster.zone";
-};
-{% endfor %}
diff --git a/roles/dns/templates/named.service.j2 b/roles/dns/templates/named.service.j2
deleted file mode 100644
index 6e0a7a640..000000000
--- a/roles/dns/templates/named.service.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Requires={{ openshift.docker.service_name }}.service
-After={{ openshift.docker.service_name }}.service
-PartOf={{ openshift.docker.service_name }}.service
-
-[Service]
-Type=simple
-TimeoutStartSec=5m
-ExecStartPre=/usr/bin/docker run --rm -v /etc/named.conf:/etc/named.conf -v /var/named:/var/named:z bind named-checkconf -z /etc/named.conf
-ExecStartPre=-/usr/bin/docker rm -f bind
-ExecStart=/usr/bin/docker run --name bind -p 53:53/udp -v /var/log:/var/log -v /etc/named.conf:/etc/named.conf -v /var/named:/var/named:z bind
-ExecStop=/usr/bin/docker stop bind
-
-[Install]
-WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/dns/templates/openshift-cluster.zone b/roles/dns/templates/openshift-cluster.zone
deleted file mode 100644
index 03f5dc089..000000000
--- a/roles/dns/templates/openshift-cluster.zone
+++ /dev/null
@@ -1,14 +0,0 @@
-$TTL 1d
-@ IN SOA {{ ansible_hostname }} openshift (
- {{ ansible_date_time.epoch }} ; Serial (To be fixed before 2039)
- 12h ; Refresh
- 3m ; Retry
- 4w ; Expire
- 3h ; TTL for negative replies
- )
-
- IN NS {{ ansible_hostname }}
-{{ ansible_hostname }} IN A {{ ansible_default_ipv4.address }}
-{% for host in dns_all_hosts %}
-{{ hostvars[host].ansible_hostname }} IN A {{ hostvars[host]['ansible_default_ipv4'].address }}
-{% endfor %}
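The deleted zone template seeds the SOA serial from ansible_date_time.epoch, and the "(To be fixed before 2039)" comment flags the rollover: a signed 32-bit epoch value runs out in January 2038. A quick check of that date:

    from datetime import datetime, timezone

    # a signed 32-bit epoch overflows at 2**31 seconds
    print(datetime.fromtimestamp(2**31, tz=timezone.utc))
    # -> 2038-01-19 03:14:08+00:00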
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 1f9ac5059..78c6671d8 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -7,8 +7,8 @@
- set_fact:
l_use_system_container: "{{ openshift.docker.use_system_container | default(False) }}"
- l_use_crio: "{{ openshift.docker.use_crio | default(False) }}"
- l_use_crio_only: "{{ openshift.docker.use_crio_only | default(False) }}"
+ l_use_crio: "{{ openshift_use_crio | default(False) }}"
+ l_use_crio_only: "{{ openshift_use_crio_only | default(False) }}"
- name: Use Package Docker if Requested
include: package_docker.yml
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index 787f51f94..451b8498f 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -3,6 +3,15 @@
- set_fact:
l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(openshift.docker.insecure_registries)) }}"
when: openshift.docker.insecure_registries
+- set_fact:
+ l_crio_registries: "{{ openshift.docker.additional_registries + ['docker.io'] }}"
+ when: openshift.docker.additional_registries
+- set_fact:
+ l_crio_registries: "{{ ['docker.io'] }}"
+ when: not openshift.docker.additional_registries
+- set_fact:
+ l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}"
+ when: openshift.docker.additional_registries
- name: Ensure container-selinux is installed
package:
diff --git a/roles/docker/templates/crio.conf.j2 b/roles/docker/templates/crio.conf.j2
index eae1759ab..b4ee84fd0 100644
--- a/roles/docker/templates/crio.conf.j2
+++ b/roles/docker/templates/crio.conf.j2
@@ -43,7 +43,7 @@ stream_port = "10010"
# This is a mandatory setting as this runtime will be the default one
# and will also be used for untrusted container workloads if
# runtime_untrusted_workload is not set.
-runtime = "/usr/libexec/crio/runc"
+runtime = "/usr/bin/runc"
# runtime_untrusted_workload is the OCI compatible runtime used for untrusted
# container workloads. This is an optional setting, except if
@@ -120,6 +120,11 @@ insecure_registries = [
{{ l_insecure_crio_registries|default("") }}
]
+# registries is used to specify a comma separated list of registries to be used
+# when pulling an unqualified image (e.g. fedora:rawhide).
+registries = [
+{{ l_additional_crio_registries|default("") }}
+]
# The "crio.network" table contains settings pertaining to the
# management of CNI plugins.
[crio.network]
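The new registries block is fed by the l_additional_crio_registries fact set in systemcontainer_crio.yml, which joins the registry list into a quoted, comma-separated string. How that rendering works, with a hypothetical additional registry plus the always-appended docker.io:

    l_crio_registries = ["registry.example.com", "docker.io"]  # hypothetical list

    # equivalent of: '"{}"'.format('", "'.join(l_crio_registries))
    l_additional_crio_registries = '"{}"'.format('", "'.join(l_crio_registries))
    print(l_additional_crio_registries)  # -> "registry.example.com", "docker.io"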
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index d12d7a358..3cc2bbb18 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -1,6 +1,6 @@
---
-r_etcd_firewall_enabled: True
-r_etcd_use_firewalld: False
+r_etcd_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_etcd_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
etcd_initial_cluster_state: new
etcd_initial_cluster_token: etcd-cluster-1
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2
index ce362c743..2c2803aee 100644
--- a/roles/etcd/templates/etcd.conf.j2
+++ b/roles/etcd/templates/etcd.conf.j2
@@ -11,7 +11,8 @@
ETCD_NAME={{ etcd_hostname }}
ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }}
ETCD_DATA_DIR={{ etcd_data_dir }}
-#ETCD_SNAPSHOT_COUNTER=10000
+#ETCD_WAL_DIR=""
+#ETCD_SNAPSHOT_COUNT=10000
ETCD_HEARTBEAT_INTERVAL=500
ETCD_ELECTION_TIMEOUT=2500
ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }}
@@ -41,24 +42,43 @@ ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }}
#ETCD_DISCOVERY_PROXY=
{% endif %}
ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }}
+#ETCD_STRICT_RECONFIG_CHECK="false"
+#ETCD_AUTO_COMPACTION_RETENTION="0"
+#ETCD_ENABLE_V2="true"
#[proxy]
#ETCD_PROXY=off
+#ETCD_PROXY_FAILURE_WAIT="5000"
+#ETCD_PROXY_REFRESH_INTERVAL="30000"
+#ETCD_PROXY_DIAL_TIMEOUT="1000"
+#ETCD_PROXY_WRITE_TIMEOUT="5000"
+#ETCD_PROXY_READ_TIMEOUT="0"
#[security]
{% if etcd_url_scheme == 'https' -%}
-ETCD_CA_FILE={{ etcd_ca_file }}
+ETCD_TRUSTED_CA_FILE={{ etcd_ca_file }}
+ETCD_CLIENT_CERT_AUTH="true"
ETCD_CERT_FILE={{ etcd_cert_file }}
ETCD_KEY_FILE={{ etcd_key_file }}
{% endif -%}
+#ETCD_AUTO_TLS="false"
{% if etcd_peer_url_scheme == 'https' -%}
-ETCD_PEER_CA_FILE={{ etcd_peer_ca_file }}
+ETCD_PEER_TRUSTED_CA_FILE={{ etcd_peer_ca_file }}
+ETCD_PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CERT_FILE={{ etcd_peer_cert_file }}
ETCD_PEER_KEY_FILE={{ etcd_peer_key_file }}
{% endif -%}
+#ETCD_PEER_AUTO_TLS="false"
#[logging]
ETCD_DEBUG="{{ etcd_debug | default(false) | bool | string }}"
{% if etcd_log_package_levels is defined %}
ETCD_LOG_PACKAGE_LEVELS="{{ etcd_log_package_levels }}"
{% endif %}
+
+#[profiling]
+#ETCD_ENABLE_PPROF="false"
+#ETCD_METRICS="basic"
+#
+#[auth]
+#ETCD_AUTH_TOKEN="simple"
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
index b1bfa4592..89993f7ea 100644
--- a/roles/etcd_common/defaults/main.yml
+++ b/roles/etcd_common/defaults/main.yml
@@ -63,3 +63,13 @@ etcd_client_port: 2379
etcd_peer_port: 2380
etcd_url_scheme: http
etcd_peer_url_scheme: http
+
+etcd_initial_cluster_state: new
+etcd_initial_cluster_token: etcd-cluster-1
+
+etcd_initial_advertise_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
+etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
+etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
+etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
+
+etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d"
diff --git a/roles/etcd_migrate/tasks/add_ttls.yml b/roles/etcd_migrate/tasks/add_ttls.yml
new file mode 100644
index 000000000..c10465af9
--- /dev/null
+++ b/roles/etcd_migrate/tasks/add_ttls.yml
@@ -0,0 +1,33 @@
+---
+# To be executed on first master
+- slurp:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ register: g_master_config_output
+
+- set_fact:
+ accessTokenMaxAgeSeconds: "{{ (g_master_config_output.content|b64decode|from_yaml).oauthConfig.tokenConfig.accessTokenMaxAgeSeconds | default(86400) }}"
+ authorizeTokenMaxAgeSeconds: "{{ (g_master_config_output.content|b64decode|from_yaml).oauthConfig.tokenConfig.authorizeTokenMaxAgeSeconds | default(500) }}"
+ controllerLeaseTTL: "{{ (g_master_config_output.content|b64decode|from_yaml).controllerLeaseTTL | default(30) }}"
+- name: Re-introduce leases (as a replacement for key TTLs)
+ command: >
+ oadm migrate etcd-ttl \
+ --cert {{ r_etcd_common_master_peer_cert_file }} \
+ --key {{ r_etcd_common_master_peer_key_file }} \
+ --cacert {{ r_etcd_common_master_peer_ca_file }} \
+ --etcd-address 'https://{{ etcd_peer }}:{{ etcd_client_port }}' \
+ --ttl-keys-prefix {{ item.keys }} \
+ --lease-duration {{ item.ttl }}
+ environment:
+ ETCDCTL_API: 3
+ PATH: "/usr/local/bin:/var/usrlocal/bin:{{ ansible_env.PATH }}"
+ with_items:
+ - keys: "/kubernetes.io/events"
+ ttl: "1h"
+ - keys: "/kubernetes.io/masterleases"
+ ttl: "10s"
+ - keys: "/openshift.io/oauth/accesstokens"
+ ttl: "{{ accessTokenMaxAgeSeconds }}s"
+ - keys: "/openshift.io/oauth/authorizetokens"
+ ttl: "{{ authroizeTokenMaxAgeSeconds }}s"
+ - keys: "/openshift.io/leases/controllers"
+ ttl: "{{ controllerLeaseTTL }}s"
diff --git a/roles/etcd_migrate/tasks/check.yml b/roles/etcd_migrate/tasks/check.yml
index b66696b55..0804d9e1c 100644
--- a/roles/etcd_migrate/tasks/check.yml
+++ b/roles/etcd_migrate/tasks/check.yml
@@ -1,7 +1,4 @@
---
-- fail:
- msg: "Currently etcd v3 migration is unsupported while we test it more thoroughly"
- when: not openshift_enable_unsupported_configurations | default(false) | bool
# Check the cluster is healthy
- include: check_cluster_health.yml
diff --git a/roles/etcd_migrate/tasks/clean_data.yml b/roles/etcd_migrate/tasks/clean_data.yml
new file mode 100644
index 000000000..95a0e7c0a
--- /dev/null
+++ b/roles/etcd_migrate/tasks/clean_data.yml
@@ -0,0 +1,5 @@
+---
+- name: Remove member data
+ file:
+ path: /var/lib/etcd/member
+ state: absent
diff --git a/roles/etcd_migrate/tasks/main.yml b/roles/etcd_migrate/tasks/main.yml
index 409b0b613..e82f6a6b4 100644
--- a/roles/etcd_migrate/tasks/main.yml
+++ b/roles/etcd_migrate/tasks/main.yml
@@ -1,8 +1,8 @@
---
- name: Fail if invalid r_etcd_migrate_action provided
fail:
- msg: "etcd_migrate role can only be called with 'check' or 'migrate' or 'configure'"
- when: r_etcd_migrate_action not in ['check', 'migrate', 'configure']
+ msg: "etcd_migrate role can only be called with 'check', 'migrate', 'configure', 'add_ttls', or 'clean_data'"
+ when: r_etcd_migrate_action not in ['check', 'migrate', 'configure', 'add_ttls', 'clean_data']
- name: Include main action task file
include: "{{ r_etcd_migrate_action }}.yml"
diff --git a/roles/etcd_migrate/tasks/migrate.yml b/roles/etcd_migrate/tasks/migrate.yml
index b2cf6d20a..54a9c74ff 100644
--- a/roles/etcd_migrate/tasks/migrate.yml
+++ b/roles/etcd_migrate/tasks/migrate.yml
@@ -3,62 +3,54 @@
- set_fact:
l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}"
-- name: Disable etcd members
- service:
- name: "{{ l_etcd_service }}"
- state: stopped
-
-# Should we skip all TTL keys? https://bugzilla.redhat.com/show_bug.cgi?id=1389773
- name: Migrate etcd data
command: >
etcdctl migrate --data-dir={{ etcd_data_dir }}
environment:
ETCDCTL_API: 3
register: l_etcdctl_migrate
-
# TODO(jchaloup): If any of the members fails, we need to restore all members to v2 from the pre-migrate backup
- name: Check the etcd v2 data are correctly migrated
fail:
msg: "Failed to migrate a member"
when: "'finished transforming keys' not in l_etcdctl_migrate.stdout and 'no v2 keys to migrate' not in l_etcdctl_migrate.stdout"
-
- name: Migration message
debug:
msg: "Etcd migration finished with: {{ l_etcdctl_migrate.stdout }}"
-
-- name: Enable etcd member
- service:
+- name: Set ETCD_FORCE_NEW_CLUSTER=true on first etcd host
+ lineinfile:
+ line: "ETCD_FORCE_NEW_CLUSTER=true"
+ dest: /etc/etcd/etcd.conf
+ backup: true
+- name: Start etcd
+ systemd:
name: "{{ l_etcd_service }}"
state: started
-
-- name: Wait for cluster to become healthy after migration
+- name: Wait for cluster to become healthy after bringing up first member
command: >
etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} --endpoint https://{{ etcd_peer }}:{{ etcd_client_port }} cluster-health
register: l_etcd_migrate_health
until: l_etcd_migrate_health.rc == 0
retries: 3
delay: 30
- run_once: true
+- name: Unset ETCD_FORCE_NEW_CLUSTER=true on first etcd host
+ lineinfile:
+ line: "ETCD_FORCE_NEW_CLUSTER=true"
+ dest: /etc/etcd/etcd.conf
+ state: absent
+ backup: true
+- name: Restart first etcd host
+ systemd:
+ name: "{{ l_etcd_service }}"
+ state: restarted
-# NOTE: /usr/local/bin may be removed from the PATH by ansible hence why
-# it's added to the environment in this task.
-- name: Re-introduce leases (as a replacement for key TTLs)
+- name: Wait for cluster to become healthy after restarting first member
command: >
- oadm migrate etcd-ttl \
- --cert {{ r_etcd_common_master_peer_cert_file }} \
- --key {{ r_etcd_common_master_peer_key_file }} \
- --cacert {{ r_etcd_common_master_peer_ca_file }} \
- --etcd-address 'https://{{ etcd_peer }}:{{ etcd_client_port }}' \
- --ttl-keys-prefix {{ item }} \
- --lease-duration 1h
- environment:
- ETCDCTL_API: 3
- PATH: "/usr/local/bin:/var/usrlocal/bin:{{ ansible_env.PATH }}"
- with_items:
- - "/kubernetes.io/events"
- - "/kubernetes.io/masterleases"
- delegate_to: "{{ groups.oo_first_master[0] }}"
- run_once: true
+ etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} --endpoint https://{{ etcd_peer }}:{{ etcd_client_port }} cluster-health
+ register: l_etcd_migrate_health
+ until: l_etcd_migrate_health.rc == 0
+ retries: 3
+ delay: 30
- set_fact:
r_etcd_migrate_success: true
diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml
index ddf8230ec..71c8f38c3 100644
--- a/roles/flannel_register/defaults/main.yaml
+++ b/roles/flannel_register/defaults/main.yaml
@@ -1,7 +1,6 @@
---
-flannel_network: "{{ openshift.common.portal_net | default('172.30.0.0/16', true) }}"
-flannel_min_network: 172.30.5.0
-flannel_subnet_len: 24
+flannel_network: "{{ openshift.master.sdn_cluster_network_cidr }}"
+flannel_subnet_len: "{{ 32 - openshift.master.sdn_host_subnet_length }}"
flannel_etcd_key: /openshift.com/network
etcd_hosts: "{{ etcd_urls }}"
etcd_conf_dir: "{{ openshift.common.config_base }}/master"
diff --git a/roles/flannel_register/templates/flannel-config.json b/roles/flannel_register/templates/flannel-config.json
index 89ce4c30b..bba3729fa 100644
--- a/roles/flannel_register/templates/flannel-config.json
+++ b/roles/flannel_register/templates/flannel-config.json
@@ -1,7 +1,6 @@
{
"Network": "{{ flannel_network }}",
"SubnetLen": {{ flannel_subnet_len }},
- "SubnetMin": "{{ flannel_min_network }}",
"Backend": {
"Type": "host-gw"
}
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index 1b73bfd0e..45d7444a4 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -1288,13 +1288,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
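The hunk above swaps the yum Python API for the lower-level rpm bindings when checking for an installed package. Standalone, the replacement check reads (assuming the rpm-python bindings are installed):

    import rpm  # provided by the rpm-python package

    def openshift_installed():
        '''check whether the atomic-openshift package is in the rpm database'''
        transaction_set = rpm.TransactionSet()
        rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
        return rpmquery.count() > 0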
diff --git a/roles/lib_openshift/library/oc_adm_csr.py b/roles/lib_openshift/library/oc_adm_csr.py
new file mode 100644
index 000000000..231857cca
--- /dev/null
+++ b/roles/lib_openshift/library/oc_adm_csr.py
@@ -0,0 +1,1649 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/csr -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_adm_csr
+short_description: Module to approve or deny openshift certificate signing requests
+description:
+ - Wrapper around the openshift `oc adm certificate approve|deny <csr>` command.
+options:
+ state:
+ description:
+ - The state of the certificate signing requests; approve, deny, and list are the only supported states.
+ required: false
+ default: approve
+ choices:
+ - approve
+ - deny
+ - list
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ nodes:
+ description:
+ - A list of node names for which to accept certificates
+ required: false
+ default: None
+ aliases: []
+ timeout:
+ description:
+ - This flag allows for a timeout value when approving nodes.
+ - A zero value for the timeout will block until the nodes have been accepted.
+ required: false
+ default: 30
+ aliases: []
+ approve_all:
+ description:
+ - This flag allows for the module to approve all CSRs that are found.
+ - This facilitates testing.
+ required: false
+ default: False
+ aliases: []
+ service_account:
+ description:
+ - This parameter tells the approval process which service account is being used for the requests
+ required: false
+ default: node-bootstrapper
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: Approve certificates for node xyz
+ oc_adm_csr:
+ nodes:
+ - xyz
+ timeout: 300
+
+- name: Approve certificates for node xyz, blocking until accepted
+ oc_adm_csr:
+ nodes:
+ - xyz
+ timeout: 0
+'''
+
+# -*- -*- -*- End included fragment: doc/csr -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+
+class YeditException(Exception): # pragma: no cover
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object): # pragma: no cover
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z{}/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
+ return False
+
+ return True
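+ # Hedged illustration (assumed behavior, not upstream comments): with the
+ # default separator '.', parse_key("a.b[0].c") returns
+ # [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')] -- dict keys occupy the
+ # second slot of each tuple and list indexes the first.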
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Add an item to a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a#b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ tmp_filename = filename + '.yedit'
+
+ with open(tmp_filename, 'w') as yfd:
+ yfd.write(contents)
+
+ os.rename(tmp_filename, filename)
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when it's a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
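+
+ # Note (assumed rationale): put() mutates a round-trip copy and only commits
+ # it to self.yaml_dict once add_entry() succeeds, so a failed edit leaves the
+ # loaded document untouched and ruamel.yaml format metadata survives.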
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result is not None:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # Special case: '' turns into None after a yaml load, so skip it
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ elif isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.safe_load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
+
+ return inc_value
+
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
+
+ state = params['state']
+
+ if params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and state != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': state}
+
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
+ else:
+ rval = yamlfile.delete(params['key'])
+
+ if rval[0] and params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
+
+ elif state == 'present':
+ # check if content is different than what is in the file
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
+
+ yamlfile.yaml_dict = content
+
+ # If we were passed a key, value then
+ # we encapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
+
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
+
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
+
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
+ yamlfile.write()
+
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
+
+ # no edits to make
+ if params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': state}
+
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
+ return {'failed': True, 'msg': 'Unknown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
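+
+# Hedged usage sketch of Yedit (illustration only; the file path and values
+# are assumed):
+#   yed = Yedit(filename='/tmp/example.yaml')
+#   yed.put('spec.replicas', 3)              # set/overwrite a nested key
+#   yed.append('spec.containers', 'busybox') # append to a list at that path
+#   yed.write()                              # atomic write: tempfile + rename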
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
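+
+# Note (assumed rationale): shutil.which() only exists on Python 3.3+, so the
+# AttributeError fallback above keeps the lookup working on Python 2 hosts by
+# scanning the same path list with os.path.exists().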
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
+ self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = Utils.create_tmpfile(rname + '-')
+
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = Utils.create_tmpfile(rname + '-')
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, name=None, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = Utils.create_tmpfile(template_name + '-')
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, name=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+
+ cmd.extend(['-o', 'json'])
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+ # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+ ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ cmd.append('--schedulable={}'.format(schedulable))
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ if pod_selector:
+ cmd.append('--pod-selector={}'.format(pod_selector))
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector={}'.format(pod_selector))
+
+ if grace_period:
+ cmd.append('--grace-period={}'.format(int(grace_period)))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ curr_env = os.environ.copy()
+ curr_env.update({'KUBECONFIG': self.kubeconfig})
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=curr_env)
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = [self.oc_binary]
+
+ if oadm:
+ cmds.append('adm')
+
+ cmds.extend(cmd)
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
+ cmds.extend(['-n', self.namespace])
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+ rval = {"returncode": returncode,
+ "cmd": ' '.join(cmds)}
+
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
+ rval.update({"stderr": stderr,
+ "stdout": stdout})
+
+ return rval
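+
+ # Hedged example (shape only; resource name assumed): openshift_cmd(
+ # ['get', 'nodes'], output=True) runs roughly `oc get nodes -n <namespace>`
+ # with KUBECONFIG pointing at the tmpfile kubeconfig copy, and returns a
+ # dict carrying 'returncode', 'cmd', and the parsed 'results'.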
+
+
+class Utils(object): # pragma: no cover
+ ''' utilities for openshiftcli modules '''
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ with open(filename, 'w') as sfd:
+ sfd.write(contents)
+
+ @staticmethod
+ def create_tmp_file_from_contents(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+
+ tmp = Utils.create_tmpfile(prefix=rname)
+
+ if ftype == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ Utils._write(tmp, json.dumps(data))
+ else:
+ Utils._write(tmp, data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [tmp])
+ return tmp
+
+ @staticmethod
+ def create_tmpfile_copy(inc_file):
+ '''create a temporary copy of a file'''
+ tmpfile = Utils.create_tmpfile('lib_openshift-')
+ Utils._write(tmpfile, open(inc_file).read())
+
+ # Cleanup the tmpfile
+ atexit.register(Utils.cleanup, [tmpfile])
+
+ return tmpfile
+
+ @staticmethod
+ def create_tmpfile(prefix='tmp'):
+ ''' Generates and returns a temporary file name '''
+
+ with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
+ return tmp.name
+
+ @staticmethod
+ def create_tmp_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_tmp_file_from_contents(item['path'] + '-',
+ item['data'],
+ ftype=content_type)
+ files.append({'name': os.path.basename(item['path']),
+ 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # Horrible hack to get the openshift version in OpenShift 3.2:
+ # by default, "oc version" in 3.2 does not return an "openshift" version.
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
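+
+ # Hedged illustration (sample version string assumed): given
+ # {'oc': 'v3.6.173.0.5+abc'}, add_custom_versions returns
+ # {'oc_numeric': '3.6.173.0.5', 'oc_short': '3.6'}.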
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import rpm
+
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
+
+ return rpmquery.count() > 0
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(user_def[key])
+ print(value)
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(user_values)
+ print(api_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
+ rval = []
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
+
+ return rval
+
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/oc_adm_csr.py -*- -*- -*-
+
+
+class OCcsr(OpenShiftCLI):
+ ''' Class to wrap the oc adm certificate command line'''
+ kind = 'csr'
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ nodes=None,
+ approve_all=False,
+ service_account=None,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False):
+ ''' Constructor for oc adm certificate '''
+ super(OCcsr, self).__init__(None, kubeconfig, verbose)
+ self.service_account = service_account
+ self.nodes = self.create_nodes(nodes)
+ self._csrs = []
+ self.approve_all = approve_all
+ self.verbose = verbose
+
+ @property
+ def csrs(self):
+ '''property for managing csrs'''
+ # any processing needed??
+ self._csrs = self._get(resource=self.kind)['results'][0]['items']
+ return self._csrs
+
+ def create_nodes(self, nodes):
+ '''create a node object to track csr signing status'''
+ nodes_list = []
+
+ if nodes is None:
+ return nodes_list
+
+ results = self._get(resource='nodes')['results'][0]['items']
+
+ for node in nodes:
+ nodes_list.append(dict(name=node, csrs={}, accepted=False, denied=False))
+
+ for ocnode in results:
+ if node in ocnode['metadata']['name']:
+ nodes_list[-1]['accepted'] = True
+
+ return nodes_list
+
+ def get(self):
+ '''get the current certificate signing requests'''
+ return self.csrs
+
+ @staticmethod
+ def action_needed(csr, action):
+ '''check to see if csr is in desired state'''
+ if csr['status'] == {}:
+ return True
+
+ state = csr['status']['conditions'][0]['type']
+
+ if action == 'approve' and state != 'Approved':
+ return True
+
+ elif action == 'deny' and state != 'Denied':
+ return True
+
+ return False
+
+ def match_node(self, csr):
+ '''match an inc csr to a node in self.nodes'''
+ for node in self.nodes:
+ # we have a match
+ if node['name'] in csr['metadata']['name']:
+ node['csrs'][csr['metadata']['name']] = csr
+
+ # check that the username is the node and type is 'Approved'
+ if node['name'] in csr['spec']['username'] and csr['status']:
+ if csr['status']['conditions'][0]['type'] == 'Approved':
+ node['accepted'] = True
+ # check type is 'Denied' and mark node as such
+ if csr['status'] and csr['status']['conditions'][0]['type'] == 'Denied':
+ node['denied'] = True
+
+ return node
+
+ return None
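+
+ # Hedged walk-through (CSR name pattern taken from the comment in manage()
+ # below): a node named ip-172-31-51-246-ec2-internal matches the csr
+ # node-bootstrapper-client-ip-172-31-51-246-ec2-internal; once one of its
+ # csrs reaches conditions[0].type == 'Approved', the node is marked accepted
+ # and finished() can report completion.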
+
+ def finished(self):
+ '''determine if there are more csrs to sign'''
+ # if nodes were given, we are finished once every node is accepted or denied
+ if self.nodes is not None and len(self.nodes) > 0:
+ return all([node['accepted'] or node['denied'] for node in self.nodes])
+
+ # we are approving everything or we still have nodes outstanding
+ return False
+
+ def manage(self, action):
+ '''run the oc adm certificate <approve|deny> command and store results into self.nodes
+
+ we attempt to verify that each csr belongs to a node we were asked to accept.
+
+ action - (approve | deny)
+ '''
+
+ results = []
+ # There are 2 types of requests:
+ # - node-bootstrapper-client-ip-172-31-51-246-ec2-internal
+ # The client request allows the client to talk to the api/controller
+ # - node-bootstrapper-server-ip-172-31-51-246-ec2-internal
+ # The server request allows the server to join the cluster
+ # Here we need to determine how to approve/deny
+ # we should query the csrs and verify they are from the nodes we thought
+ for csr in self.csrs:
+ node = self.match_node(csr)
+ # oc adm certificate <approve|deny> csr
+ # there are 3 known states: Denied, Approved, {}
+ # verify something is needed by OCcsr.action_needed
+ # if approve_all, then do it
+ # if you passed in nodes, you must have a node that matches
+ if self.approve_all or (node and OCcsr.action_needed(csr, action)):
+ result = self.openshift_cmd(['certificate', action, csr['metadata']['name']], oadm=True)
+ # client should have service account name in username field
+ # server should have node name in username field
+ if node and csr['metadata']['name'] not in node['csrs']:
+ node['csrs'][csr['metadata']['name']] = csr
+
+ # accept node in cluster
+ if node['name'] in csr['spec']['username']:
+ node['accepted'] = True
+
+ results.append(result)
+
+ return results
+
+ @staticmethod
+ def run_ansible(params, check_mode=False):
+ '''run the idempotent ansible code'''
+
+ client = OCcsr(params['nodes'],
+ params['approve_all'],
+ params['service_account'],
+ params['kubeconfig'],
+ params['debug'])
+
+ state = params['state']
+
+ api_rval = client.get()
+
+ if state == 'list':
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ if state in ['approve', 'deny']:
+ if check_mode:
+ return {'changed': True,
+ 'msg': "CHECK_MODE: Would have {} the certificate.".format(params['state']),
+ 'state': state}
+
+ all_results = []
+ finished = False
+ timeout = False
+ import time
+ # loop for timeout or block until all nodes pass
+ ctr = 0
+ while True:
+
+ all_results.extend(client.manage(params['state']))
+ if client.finished():
+ finished = True
+ break
+
+ if params['timeout'] == 0:
+ if not params['approve_all']:
+ ctr = 0
+
+ if ctr * 2 > params['timeout']:
+ timeout = True
+ break
+
+ # This provides time for the nodes to send their csr requests between approvals
+ time.sleep(2)
+
+ ctr += 1
+
+ for result in all_results:
+ if result['returncode'] != 0:
+ return {'failed': True, 'msg': all_results}
+
+ return dict(changed=len(all_results) > 0,
+ results=all_results,
+ nodes=client.nodes,
+ state=state,
+ finished=finished,
+ timeout=timeout)
+
+ return {'failed': True,
+ 'msg': 'Unknown state passed. %s' % state}
+
+
+# -*- -*- -*- End included fragment: class/oc_adm_csr.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_adm_csr.py -*- -*- -*-
+
+def main():
+ '''
+ ansible oc module for approving certificate signing requests
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='approve', type='str',
+ choices=['approve', 'deny', 'list']),
+ debug=dict(default=False, type='bool'),
+ nodes=dict(default=None, type='list'),
+ timeout=dict(default=30, type='int'),
+ approve_all=dict(default=False, type='bool'),
+ service_account=dict(default='node-bootstrapper', type='str'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['approve_all', 'nodes']],
+ )
+
+ if module.params['nodes'] == []:
+ module.fail_json(**dict(failed=True, msg='Please specify at least one node.'))
+
+ rval = OCcsr.run_ansible(module.params, module.check_mode)
+
+ if 'failed' in rval:
+ return module.fail_json(**rval)
+
+ return module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_adm_csr.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index b09321a5b..44f3f57d8 100644
--- a/roles/lib_openshift/library/oc_adm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -1274,13 +1274,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index 7154fd839..687cff579 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -1260,13 +1260,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index 3fcf49799..ddf5d90b7 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -1260,13 +1260,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index bf2650460..c00eee381 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -1378,13 +1378,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index a2b7d12c0..0c925ab0b 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -1403,13 +1403,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_atomic_container.py b/roles/lib_openshift/library/oc_atomic_container.py
index 955c6313e..79bd08f4e 100644
--- a/roles/lib_openshift/library/oc_atomic_container.py
+++ b/roles/lib_openshift/library/oc_atomic_container.py
@@ -83,7 +83,7 @@ def _install(module, container, image, values_list):
if rc != 0:
return rc, out, err, False
else:
- changed = "Extracting" in out
+ changed = "Extracting" in out or "Copying blob" in out
return rc, out, err, changed
def _uninstall(module, name):
@@ -127,7 +127,7 @@ def do_update(module, container, old_image, image, values_list):
if rc != 0:
module.fail_json(rc=rc, msg=err)
else:
- changed = "Extracting" in out
+ changed = "Extracting" in out or "Copying blob" in out
module.exit_json(msg=out, changed=changed)
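
The "Copying blob" addition above tracks newer atomic/skopeo output, which may
print "Copying blob" rather than "Extracting" when layers are pulled. A hedged
sketch of the loosened change detection (the sample outputs are invented for
illustration):

    def _detect_changed(out):
        '''True when the atomic output indicates new image layers were pulled.'''
        return "Extracting" in out or "Copying blob" in out

    assert _detect_changed("Copying blob sha256:0123...")
    assert not _detect_changed("Image is up to date")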
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index d101eac1c..567ecfd4e 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -1252,13 +1252,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
index 7cd29215f..9515de569 100644
--- a/roles/lib_openshift/library/oc_configmap.py
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -1258,13 +1258,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index 5b11f45ba..d461e5ae9 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -1302,13 +1302,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index d3834ce0c..22ad58725 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -1269,13 +1269,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py
index 0d751fe28..b6c6e47d9 100644
--- a/roles/lib_openshift/library/oc_group.py
+++ b/roles/lib_openshift/library/oc_group.py
@@ -1242,13 +1242,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py
index 3a6ba3e56..f7fc286e0 100644
--- a/roles/lib_openshift/library/oc_image.py
+++ b/roles/lib_openshift/library/oc_image.py
@@ -1261,13 +1261,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index 5db036b23..2206878a4 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -1278,13 +1278,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 7d9392af9..126d7a617 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -1281,13 +1281,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index 130521761..d20904d0d 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -1213,13 +1213,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index c6568d520..91199d093 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -1270,13 +1270,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
index bf65ef603..f9b2d81fa 100644
--- a/roles/lib_openshift/library/oc_project.py
+++ b/roles/lib_openshift/library/oc_project.py
@@ -1267,13 +1267,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
index a21540962..895322ba5 100644
--- a/roles/lib_openshift/library/oc_pvc.py
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -1274,13 +1274,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index 0c0bc9386..8f8e46e1e 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -1312,13 +1312,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@@ -1666,9 +1665,6 @@ class OCRoute(OpenShiftCLI):
@staticmethod
def get_cert_data(path, content):
'''get the data for a particular value'''
- if not path and not content:
- return None
-
rval = None
if path and os.path.exists(path) and os.access(path, os.R_OK):
rval = open(path).read()
@@ -1707,14 +1703,14 @@ class OCRoute(OpenShiftCLI):
if params['tls_termination'] and params['tls_termination'].lower() != 'passthrough': # E501
for key, option in files.items():
- if key == 'destcacert' and params['tls_termination'] != 'reencrypt':
+ if not option['path'] and not option['content']:
continue
option['value'] = OCRoute.get_cert_data(option['path'], option['content']) # E501
if not option['value']:
return {'failed': True,
- 'msg': 'Verify that you pass a value for %s' % key}
+ 'msg': 'Verify that you pass a correct value for %s' % key}
rconfig = RouteConfig(params['name'],
params['namespace'],
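This oc_route change drops the destcacert special case and instead skips any TLS file whose path and content are both unset, failing only when a value was supplied but could not be resolved. A condensed sketch of the resulting flow, assuming the files dict shape OCRoute builds ({key: {'path': ..., 'content': ...}}):

```python
# Condensed sketch of the revised TLS-file validation in OCRoute.
import os

def get_cert_data(path, content):
    '''Prefer a readable file at path; fall back to inline content.'''
    if path and os.path.exists(path) and os.access(path, os.R_OK):
        return open(path).read()
    return content or None

def resolve_cert_options(files):
    for key, option in files.items():
        # New guard: options the caller left entirely unset are skipped,
        # instead of special-casing 'destcacert' by termination type.
        if not option['path'] and not option['content']:
            continue
        option['value'] = get_cert_data(option['path'], option['content'])
        if not option['value']:
            return {'failed': True,
                    'msg': 'Verify that you pass a correct value for %s' % key}
    return {'failed': False}

files = {'cert': {'path': '', 'content': '-----BEGIN CERTIFICATE-----...'},
         'destcacert': {'path': '', 'content': ''}}  # unset, so skipped
print(resolve_cert_options(files))
```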
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index f112b6dd0..7130cc5fc 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -1256,13 +1256,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index d762e0c38..0c4b99e30 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -1308,13 +1308,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index 769b75e15..7ab139e85 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -1315,13 +1315,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index 446987eff..5d539ced4 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -1254,13 +1254,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index c7eb1986a..97e213f46 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -1254,13 +1254,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py
index 686119c65..9339a85e5 100644
--- a/roles/lib_openshift/library/oc_storageclass.py
+++ b/roles/lib_openshift/library/oc_storageclass.py
@@ -1272,13 +1272,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
index 3a98693b7..2fa349547 100644
--- a/roles/lib_openshift/library/oc_user.py
+++ b/roles/lib_openshift/library/oc_user.py
@@ -1314,13 +1314,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index 939261526..55e1054e7 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -1226,13 +1226,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
index 41e7d0ab8..63bad57b4 100644
--- a/roles/lib_openshift/library/oc_volume.py
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -1303,13 +1303,12 @@ class Utils(object): # pragma: no cover
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/src/ansible/oc_adm_csr.py b/roles/lib_openshift/src/ansible/oc_adm_csr.py
new file mode 100644
index 000000000..9e43a810b
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_adm_csr.py
@@ -0,0 +1,36 @@
+# pylint: skip-file
+# flake8: noqa
+
+def main():
+ '''
+ ansible oc module for approving certificate signing requests
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='approve', type='str',
+ choices=['approve', 'deny', 'list']),
+ debug=dict(default=False, type='bool'),
+ nodes=dict(default=None, type='list'),
+ timeout=dict(default=30, type='int'),
+ approve_all=dict(default=False, type='bool'),
+ service_account=dict(default='node-bootstrapper', type='str'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['approve_all', 'nodes']],
+ )
+
+ if module.params['nodes'] == []:
+ module.fail_json(**dict(failed=True, msg='Please specify hosts.'))
+
+ rval = OCcsr.run_ansible(module.params, module.check_mode)
+
+ if 'failed' in rval:
+ return module.fail_json(**rval)
+
+ return module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_atomic_container.py b/roles/lib_openshift/src/ansible/oc_atomic_container.py
index 7b81760df..454d7c4b2 100644
--- a/roles/lib_openshift/src/ansible/oc_atomic_container.py
+++ b/roles/lib_openshift/src/ansible/oc_atomic_container.py
@@ -19,7 +19,7 @@ def _install(module, container, image, values_list):
if rc != 0:
return rc, out, err, False
else:
- changed = "Extracting" in out
+ changed = "Extracting" in out or "Copying blob" in out
return rc, out, err, changed
def _uninstall(module, name):
@@ -63,7 +63,7 @@ def do_update(module, container, old_image, image, values_list):
if rc != 0:
module.fail_json(rc=rc, msg=err)
else:
- changed = "Extracting" in out
+ changed = "Extracting" in out or "Copying blob" in out
module.exit_json(msg=out, changed=changed)
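Newer atomic/skopeo releases print "Copying blob" rather than "Extracting" when image layers are actually pulled, so change detection now accepts either marker. A tiny illustration:

```python
# Mirror of the updated change detection for `atomic install` output.
def output_indicates_change(out):
    return "Extracting" in out or "Copying blob" in out

assert output_indicates_change("Copying blob sha256:9f1a...")
assert output_indicates_change("Extracting layer 3/5")
assert not output_indicates_change("Image already installed, nothing to do")
```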
diff --git a/roles/lib_openshift/src/class/oc_adm_csr.py b/roles/lib_openshift/src/class/oc_adm_csr.py
new file mode 100644
index 000000000..ea11c6ca9
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_adm_csr.py
@@ -0,0 +1,197 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+class OCcsr(OpenShiftCLI):
+ ''' Class to wrap the oc adm certificate command line'''
+ kind = 'csr'
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ nodes=None,
+ approve_all=False,
+ service_account=None,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False):
+ ''' Constructor for oc adm certificate '''
+ super(OCcsr, self).__init__(None, kubeconfig, verbose)
+ self.service_account = service_account
+ self.nodes = self.create_nodes(nodes)
+ self._csrs = []
+ self.approve_all = approve_all
+ self.verbose = verbose
+
+ @property
+ def csrs(self):
+ '''property for managing csrs'''
+ # any processing needed??
+ self._csrs = self._get(resource=self.kind)['results'][0]['items']
+ return self._csrs
+
+ def create_nodes(self, nodes):
+ '''create a node object to track csr signing status'''
+ nodes_list = []
+
+ if nodes is None:
+ return nodes_list
+
+ results = self._get(resource='nodes')['results'][0]['items']
+
+ for node in nodes:
+ nodes_list.append(dict(name=node, csrs={}, accepted=False, denied=False))
+
+ for ocnode in results:
+ if node in ocnode['metadata']['name']:
+ nodes_list[-1]['accepted'] = True
+
+ return nodes_list
+
+ def get(self):
+ '''get the current certificate signing requests'''
+ return self.csrs
+
+ @staticmethod
+ def action_needed(csr, action):
+ '''check to see if csr is in desired state'''
+ if csr['status'] == {}:
+ return True
+
+ state = csr['status']['conditions'][0]['type']
+
+ if action == 'approve' and state != 'Approved':
+ return True
+
+ elif action == 'deny' and state != 'Denied':
+ return True
+
+ return False
+
+ def match_node(self, csr):
+ '''match an incoming csr to a node in self.nodes'''
+ for node in self.nodes:
+ # we have a match
+ if node['name'] in csr['metadata']['name']:
+ node['csrs'][csr['metadata']['name']] = csr
+
+ # check that the username is the node and type is 'Approved'
+ if node['name'] in csr['spec']['username'] and csr['status']:
+ if csr['status']['conditions'][0]['type'] == 'Approved':
+ node['accepted'] = True
+ # check type is 'Denied' and mark node as such
+ if csr['status'] and csr['status']['conditions'][0]['type'] == 'Denied':
+ node['denied'] = True
+
+ return node
+
+ return None
+
+ def finished(self):
+ '''determine if there are more csrs to sign'''
+ # if nodes is set and we have nodes then return if all nodes are 'accepted'
+ if self.nodes is not None and len(self.nodes) > 0:
+ return all([node['accepted'] or node['denied'] for node in self.nodes])
+
+ # we are approving everything or we still have nodes outstanding
+ return False
+
+ def manage(self, action):
+ '''run the oc adm certificate <approve|deny> cmd against pending csrs and store results into self.nodes
+
+ we attempt to verify if the node is one that was given to us to accept.
+
+ action - (approve | deny)
+ '''
+
+ results = []
+ # There are 2 types of requests:
+ # - node-bootstrapper-client-ip-172-31-51-246-ec2-internal
+ # The client request allows the client to talk to the api/controller
+ # - node-bootstrapper-server-ip-172-31-51-246-ec2-internal
+ # The server request allows the server to join the cluster
+ # Here we need to determine how to approve/deny
+ # we should query the csrs and verify they are from the nodes we thought
+ for csr in self.csrs:
+ node = self.match_node(csr)
+ # oc adm certificate <approve|deny> csr
+ # there are 3 known states: Denied, Approved, {}
+ # verify something is needed by OCcsr.action_needed
+ # if approve_all, then do it
+ # if you passed in nodes, you must have a node that matches
+ if self.approve_all or (node and OCcsr.action_needed(csr, action)):
+ result = self.openshift_cmd(['certificate', action, csr['metadata']['name']], oadm=True)
+ # client should have service account name in username field
+ # server should have node name in username field
+ if node and csr['metadata']['name'] not in node['csrs']:
+ node['csrs'][csr['metadata']['name']] = csr
+
+ # accept node in cluster
+ if node['name'] in csr['spec']['username']:
+ node['accepted'] = True
+
+ results.append(result)
+
+ return results
+
+ @staticmethod
+ def run_ansible(params, check_mode=False):
+ '''run the idempotent ansible code'''
+
+ client = OCcsr(params['nodes'],
+ params['approve_all'],
+ params['service_account'],
+ params['kubeconfig'],
+ params['debug'])
+
+ state = params['state']
+
+ api_rval = client.get()
+
+ if state == 'list':
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ if state in ['approve', 'deny']:
+ if check_mode:
+ return {'changed': True,
+ 'msg': "CHECK_MODE: Would have {} the certificate.".format(params['state']),
+ 'state': state}
+
+ all_results = []
+ finished = False
+ timeout = False
+ import time
+ # loop for timeout or block until all nodes pass
+ ctr = 0
+ while True:
+
+ all_results.extend(client.manage(params['state']))
+ if client.finished():
+ finished = True
+ break
+
+ if params['timeout'] == 0:
+ if not params['approve_all']:
+ ctr = 0
+
+ if ctr * 2 > params['timeout']:
+ timeout = True
+ break
+
+ # This provides time for the nodes to send their csr requests between approvals
+ time.sleep(2)
+
+ ctr += 1
+
+ for result in all_results:
+ if result['returncode'] != 0:
+ return {'failed': True, 'msg': all_results}
+
+ return dict(changed=len(all_results) > 0,
+ results=all_results,
+ nodes=client.nodes,
+ state=state,
+ finished=finished,
+ timeout=timeout)
+
+ return {'failed': True,
+ 'msg': 'Unknown state passed. %s' % state}
+
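OCcsr.run_ansible polls in two-second rounds: each pass applies the requested action to matching CSRs, and the loop ends once every named node is accepted or denied, or once the timeout elapses (a timeout of 0 blocks indefinitely). A simplified sketch of that pattern, with fetch_pending and approve as hypothetical stand-ins for the oc get csr and oc adm certificate calls:

```python
# Simplified sketch of the approve-until-done loop in OCcsr.run_ansible.
# fetch_pending() and approve(csr) are hypothetical stand-ins for the
# `oc get csr` and `oc adm certificate approve` invocations.
import time

def approve_all_nodes(nodes, fetch_pending, approve, timeout=30):
    '''Approve CSRs for the named nodes; return (finished, timed_out).'''
    accepted = dict((name, False) for name in nodes)
    ctr = 0
    while True:
        for csr in fetch_pending():
            for name in nodes:
                # node names are embedded in the generated CSR names
                if name in csr['metadata']['name']:
                    approve(csr)
                    accepted[name] = True
        if all(accepted.values()):
            return True, False
        # a timeout of 0 blocks until every node has been accepted
        if timeout and ctr * 2 > timeout:
            return False, True
        time.sleep(2)  # give nodes time to submit follow-up CSRs
        ctr += 1
```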
diff --git a/roles/lib_openshift/src/class/oc_route.py b/roles/lib_openshift/src/class/oc_route.py
index 3935525f1..3a1bd732f 100644
--- a/roles/lib_openshift/src/class/oc_route.py
+++ b/roles/lib_openshift/src/class/oc_route.py
@@ -68,9 +68,6 @@ class OCRoute(OpenShiftCLI):
@staticmethod
def get_cert_data(path, content):
'''get the data for a particular value'''
- if not path and not content:
- return None
-
rval = None
if path and os.path.exists(path) and os.access(path, os.R_OK):
rval = open(path).read()
@@ -109,14 +106,14 @@ class OCRoute(OpenShiftCLI):
if params['tls_termination'] and params['tls_termination'].lower() != 'passthrough': # E501
for key, option in files.items():
- if key == 'destcacert' and params['tls_termination'] != 'reencrypt':
+ if not option['path'] and not option['content']:
continue
option['value'] = OCRoute.get_cert_data(option['path'], option['content']) # E501
if not option['value']:
return {'failed': True,
- 'msg': 'Verify that you pass a value for %s' % key}
+ 'msg': 'Verify that you pass a correct value for %s' % key}
rconfig = RouteConfig(params['name'],
params['namespace'],
diff --git a/roles/lib_openshift/src/doc/csr b/roles/lib_openshift/src/doc/csr
new file mode 100644
index 000000000..db72dbda3
--- /dev/null
+++ b/roles/lib_openshift/src/doc/csr
@@ -0,0 +1,80 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_adm_csr
+short_description: Module to approve or deny openshift certificate signing requests
+description:
+ - Wrapper around the openshift `oc adm certificate approve|deny <csr>` command.
+options:
+ state:
+ description:
+ - Approve, deny, and list are the only supported states for certificate signing requests.
+ required: false
+ default: approve
+ choices: [approve, deny, list]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ nodes:
+ description:
+ - A list of the names of the nodes for which to accept certificates
+ required: false
+ default: None
+ aliases: []
+ timeout:
+ description:
+ - This flag allows for a timeout value when approving nodes.
+ - A zero value for the timeout will block until the nodes have been accepted.
+ required: false
+ default: 30
+ aliases: []
+ approve_all:
+ description:
+ - This flag allows for the module to approve all CSRs that are found.
+ - This facilitates testing.
+ required: false
+ default: False
+ aliases: []
+ service_account:
+ description:
+ - This parameter tells the approval process which service account is being used for the requests
+ required: false
+ default: node-bootstrapper
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: Approve certificates for node xyz
+ oc_adm_csr:
+ nodes:
+ - xyz
+ timeout: 300
+
+- name: Approve certificates for node xyz, blocking until accepted
+ oc_adm_csr:
+ nodes:
+ - xyz
+ timeout: 0
+'''
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index 16770b22d..5a307cdb3 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -464,13 +464,12 @@ class Utils(object):
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
- import yum
+ import rpm
- yum_base = yum.YumBase()
- if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
- return True
+ transaction_set = rpm.TransactionSet()
+ rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
- return False
+ return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
diff --git a/roles/lib_openshift/src/sources.yml b/roles/lib_openshift/src/sources.yml
index e9b6bf261..4636f6b9b 100644
--- a/roles/lib_openshift/src/sources.yml
+++ b/roles/lib_openshift/src/sources.yml
@@ -9,6 +9,16 @@ oc_adm_ca_server_cert.py:
- class/oc_adm_ca_server_cert.py
- ansible/oc_adm_ca_server_cert.py
+oc_adm_csr.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/csr
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- class/oc_adm_csr.py
+- ansible/oc_adm_csr.py
+
oc_adm_manage_node.py:
- doc/generated
- doc/license
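sources.yml drives module generation: each entry lists source fragments that are concatenated, in order, into roles/lib_openshift/library/<module>. A rough sketch of that build step, assuming it runs from roles/lib_openshift/src (the project's real generate script may differ in detail):

```python
# Rough sketch of the sources.yml-driven concatenation that produces the
# generated modules; paths are taken relative to roles/lib_openshift/src.
import os
import yaml

def generate(sources_yml='sources.yml', out_dir='../library'):
    with open(sources_yml) as handle:
        sources = yaml.safe_load(handle)
    for module, fragments in sources.items():
        parts = [open(fragment).read() for fragment in fragments]
        with open(os.path.join(out_dir, module), 'w') as out:
            out.write(''.join(parts))

if __name__ == '__main__':
    generate()
```

So adding the oc_adm_csr.py entry above is all it takes for the doc, class, and ansible fragments to be assembled into the new module.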
diff --git a/roles/lib_openshift/src/test/integration/oc_adm_csr.yml b/roles/lib_openshift/src/test/integration/oc_adm_csr.yml
new file mode 100755
index 000000000..cad8e36f5
--- /dev/null
+++ b/roles/lib_openshift/src/test/integration/oc_adm_csr.yml
@@ -0,0 +1,28 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/
# ./oc_adm_csr.yml -M ../../../library -e "cli_master_test=$OPENSHIFT_MASTER"
+---
+- hosts: masters
+ gather_facts: no
+ user: root
+ tasks:
+ - name: list csrs
+ oc_adm_csr:
+ state: list
+ register: csrout
+
+ - debug: var=csrout
+
+ - name: approve csrs for the listed nodes
+ oc_adm_csr:
+ state: approve
+ nodes:
+ - ip-172-31-51-0-ec2-internal
+ - ip-172-31-51-246-ec2-internal
+ - ip-172-31-54-12-ec2-internal
+ - ip-172-31-58-173-ec2-internal
+ - ip-172-31-58-212-ec2-internal
+
+ register: csrout
+ - debug: var=csrout
diff --git a/roles/lib_utils/library/iam_cert23.py b/roles/lib_utils/library/iam_cert23.py
new file mode 100644
index 000000000..07b3d3bdf
--- /dev/null
+++ b/roles/lib_utils/library/iam_cert23.py
@@ -0,0 +1,314 @@
+#!/usr/bin/python
+# pylint: skip-file
+# flake8: noqa
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: iam_cert
+short_description: Manage server certificates for use on ELBs and CloudFront
+description:
+ - Allows for the management of server certificates
+version_added: "2.0"
+options:
+ name:
+ description:
+ - Name of certificate to add, update or remove.
+ required: true
+ new_name:
+ description:
+ - When state is present, this will update the name of the cert.
+ - The cert, key and cert_chain parameters will be ignored if this is defined.
+ new_path:
+ description:
+ - When state is present, this will update the path of the cert.
+ - The cert, key and cert_chain parameters will be ignored if this is defined.
+ state:
+ description:
+ - Whether to create (or update) or delete the certificate.
+ - If new_path or new_name is defined, specifying present will attempt to update them.
+ required: true
+ choices: [ "present", "absent" ]
+ path:
+ description:
+ - When creating or updating, specify the desired path of the certificate.
+ default: "/"
+ cert_chain:
+ description:
+ - The path to, or content of the CA certificate chain in PEM encoded format.
+ As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content.
+ cert:
+ description:
+ - The path to, or content of the certificate body in PEM encoded format.
+ As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content.
+ key:
+ description:
+ - The path to, or content of the private key in PEM encoded format.
+ As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content.
+ dup_ok:
+ description:
+ - By default the module will not upload a certificate that is already uploaded into AWS.
+ If set to True, it will upload the certificate as long as the name is unique.
+ default: False
+
+
+requirements: [ "boto" ]
+author: Jonathan I. Davila
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Basic server certificate upload from local file
+- iam_cert:
+ name: very_ssl
+ state: present
+ cert: "{{ lookup('file', 'path/to/cert') }}"
+ key: "{{ lookup('file', 'path/to/key') }}"
+ cert_chain: "{{ lookup('file', 'path/to/certchain') }}"
+
+# Basic server certificate upload
+- iam_cert:
+ name: very_ssl
+ state: present
+ cert: path/to/cert
+ key: path/to/key
+ cert_chain: path/to/certchain
+
+# Server certificate upload using key string
+- iam_cert:
+ name: very_ssl
+ state: present
+ path: "/a/cert/path/"
+ cert: body_of_somecert
+ key: vault_body_of_privcertkey
+ cert_chain: body_of_myverytrustedchain
+
+# Basic rename of existing certificate
+- iam_cert:
+ name: very_ssl
+ new_name: new_very_ssl
+ state: present
+
+'''
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, connect_to_aws
+import os
+
+try:
+ import boto
+ import boto.iam
+ import boto.ec2
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+
+def boto_exception(err):
+ '''generic error message handler'''
+ if hasattr(err, 'error_message'):
+ error = err.error_message
+ elif hasattr(err, 'message'):
+ error = err.message
+ else:
+ error = '%s: %s' % (Exception, err)
+
+ return error
+
+
+def cert_meta(iam, name):
+ certificate = iam.get_server_certificate(name).get_server_certificate_result.server_certificate
+ ocert = certificate.certificate_body
+ opath = certificate.server_certificate_metadata.path
+ ocert_id = certificate.server_certificate_metadata.server_certificate_id
+ upload_date = certificate.server_certificate_metadata.upload_date
+ exp = certificate.server_certificate_metadata.expiration
+ arn = certificate.server_certificate_metadata.arn
+ return opath, ocert, ocert_id, upload_date, exp, arn
+
+
+def dup_check(module, iam, name, new_name, cert, orig_cert_names, orig_cert_bodies, dup_ok):
+ update = False
+
+ # IAM cert names are case insensitive
+ names_lower = [n.lower() for n in [name, new_name] if n is not None]
+ orig_cert_names_lower = [ocn.lower() for ocn in orig_cert_names]
+
+ if any(ct in orig_cert_names_lower for ct in names_lower):
+ for i_name in names_lower:
+ if cert is not None:
+ try:
+ c_index = orig_cert_names_lower.index(i_name)
+ except ValueError: # list.index raises ValueError when the name is absent
+ continue
+ else:
+ # NOTE: remove the carriage return to strictly compare the cert bodies.
+ slug_cert = cert.replace('\r', '')
+ slug_orig_cert_bodies = orig_cert_bodies[c_index].replace('\r', '')
+ if slug_orig_cert_bodies == slug_cert:
+ update = True
+ break
+ elif slug_cert.startswith(slug_orig_cert_bodies):
+ update = True
+ break
+ elif slug_orig_cert_bodies != slug_cert:
+ module.fail_json(changed=False, msg='A cert with the name %s already exists and'
+ ' has a different certificate body associated'
+ ' with it. Certificates cannot have the same name' % orig_cert_names[c_index])
+ else:
+ update = True
+ break
+ elif cert in orig_cert_bodies and not dup_ok:
+ for crt_name, crt_body in zip(orig_cert_names, orig_cert_bodies):
+ if crt_body == cert:
+ module.fail_json(changed=False, msg='This certificate already'
+ ' exists under the name %s' % crt_name)
+
+ return update
+
+
+def cert_action(module, iam, name, cpath, new_name, new_path, state,
+ cert, key, cert_chain, orig_cert_names, orig_cert_bodies, dup_ok):
+ if state == 'present':
+ update = dup_check(module, iam, name, new_name, cert, orig_cert_names,
+ orig_cert_bodies, dup_ok)
+ if update:
+ opath, ocert, ocert_id, upload_date, exp, arn = cert_meta(iam, name)
+ changed = True
+ if new_name and new_path:
+ iam.update_server_cert(name, new_cert_name=new_name, new_path=new_path)
+ module.exit_json(changed=changed, original_name=name, new_name=new_name,
+ original_path=opath, new_path=new_path, cert_body=ocert,
+ upload_date=upload_date, expiration_date=exp, arn=arn)
+ elif new_name and not new_path:
+ iam.update_server_cert(name, new_cert_name=new_name)
+ module.exit_json(changed=changed, original_name=name, new_name=new_name,
+ cert_path=opath, cert_body=ocert,
+ upload_date=upload_date, expiration_date=exp, arn=arn)
+ elif not new_name and new_path:
+ iam.update_server_cert(name, new_path=new_path)
+ module.exit_json(changed=changed, name=new_name,
+ original_path=opath, new_path=new_path, cert_body=ocert,
+ upload_date=upload_date, expiration_date=exp, arn=arn)
+ else:
+ changed = False
+ module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
+ upload_date=upload_date, expiration_date=exp, arn=arn,
+ msg='No new path or name specified. No changes made')
+ else:
+ changed = True
+ iam.upload_server_cert(name, cert, key, cert_chain=cert_chain, path=cpath)
+ opath, ocert, ocert_id, upload_date, exp, arn = cert_meta(iam, name)
+ module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
+ upload_date=upload_date, expiration_date=exp, arn=arn)
+ elif state == 'absent':
+ if name in orig_cert_names:
+ changed = True
+ iam.delete_server_cert(name)
+ module.exit_json(changed=changed, deleted_cert=name)
+ else:
+ changed = False
+ module.exit_json(changed=changed, msg='Certificate with the name %s already absent' % name)
+
+
+def load_data(cert, key, cert_chain):
+ # if paths are provided rather than lookups read the files and return the contents
+ if cert and os.path.isfile(cert):
+ cert = open(cert, 'r').read().rstrip()
+ if key and os.path.isfile(key):
+ key = open(key, 'r').read().rstrip()
+ if cert_chain and os.path.isfile(cert_chain):
+ cert_chain = open(cert_chain, 'r').read()
+ return cert, key, cert_chain
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(),
+ cert=dict(),
+ key=dict(no_log=True),
+ cert_chain=dict(),
+ new_name=dict(),
+ path=dict(default='/'),
+ new_path=dict(),
+ dup_ok=dict(type='bool')
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['new_path', 'key'],
+ ['new_path', 'cert'],
+ ['new_path', 'cert_chain'],
+ ['new_name', 'key'],
+ ['new_name', 'cert'],
+ ['new_name', 'cert_chain'],
+ ],
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg="Boto is required for this module")
+
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
+
+ try:
+ if region:
+ iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
+ else:
+ iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg=str(e))
+
+ state = module.params.get('state')
+ name = module.params.get('name')
+ path = module.params.get('path')
+ new_name = module.params.get('new_name')
+ new_path = module.params.get('new_path')
+ dup_ok = module.params.get('dup_ok')
+ if state == 'present' and not new_name and not new_path:
+ cert, key, cert_chain = load_data(cert=module.params.get('cert'),
+ key=module.params.get('key'),
+ cert_chain=module.params.get('cert_chain'))
+ else:
+ cert = key = cert_chain = None
+
+ orig_cert_names = [ctb['server_certificate_name'] for ctb in
+ iam.get_all_server_certs().list_server_certificates_result.server_certificate_metadata_list]
+ orig_cert_bodies = [iam.get_server_certificate(thing).get_server_certificate_result.certificate_body
+ for thing in orig_cert_names]
+ if new_name == name:
+ new_name = None
+ if new_path == path:
+ new_path = None
+
+ changed = False
+ try:
+ cert_action(module, iam, name, path, new_name, new_path, state,
+ cert, key, cert_chain, orig_cert_names, orig_cert_bodies, dup_ok)
+ except boto.exception.BotoServerError as err:
+ module.fail_json(changed=changed, msg=str(err), debug=[cert, key])
+
+
+if __name__ == '__main__':
+ main()
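dup_check above compares IAM certificate names case-insensitively and bodies with carriage returns stripped; a matching (or prefix-matching) body means update, while a name collision with a different body is fatal. A condensed sketch of those rules:

```python
# Condensed sketch of the dup_check comparison rules.
def is_update(name, cert, existing):
    '''existing maps lowercase cert name -> body already stored in IAM.'''
    body = existing.get(name.lower())
    if body is None:
        return False  # unseen name: upload rather than update
    old = body.replace('\r', '')
    new = cert.replace('\r', '')
    if new == old or new.startswith(old):
        return True   # same body (possibly with an extended chain)
    raise ValueError('A cert named %s already exists with a different body' % name)

assert is_update('Very_SSL', 'PEM\n', {'very_ssl': 'PEM\r\n'})
assert not is_update('fresh', 'PEM\n', {})
```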
diff --git a/roles/lib_utils/library/oo_iam_kms.py b/roles/lib_utils/library/oo_iam_kms.py
new file mode 100644
index 000000000..c85745f01
--- /dev/null
+++ b/roles/lib_utils/library/oo_iam_kms.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python
+'''
+ansible module for creating AWS IAM KMS keys
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# AWS IAM KMS ansible module
+#
+#
+# Copyright 2016 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Jenkins environment doesn't have all the required libraries
+# pylint: disable=import-error
+import time
+import boto3
+# Ansible modules need this wildcard import
+# pylint: disable=unused-wildcard-import, wildcard-import, redefined-builtin
+from ansible.module_utils.basic import AnsibleModule
+
+AWS_ALIAS_URL = "http://docs.aws.amazon.com/kms/latest/developerguide/programming-aliases.html"
+
+
+class AwsIamKms(object):
+ '''
+ ansible module for AWS IAM KMS
+ '''
+
+ def __init__(self):
+ ''' constructor '''
+ self.module = None
+ self.kms_client = None
+ self.aliases = None
+
+ @staticmethod
+ def valid_alias_name(user_alias):
+ ''' AWS KMS aliases must start with 'alias/' '''
+ valid_start = 'alias/'
+ if user_alias.startswith(valid_start):
+ return True
+
+ return False
+
+ def get_all_kms_info(self):
+ '''fetch all kms info and return them
+
+ list_keys doesn't have information regarding aliases
+ list_aliases doesn't have the full kms arn
+
+ fetch both and join them on the targetKeyId
+ '''
+ aliases = self.kms_client.list_aliases()['Aliases']
+ keys = self.kms_client.list_keys()['Keys']
+
+ for alias in aliases:
+ for key in keys:
+ if 'TargetKeyId' in alias and 'KeyId' in key:
+ if alias['TargetKeyId'] == key['KeyId']:
+ alias.update(key)
+
+ return aliases
+
+ def get_kms_entry(self, user_alias, alias_list):
+ ''' return single alias details from list of aliases '''
+ for alias in alias_list:
+ if user_alias == alias.get('AliasName', False):
+ return alias
+
+ msg = "Did not find alias {}".format(user_alias)
+ self.module.exit_json(failed=True, results=msg)
+
+ @staticmethod
+ def exists(user_alias, alias_list):
+ ''' Check if KMS alias already exists '''
+ for alias in alias_list:
+ if user_alias == alias.get('AliasName'):
+ return True
+
+ return False
+
+ def main(self):
+ ''' entry point for module '''
+
+ self.module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='list', choices=['list', 'present'], type='str'),
+ region=dict(default=None, required=True, type='str'),
+ alias=dict(default=None, type='str'),
+ # description default cannot be None
+ description=dict(default='', type='str'),
+ aws_access_key=dict(default=None, type='str'),
+ aws_secret_key=dict(default=None, type='str'),
+ ),
+ )
+
+ state = self.module.params['state']
+ aws_access_key = self.module.params['aws_access_key']
+ aws_secret_key = self.module.params['aws_secret_key']
+ if aws_access_key and aws_secret_key:
+ boto3.setup_default_session(aws_access_key_id=aws_access_key,
+ aws_secret_access_key=aws_secret_key,
+ region_name=self.module.params['region'])
+ else:
+ boto3.setup_default_session(region_name=self.module.params['region'])
+
+ self.kms_client = boto3.client('kms')
+
+ aliases = self.get_all_kms_info()
+
+ if state == 'list':
+ if self.module.params['alias'] is not None:
+ user_kms = self.get_kms_entry(self.module.params['alias'],
+ aliases)
+ self.module.exit_json(changed=False, results=user_kms,
+ state="list")
+ else:
+ self.module.exit_json(changed=False, results=aliases,
+ state="list")
+
+ if state == 'present':
+
+ # early sanity check to make sure the alias name conforms with
+ # AWS alias name requirements
+ if not self.valid_alias_name(self.module.params['alias']):
+ self.module.exit_json(failed=True, changed=False,
+ results="Alias must start with the prefix " +
+ "'alias/'. Please see " + AWS_ALIAS_URL,
+ state='present')
+
+ if not self.exists(self.module.params['alias'], aliases):
+ # if we didn't find it, create it
+ response = self.kms_client.create_key(KeyUsage='ENCRYPT_DECRYPT',
+ Description=self.module.params['description'])
+ kid = response['KeyMetadata']['KeyId']
+ response = self.kms_client.create_alias(AliasName=self.module.params['alias'],
+ TargetKeyId=kid)
+ # sleep for a bit so that the KMS data can be queried
+ time.sleep(10)
+ # get details for newly created KMS entry
+ new_alias_list = self.kms_client.list_aliases()['Aliases']
+ user_kms = self.get_kms_entry(self.module.params['alias'],
+ new_alias_list)
+
+ self.module.exit_json(changed=True, results=user_kms,
+ state='present')
+
+ # already exists, normally we would check whether we need to update it
+ # but this module isn't written to allow changing the alias name
+ # or changing whether the key is enabled/disabled
+ user_kms = self.get_kms_entry(self.module.params['alias'], aliases)
+ self.module.exit_json(changed=False, results=user_kms,
+ state="present")
+
+ self.module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+
+if __name__ == '__main__':
+ AwsIamKms().main()
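get_all_kms_info joins the two KMS listings because list_aliases lacks the key ARN and list_keys lacks the alias name; entries merge wherever TargetKeyId equals KeyId. The join in isolation, with sample payloads:

```python
# The alias/key join from get_all_kms_info, isolated with sample data.
def join_kms_info(aliases, keys):
    for alias in aliases:
        for key in keys:
            if 'TargetKeyId' in alias and alias['TargetKeyId'] == key.get('KeyId'):
                alias.update(key)  # fold KeyId/KeyArn into the alias entry
    return aliases

aliases = [{'AliasName': 'alias/app', 'TargetKeyId': 'k-1'}]
keys = [{'KeyId': 'k-1', 'KeyArn': 'arn:aws:kms:us-east-1:123456789012:key/k-1'}]
print(join_kms_info(aliases, keys))
```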
diff --git a/roles/nuage_common/defaults/main.yaml b/roles/nuage_common/defaults/main.yaml
index a7803c0ee..919e3aa7b 100644
--- a/roles/nuage_common/defaults/main.yaml
+++ b/roles/nuage_common/defaults/main.yaml
@@ -10,5 +10,8 @@ nuage_ca_serial: "{{ nuage_ca_dir }}/nuageMonCA.serial.txt"
nuage_master_mon_dir: /usr/share/nuage-openshift-monitor
nuage_node_plugin_dir: /usr/share/vsp-openshift
+nuage_node_cni_bin_dir: /opt/cni/bin
+nuage_node_cni_netconf_dir: /etc/cni/net.d
+
nuage_mon_rest_server_port: "{{ nuage_openshift_monitor_rest_server_port | default('9443') }}"
nuage_mon_cert_validity_period: "{{ nuage_cert_validity_period | default('3650') }}"
diff --git a/roles/nuage_common/tasks/main.yml b/roles/nuage_common/tasks/main.yml
new file mode 100644
index 000000000..6c8c9f8d2
--- /dev/null
+++ b/roles/nuage_common/tasks/main.yml
@@ -0,0 +1,27 @@
+---
+- name: Set the Nuage plugin openshift directory fact to handle Atomic host install
+ set_fact:
+ nuage_node_plugin_dir: /var/usr/share/vsp-openshift
+ when: openshift.common.is_atomic | bool
+
+- name: Set the Nuage CNI network config directory fact to handle Atomic host install
+ set_fact:
+ nuage_node_cni_netconf_dir: /var/etc/cni/net.d/
+ when: openshift.common.is_atomic | bool
+
+- name: Set the Nuage CNI binary directory fact to handle Atomic host install
+ set_fact:
+ nuage_node_cni_bin_dir: /var/opt/cni/bin/
+ when: openshift.common.is_atomic | bool
+
+- name: Ensure CNI plugin config dir exists before daemon set install
+ become: yes
+ file: path="{{ nuage_node_plugin_dir }}" state=directory
+
+- name: Ensure CNI netconf directory exists before daemon set install
+ become: yes
+ file: path="{{ nuage_node_cni_netconf_dir }}" state=directory
+
+- name: Ensure CNI plugin binary directory exists before daemon set install
+ become: yes
+ file: path="{{ nuage_node_cni_bin_dir }}" state=directory
diff --git a/roles/nuage_master/defaults/main.yml b/roles/nuage_master/defaults/main.yml
index ffab25775..5f1d8686a 100644
--- a/roles/nuage_master/defaults/main.yml
+++ b/roles/nuage_master/defaults/main.yml
@@ -1,6 +1,6 @@
---
-r_nuage_master_firewall_enabled: True
-r_nuage_master_use_firewalld: False
+r_nuage_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_nuage_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
nuage_mon_rest_server_port: '9443'
diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml
index ad7bbb111..21da6b953 100644
--- a/roles/nuage_master/handlers/main.yaml
+++ b/roles/nuage_master/handlers/main.yaml
@@ -1,8 +1,4 @@
---
-- name: restart nuage-openshift-monitor
- become: yes
- systemd: name=nuage-openshift-monitor state=restarted
-
- name: restart master api
systemd: name={{ openshift.common.service_type }}-master-api state=restarted
when: >
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
index d0363c981..f3c487132 100644
--- a/roles/nuage_master/tasks/main.yaml
+++ b/roles/nuage_master/tasks/main.yaml
@@ -3,17 +3,64 @@
include: firewall.yml
static: yes
+- name: Set the Nuage certificate directory fact for Atomic hosts
+ set_fact:
+ cert_output_dir: /var/usr/share/nuage-openshift-monitor
+ when: openshift.common.is_atomic | bool
+
+- name: Set the Nuage kubeconfig file path fact for Atomic hosts
+ set_fact:
+ kube_config: /var/usr/share/nuage-openshift-monitor/nuage.kubeconfig
+ when: openshift.common.is_atomic | bool
+
+- name: Set the Nuage monitor yaml location fact for Atomic hosts
+ set_fact:
+ kubemon_yaml: /var/usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml
+ when: openshift.common.is_atomic | bool
+
+- name: Set the Nuage monitor certs location fact for Atomic hosts
+ set_fact:
+ nuage_master_crt_dir: /var/usr/share/nuage-openshift-monitor/
+ when: openshift.common.is_atomic | bool
+
+- name: Set the Nuage master config directory for daemon sets install
+ set_fact:
+ nuage_master_config_dsets_mount_dir: /var/usr/share/
+ when: master_host_type == "is_atomic"
+
+- name: Set the Nuage node config directory for daemon sets install
+ set_fact:
+ nuage_node_config_dsets_mount_dir: /var/usr/share/
+ when: slave_host_type == "is_atomic"
+
+- name: Set the Nuage CNI plugin binary directory for daemon sets install
+ set_fact:
+ nuage_cni_bin_dsets_mount_dir: /var/opt/cni/bin
+ when: openshift.common.is_atomic | bool
+
- name: Create directory /usr/share/nuage-openshift-monitor
become: yes
file: path=/usr/share/nuage-openshift-monitor state=directory
+ when: not openshift.common.is_atomic | bool
-- name: Create the log directory
+- name: Create directory /var/usr/share/nuage-openshift-monitor
become: yes
- file: path={{ nuage_mon_rest_server_logdir }} state=directory
+ file: path=/var/usr/share/nuage-openshift-monitor state=directory
+ when: openshift.common.is_atomic | bool
+
+- name: Create directory /var/usr/bin for monitor binary on atomic
+ become: yes
+ file: path=/var/usr/bin state=directory
+ when: openshift.common.is_atomic | bool
-- name: Install Nuage Openshift Monitor
+- name: Create CNI bin directory /var/opt/cni/bin
become: yes
- yum: name={{ nuage_openshift_rpm }} state=present
+ file: path=/var/opt/cni/bin state=directory
+ when: openshift.common.is_atomic | bool
+
+- name: Create the log directory
+ become: yes
+ file: path={{ nuage_mon_rest_server_logdir }} state=directory
- include: serviceaccount.yml
@@ -45,10 +92,32 @@
become: yes
copy: src="{{ vsd_user_key_file }}" dest="{{ cert_output_dir }}/{{ vsd_user_key_file | basename }}"
-- name: Create nuage-openshift-monitor.yaml
+- name: Create Nuage master daemon set yaml file
+ become: yes
+ template: src=nuage-master-config-daemonset.j2 dest=/etc/nuage-master-config-daemonset.yaml owner=root mode=0644
+
+- name: Create Nuage node daemon set yaml file
become: yes
- template: src=nuage-openshift-monitor.j2 dest=/usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml owner=root mode=0644
+ template: src=nuage-node-config-daemonset.j2 dest=/etc/nuage-node-config-daemonset.yaml owner=root mode=0644
+
+- name: Add the service account to the privileged scc to have root permissions
+ shell: oc adm policy add-scc-to-user privileged system:serviceaccount:openshift-infra:daemonset-controller
+ ignore_errors: true
+ when: inventory_hostname == groups.oo_first_master.0
+
+- name: Spawn Nuage Master monitor daemon sets pod
+ shell: oc create -f /etc/nuage-master-config-daemonset.yaml
+ ignore_errors: true
+ when: inventory_hostname == groups.oo_first_master.0
+
+- name: Spawn Nuage CNI daemon sets pod
+ shell: oc create -f /etc/nuage-node-config-daemonset.yaml
+ ignore_errors: true
+ when: inventory_hostname == groups.oo_first_master.0
+
+- name: Restart daemons
+ command: /bin/true
notify:
- restart master api
- restart master controllers
- - restart nuage-openshift-monitor
+ ignore_errors: true
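The daemon-set tasks run once on the first master and lean on ignore_errors for idempotence, since re-running oc create against an existing object fails. A hedged sketch of an alternative create-if-absent wrapper (not what the role does), assuming oc is on PATH with admin credentials:

```python
# Hypothetical create-if-absent wrapper around `oc`, shown only as an
# alternative to the ignore_errors approach in the tasks above.
import subprocess

def oc_create_if_absent(manifest, kind, name, namespace='kube-system'):
    '''Create the object from manifest unless kind/name already exists.'''
    exists = subprocess.call(
        ['oc', 'get', kind, name, '-n', namespace],
        stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) == 0
    if not exists:
        subprocess.check_call(['oc', 'create', '-f', manifest])
    return not exists  # True when something was created

oc_create_if_absent('/etc/nuage-master-config-daemonset.yaml',
                    'daemonset', 'nuage-master-config')
```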
diff --git a/roles/nuage_master/templates/nuage-master-config-daemonset.j2 b/roles/nuage_master/templates/nuage-master-config-daemonset.j2
new file mode 100755
index 000000000..612d689c2
--- /dev/null
+++ b/roles/nuage_master/templates/nuage-master-config-daemonset.j2
@@ -0,0 +1,111 @@
+# This ConfigMap carries the Nuage VSP configuration for master nodes
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: nuage-master-config
+ namespace: kube-system
+data:
+ # This will generate the required Nuage configuration
+ # on master nodes
+ monitor_yaml_config: |
+
+ # .kubeconfig that includes the nuage service account
+ kubeConfig: {{ nuage_master_crt_dir }}/nuage.kubeconfig
+ # name of the nuage service account, or another account with 'cluster-reader'
+ # permissions
+ # Openshift master config file
+ masterConfig: /etc/origin/master/master-config.yaml
+ # URL of the VSD Architect
+ vsdApiUrl: {{ vsd_api_url }}
+ # API version to query against. Usually "v3_2"
+ vspVersion: {{ vsp_version }}
+ # Name of the enterprise in which pods will reside
+ enterpriseName: {{ enterprise }}
+ # Name of the domain in which pods will reside
+ domainName: {{ domain }}
+ # VSD generated user certificate file location on master node
+ userCertificateFile: {{ nuage_master_crt_dir }}/{{ vsd_user }}.pem
+ # VSD generated user key file location on master node
+ userKeyFile: {{ nuage_master_crt_dir }}/{{ vsd_user }}-Key.pem
+ # Location where logs should be saved
+ log_dir: /var/log/nuage-openshift-monitor
+ # Monitor rest server parameters
+ # Logging level for the nuage openshift monitor
+ # allowed options are: 0 => INFO, 1 => WARNING, 2 => ERROR, 3 => FATAL
+ logLevel: 0
+ # Parameters related to the nuage monitor REST server
+ nuageMonServer:
+ URL: 0.0.0.0:9443
+ certificateDirectory: {{ nuage_master_crt_dir }}
+ # etcd config required for HA
+ etcdClientConfig:
+ ca: {{ nuage_master_crt_dir }}/nuageMonCA.crt
+ certFile: {{ nuage_master_crt_dir }}/nuageMonServer.crt
+ keyFile: {{ nuage_master_crt_dir }}/master.etcd-client.key
+ urls:
+ {% for etcd_url in openshift.master.etcd_urls %}
+ - {{ etcd_url }}
+ {% endfor %}
+
+---
+
+# This manifest installs Nuage master node configuration on
+# each Nuage master node in a cluster.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+ name: nuage-master-config
+ namespace: kube-system
+ labels:
+ k8s-app: nuage-master-config
+spec:
+ selector:
+ matchLabels:
+ k8s-app: nuage-master-config
+ template:
+ metadata:
+ labels:
+ k8s-app: nuage-master-config
+ spec:
+ hostNetwork: true
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ operator: Exists
+ nodeSelector:
+ install-monitor: "true"
+ containers:
+ # This container configures Nuage Master node
+ - name: install-nuage-master-config
+ image: nuage/master:{{ nuage_monitor_container_image_version }}
+ ports:
+ - containerPort: 9443
+ hostPort: 9443
+ command: ["/configure-master.sh"]
+ args: ["ose", "{{ master_host_type }}"]
+ securityContext:
+ privileged: true
+ env:
+ # nuage-openshift-monitor.yaml config to install on each slave node.
+ - name: NUAGE_MASTER_VSP_CONFIG
+ valueFrom:
+ configMapKeyRef:
+ name: nuage-master-config
+ key: monitor_yaml_config
+ volumeMounts:
+ - mountPath: /var/log
+ name: cni-log-dir
+ - mountPath: {{ nuage_master_config_dsets_mount_dir }}
+ name: usr-share-dir
+ - mountPath: /etc/origin/
+ name: master-config-dir
+ volumes:
+ - name: cni-log-dir
+ hostPath:
+ path: /var/log
+ - name: usr-share-dir
+ hostPath:
+ path: {{ nuage_master_config_dsets_mount_dir }}
+ - name: master-config-dir
+ hostPath:
+ path: /etc/origin/
diff --git a/roles/nuage_master/templates/nuage-node-config-daemonset.j2 b/roles/nuage_master/templates/nuage-node-config-daemonset.j2
new file mode 100755
index 000000000..02e9a1563
--- /dev/null
+++ b/roles/nuage_master/templates/nuage-node-config-daemonset.j2
@@ -0,0 +1,206 @@
+# This ConfigMap carries the Nuage VSP configuration for worker nodes
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: nuage-config
+ namespace: kube-system
+data:
+ # This will generate the required Nuage vsp-openshift.yaml
+ # config on each slave node
+ plugin_yaml_config: |
+ clientCert: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/client.crt
+ # The key to the certificate in clientCert above
+ clientKey: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/client.key
+ # The certificate authority's certificate for the local kubelet. Usually the
+ # same as the CA cert used to create the client Cert/Key pair.
+ CACert: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/ca.crt
+ # Name of the enterprise in which pods will reside
+ enterpriseName: {{ enterprise }}
+ # Name of the domain in which pods will reside
+ domainName: {{ domain }}
+ # Name of the VSD user in admin group
+ vsdUser: {{ vsd_user }}
+ # IP address and port number of master API server
+ masterApiServer: {{ api_server_url }}
+ # REST server URL
+ nuageMonRestServer: {{ nuage_mon_rest_server_url }}
+ # Bridge name for the docker bridge
+ dockerBridgeName: docker0
+ # Certificate for connecting to the openshift monitor REST api
+ nuageMonClientCert: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/nuageMonClient.crt
+ # Key to the certificate in restClientCert
+ nuageMonClientKey: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/nuageMonClient.key
+ # CA certificate for verifying the master's rest server
+ nuageMonServerCA: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/nuageMonCA.crt
+ # Nuage vport mtu size
+ interfaceMTU: {{ nuage_vport_mtu }}
+ # Logging level for the plugin
+ # allowed options are: "dbg", "info", "warn", "err", "emer", "off"
+ logLevel: 3
+
+ # This will generate the required Nuage CNI yaml configuration
+ cni_yaml_config: |
+ vrsendpoint: "/var/run/openvswitch/db.sock"
+ vrsbridge: "alubr0"
+ monitorinterval: 60
+ cniversion: 0.2.0
+ loglevel: "info"
+ portresolvetimer: 60
+ logfilesize: 1
+ vrsconnectionchecktimer: 180
+ mtu: 1450
+ staleentrytimeout: 600
+
+---
+
+# This manifest installs Nuage CNI plugins and network config on
+# each worker node in Openshift cluster
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+ name: nuage-cni-ds
+ namespace: kube-system
+ labels:
+ k8s-app: nuage-cni-ds
+spec:
+ selector:
+ matchLabels:
+ k8s-app: nuage-cni-ds
+ template:
+ metadata:
+ labels:
+ k8s-app: nuage-cni-ds
+ spec:
+ hostNetwork: true
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ operator: Exists
+ containers:
+ # This container installs Nuage CNI binaries
+ # and CNI network config file on each node.
+ - name: install-nuage-cni
+ image: nuage/cni:{{ nuage_cni_container_image_version }}
+ command: ["/install-cni.sh"]
+ args: ["nuage-cni-openshift", "{{ slave_host_type }}"]
+ securityContext:
+ privileged: true
+ env:
+ # Nuage vsp-openshift.yaml config to install on each slave node.
+ - name: NUAGE_VSP_CONFIG
+ valueFrom:
+ configMapKeyRef:
+ name: nuage-config
+ key: plugin_yaml_config
+ # Nuage nuage-cni.yaml config to install on each slave node.
+ - name: NUAGE_CNI_YAML_CONFIG
+ valueFrom:
+ configMapKeyRef:
+ name: nuage-config
+ key: cni_yaml_config
+ # Nuage cluster network CIDR for iptables configuration
+ - name: NUAGE_CLUSTER_NW_CIDR
+ value: "{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}"
+ volumeMounts:
+ - mountPath: /host/opt/cni/bin
+ name: cni-bin-dir
+ - mountPath: /host/etc/cni/net.d
+ name: cni-net-dir
+ - mountPath: /etc/default
+ name: cni-yaml-dir
+ - mountPath: /var/run
+ name: var-run-dir
+ - mountPath: /var/log
+ name: cni-log-dir
+ - mountPath: {{ nuage_node_config_dsets_mount_dir }}
+ name: usr-share-dir
+ volumes:
+ - name: cni-bin-dir
+ hostPath:
+ path: {{ nuage_cni_bin_dsets_mount_dir }}
+ - name: cni-net-dir
+ hostPath:
+ path: {{ nuage_cni_netconf_dsets_mount_dir }}
+ - name: cni-yaml-dir
+ hostPath:
+ path: /etc/default
+ - name: var-run-dir
+ hostPath:
+ path: /var/run
+ - name: cni-log-dir
+ hostPath:
+ path: /var/log
+ - name: usr-share-dir
+ hostPath:
+ path: {{ nuage_node_config_dsets_mount_dir }}
+
+---
+
+# This manifest installs Nuage VRS on
+# each worker node in an Openshift cluster.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+ name: nuage-vrs-ds
+ namespace: kube-system
+ labels:
+ k8s-app: nuage-vrs-ds
+spec:
+ selector:
+ matchLabels:
+ k8s-app: nuage-vrs-ds
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: nuage-vrs-ds
+ spec:
+ hostNetwork: true
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ operator: Exists
+ containers:
+ # This container installs Nuage VRS running as a
+ # container on each worker node
+ - name: install-nuage-vrs
+ image: nuage/vrs:{{ nuage_vrs_container_image_version }}
+ securityContext:
+ privileged: true
+ env:
+ # Configure parameters for VRS openvswitch file
+ - name: NUAGE_ACTIVE_CONTROLLER
+ value: "{{ vsc_active_ip }}"
+ - name: NUAGE_STANDBY_CONTROLLER
+ value: "{{ vsc_standby_ip }}"
+ - name: NUAGE_PLATFORM
+ value: '"kvm, k8s"'
+ - name: NUAGE_K8S_SERVICE_IPV4_SUBNET
+ value: '192.168.0.0\/16'
+ - name: NUAGE_NETWORK_UPLINK_INTF
+ value: "eth0"
+ volumeMounts:
+ - mountPath: /var/run
+ name: vrs-run-dir
+ - mountPath: /var/log
+ name: vrs-log-dir
+ - mountPath: /sys/module
+ name: sys-mod-dir
+ readOnly: true
+ - mountPath: /lib/modules
+ name: lib-mod-dir
+ readOnly: true
+ volumes:
+ - name: vrs-run-dir
+ hostPath:
+ path: /var/run
+ - name: vrs-log-dir
+ hostPath:
+ path: /var/log
+ - name: sys-mod-dir
+ hostPath:
+ path: /sys/module
+ - name: lib-mod-dir
+ hostPath:
+ path: /lib/modules
diff --git a/roles/nuage_master/templates/nuage-openshift-monitor.j2 b/roles/nuage_master/templates/nuage-openshift-monitor.j2
deleted file mode 100644
index e077128a4..000000000
--- a/roles/nuage_master/templates/nuage-openshift-monitor.j2
+++ /dev/null
@@ -1,41 +0,0 @@
-# .kubeconfig that includes the nuage service account
-kubeConfig: {{ kube_config }}
-# name of the nuage service account, or another account with 'cluster-reader'
-# permissions
-# Openshift master config file
-masterConfig: {{ master_config_yaml }}
-# URL of the VSD Architect
-vsdApiUrl: {{ vsd_api_url }}
-# API version to query against. Usually "v3_2"
-vspVersion: {{ vsp_version }}
-# File containing a VSP license to install. Only necessary if no license has
-# been installed on the VSD Architect before, only valid for standalone vsd install
-# licenseFile: "/path/to/base_vsp_license.txt"
-# Name of the enterprise in which pods will reside
-enterpriseName: {{ enterprise }}
-# Name of the domain in which pods will reside
-domainName: {{ domain }}
-# VSD generated user certificate file location on master node
-userCertificateFile: {{ cert_output_dir }}/{{ vsd_user_cert_file | basename }}
-# VSD generated user key file location on master node
-userKeyFile: {{ cert_output_dir }}/{{ vsd_user_key_file | basename }}
-# Location where logs should be saved
-log_dir: {{ nuage_mon_rest_server_logdir }}
-# Monitor rest server parameters
-# Logging level for the nuage openshift monitor
-# allowed options are: 0 => INFO, 1 => WARNING, 2 => ERROR, 3 => FATAL
-logLevel: {{ nuage_mon_log_level }}
-# Parameters related to the nuage monitor REST server
-nuageMonServer:
- URL: {{ nuage_mon_rest_server_url }}
- certificateDirectory: {{ cert_output_dir }}
-# etcd config required for HA
-etcdClientConfig:
- ca: {{ openshift_master_config_dir }}/{{ "ca.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }}
- certFile: {{ openshift_master_config_dir }}/master.etcd-client.crt
- keyFile: {{ openshift_master_config_dir }}/master.etcd-client.key
- urls:
-{% for etcd_url in openshift.master.etcd_urls %}
- - {{ etcd_url }}
-{% endfor %}
-
diff --git a/roles/nuage_master/vars/main.yaml b/roles/nuage_master/vars/main.yaml
index 57d5d2595..114514d7c 100644
--- a/roles/nuage_master/vars/main.yaml
+++ b/roles/nuage_master/vars/main.yaml
@@ -22,6 +22,18 @@ nuage_mon_rest_server_host: "{{ openshift.master.cluster_hostname | default(open
nuage_master_crt_dir: /usr/share/nuage-openshift-monitor
nuage_service_account: system:serviceaccount:default:nuage
+nuage_master_config_dsets_mount_dir: /usr/share/
+nuage_node_config_dsets_mount_dir: /usr/share/
+nuage_cni_bin_dsets_mount_dir: /opt/cni/bin
+nuage_cni_netconf_dsets_mount_dir: /etc/cni/net.d
+nuage_monitor_container_image_version: "{{ nuage_monitor_image_version | default('v5.1.1') }}"
+nuage_vrs_container_image_version: "{{ nuage_vrs_image_version | default('v5.1.1') }}"
+nuage_cni_container_image_version: "{{ nuage_cni_image_version | default('v5.1.1') }}"
+api_server_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+nuage_vport_mtu: "{{ nuage_interface_mtu | default('1460') }}"
+master_host_type: "{{ master_base_host_type | default('is_rhel_server') }}"
+slave_host_type: "{{ slave_base_host_type | default('is_rhel_server') }}"
+
nuage_tasks:
- resource_kind: cluster-role
resource_name: cluster-reader
diff --git a/roles/nuage_node/defaults/main.yml b/roles/nuage_node/defaults/main.yml
index b3d2e3cec..9a2e34387 100644
--- a/roles/nuage_node/defaults/main.yml
+++ b/roles/nuage_node/defaults/main.yml
@@ -1,6 +1,6 @@
---
-r_nuage_node_firewall_enabled: True
-r_nuage_node_use_firewalld: False
+r_nuage_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_nuage_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
nuage_mon_rest_server_port: '9443'
diff --git a/roles/nuage_node/handlers/main.yaml b/roles/nuage_node/handlers/main.yaml
index 8384856ff..e68ae74bd 100644
--- a/roles/nuage_node/handlers/main.yaml
+++ b/roles/nuage_node/handlers/main.yaml
@@ -1,11 +1,7 @@
---
-- name: restart vrs
- become: yes
- systemd: name=openvswitch state=restarted
-
- name: restart node
become: yes
- systemd: name={{ openshift.common.service_type }}-node state=restarted
+ systemd: name={{ openshift.common.service_type }}-node daemon-reload=yes state=restarted
- name: save iptable rules
become: yes
diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml
index 66d6ef4ca..9db9dbb6a 100644
--- a/roles/nuage_node/tasks/main.yaml
+++ b/roles/nuage_node/tasks/main.yaml
@@ -1,28 +1,18 @@
---
-- name: Install Nuage VRS
- become: yes
- yum: name={{ vrs_rpm }} state=present
-
-- name: Set the uplink interface
- become: yes
- lineinfile: dest={{ vrs_config }} regexp=^NETWORK_UPLINK_INTF line='NETWORK_UPLINK_INTF={{ uplink_interface }}'
-
-- name: Set the Active Controller
- become: yes
- lineinfile: dest={{ vrs_config }} regexp=^ACTIVE_CONTROLLER line='ACTIVE_CONTROLLER={{ vsc_active_ip }}'
-
-- name: Set the K8S/OSE Cluster service CIDR
- become: yes
- lineinfile: dest={{ vrs_config }} regexp=^K8S_SERVICE_IPV4_SUBNET line='K8S_SERVICE_IPV4_SUBNET={{ k8s_cluster_service_cidr }}'
+- name: Set the Nuage plugin openshift directory fact for Atomic hosts
+ set_fact:
+ vsp_openshift_dir: /var/usr/share/vsp-openshift
+ when: openshift.common.is_atomic | bool
-- name: Set the Standby Controller
- become: yes
- lineinfile: dest={{ vrs_config }} regexp=^STANDBY_CONTROLLER line='STANDBY_CONTROLLER={{ vsc_standby_ip }}'
- when: vsc_standby_ip is defined
+- name: Set the Nuage CNI binary directory fact for Atomic hosts
+ set_fact:
+ cni_bin_dir: /var/opt/cni/bin/
+ when: openshift.common.is_atomic | bool
-- name: Install plugin rpm
- become: yes
- yum: name={{ plugin_rpm }} state=present
+- name: Set the Nuage plugin certs directory fact for Atomic hosts
+ set_fact:
+ nuage_plugin_crt_dir: /var/usr/share/vsp-openshift
+ when: openshift.common.is_atomic | bool
- name: Assure CNI conf dir exists
become: yes
@@ -32,13 +22,6 @@
become: yes
file: path="{{ cni_bin_dir }}" state=directory
-- name: Install CNI loopback plugin
- become: yes
- copy:
- src: "{{ k8s_cni_loopback_plugin }}"
- dest: "{{ cni_bin_dir }}/{{ k8s_cni_loopback_plugin | basename }}"
- mode: 0755
-
- name: Copy the certificates and keys
become: yes
copy: src="/tmp/{{ item }}" dest="{{ vsp_openshift_dir }}/{{ item }}"
@@ -50,12 +33,16 @@
- include: certificates.yml
-- name: Set the vsp-openshift.yaml
+- name: Add additional Docker mounts for Nuage for atomic hosts
become: yes
- template: src=vsp-openshift.j2 dest={{ vsp_openshift_yaml }} owner=root mode=0644
+ lineinfile: dest="{{ openshift_atomic_node_config_file }}" line="{{ nuage_atomic_docker_additional_mounts }}"
+ when: openshift.common.is_atomic | bool
+
+- name: Restart node services
+ command: /bin/true
notify:
- - restart vrs
- restart node
+ ignore_errors: true
- include: iptables.yml
diff --git a/roles/nuage_node/templates/vsp-openshift.j2 b/roles/nuage_node/templates/vsp-openshift.j2
deleted file mode 100644
index f6bccebc2..000000000
--- a/roles/nuage_node/templates/vsp-openshift.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-clientCert: {{ client_cert }}
-# The key to the certificate in clientCert above
-clientKey: {{ client_key }}
-# The certificate authority's certificate for the local kubelet. Usually the
-# same as the CA cert used to create the client Cert/Key pair.
-CACert: {{ ca_cert }}
-# Name of the enterprise in which pods will reside
-enterpriseName: {{ enterprise }}
-# Name of the domain in which pods will reside
-domainName: {{ domain }}
-# Name of the VSD user in admin group
-vsdUser: {{ vsd_user }}
-# IP address and port number of master API server
-masterApiServer: {{ api_server }}
-# REST server URL
-nuageMonRestServer: {{ nuage_mon_rest_server_url }}
-# Bridge name for the docker bridge
-dockerBridgeName: {{ docker_bridge }}
-# Certificate for connecting to the kubemon REST API
-nuageMonClientCert: {{ rest_client_cert }}
-# Key to the certificate in restClientCert
-nuageMonClientKey: {{ rest_client_key }}
-# CA certificate for verifying the master's rest server
-nuageMonServerCA: {{ rest_server_ca_cert }}
-# Nuage vport mtu size
-interfaceMTU: {{ vport_mtu }}
-# Logging level for the plugin
-# allowed options are: "dbg", "info", "warn", "err", "emer", "off"
-logLevel: {{ plugin_log_level }}
diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
index 4cf68411f..d8bfca62a 100644
--- a/roles/nuage_node/vars/main.yaml
+++ b/roles/nuage_node/vars/main.yaml
@@ -23,3 +23,5 @@ cni_conf_dir: "/etc/cni/net.d/"
cni_bin_dir: "/opt/cni/bin/"
nuage_plugin_crt_dir: /usr/share/vsp-openshift
+openshift_atomic_node_config_file: /etc/sysconfig/{{ openshift.common.service_type }}-node
+nuage_atomic_docker_additional_mounts: "DOCKER_ADDTL_BIND_MOUNTS=-v /var/usr/share/vsp-openshift:/var/usr/share/vsp-openshift -v /etc/default:/etc/default -v /var/run:/var/run -v /opt/cni/bin:/opt/cni/bin -v /etc/cni/net.d:/etc/cni/net.d"
diff --git a/roles/openshift_aws_ami_copy/README.md b/roles/openshift_aws_ami_copy/README.md
new file mode 100644
index 000000000..111818451
--- /dev/null
+++ b/roles/openshift_aws_ami_copy/README.md
@@ -0,0 +1,50 @@
+openshift_aws_ami_copy
+=========
+
+Ansible role for copying an AMI
+
+Requirements
+------------
+
+Ansible Modules:
+
+
+Role Variables
+--------------
+
+- r_openshift_aws_ami_copy_src_ami: source AMI id to copy from
+- r_openshift_aws_ami_copy_region: region where the AMI is found
+- r_openshift_aws_ami_copy_name: name to assign to the new AMI
+- r_openshift_aws_ami_copy_kms_arn: AWS IAM KMS arn of the key to use for encryption
+- r_openshift_aws_ami_copy_tags: dict of the desired tags
+- r_openshift_aws_ami_copy_wait: wait for the AMI copy to reach the available state. This can fail due to boto waiter timeouts.
+
+Dependencies
+------------
+
+
+Example Playbook
+----------------
+```yaml
+ - name: copy the ami for encrypted disks
+ include_role:
+ name: openshift_aws_ami_copy
+ vars:
+ r_openshift_aws_ami_copy_region: us-east-1
+ r_openshift_aws_ami_copy_name: myami
+ r_openshift_aws_ami_copy_src_ami: ami-1234
+ r_openshift_aws_ami_copy_kms_arn: arn:xxxx
+ r_openshift_aws_ami_copy_tags: {}
+ r_openshift_aws_ami_copy_encrypt: False
+
+```
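+
+The role registers the copy result and exposes the new AMI's id via the
+`r_openshift_aws_ami_copy_retval_custom_ami` fact, so a caller can pick it
+up right after the `include_role` (a minimal sketch):
+```yaml
+ - debug:
+ msg: "Copied AMI id: {{ r_openshift_aws_ami_copy_retval_custom_ami }}"
+```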
+
+License
+-------
+
+Apache 2.0
+
+Author Information
+------------------
+
+Openshift
diff --git a/roles/openshift_aws_ami_copy/tasks/main.yml b/roles/openshift_aws_ami_copy/tasks/main.yml
new file mode 100644
index 000000000..bcccd4042
--- /dev/null
+++ b/roles/openshift_aws_ami_copy/tasks/main.yml
@@ -0,0 +1,26 @@
+---
+- fail:
+ msg: "{{ item.name }} needs to be defined"
+ when: item.cond | bool
+ with_items:
+ - name: r_openshift_aws_ami_copy_src_ami
+ cond: "{{ r_openshift_aws_ami_copy_src_ami is undefined }}"
+ - name: r_openshift_aws_ami_copy_name
+ cond: "{{ r_openshift_aws_ami_copy_name is undefined }}"
+ - name: r_openshift_aws_ami_copy_region
+ cond: "{{ r_openshift_aws_ami_copy_region is undefined }}"
+
+- name: "Create copied AMI image and wait: {{ r_openshift_aws_ami_copy_wait | default(False) }}"
+ ec2_ami_copy:
+ region: "{{ r_openshift_aws_ami_copy_region }}"
+ source_region: "{{ r_openshift_aws_ami_copy_region }}"
+ name: "{{ r_openshift_aws_ami_copy_name }}"
+ source_image_id: "{{ r_openshift_aws_ami_copy_src_ami }}"
+ encrypted: "{{ r_openshift_aws_ami_copy_encrypt | default(False) }}"
+ kms_key_id: "{{ r_openshift_aws_ami_copy_kms_arn | default(omit) }}"
+ wait: "{{ r_openshift_aws_ami_copy_wait | default(omit) }}"
+ tags: "{{ r_openshift_aws_ami_copy_tags }}"
+ register: copy_result
+
+- debug: var=copy_result
+
+- name: return AMI ID with set_fact - r_openshift_aws_ami_copy_retval_custom_ami
+ set_fact:
+ r_openshift_aws_ami_copy_retval_custom_ami: "{{ copy_result.image_id }}"
diff --git a/roles/openshift_aws_elb/README.md b/roles/openshift_aws_elb/README.md
new file mode 100644
index 000000000..ecc45fa14
--- /dev/null
+++ b/roles/openshift_aws_elb/README.md
@@ -0,0 +1,75 @@
+openshift_aws_elb
+=========
+
+Ansible role to provision and manage AWS ELBs for OpenShift.
+
+Requirements
+------------
+
+Ansible Modules:
+
+- ec2_elb
+- ec2_elb_lb
+
+python package:
+
+python-boto
+
+Role Variables
+--------------
+
+- r_openshift_aws_elb_instances: instances to put in ELB
+- r_openshift_aws_elb_name: name of the ELB
+- r_openshift_aws_elb_security_groups: list of SGs (by name) that the ELB will belong to
+- r_openshift_aws_elb_region: AWS Region
+- r_openshift_aws_elb_health_check: definition of the ELB health check. See the Ansible docs for ec2_elb_lb
+```yaml
+ ping_protocol: tcp
+ ping_port: 443
+ response_timeout: 5
+ interval: 30
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+```
+- r_openshift_aws_elb_listeners: definition of the ELB listeners. See the Ansible docs for ec2_elb_lb
+```yaml
+- protocol: tcp
+ load_balancer_port: 80
+ instance_protocol: ssl
+ instance_port: 443
+- protocol: ssl
+ load_balancer_port: 443
+ instance_protocol: ssl
+ instance_port: 443
+ # ssl certificate required for https or ssl
+ ssl_certificate_id: "{{ r_openshift_aws_elb_cert_arn }}"
+```
+
+Dependencies
+------------
+
+
+Example Playbook
+----------------
+```yaml
+- include_role:
+ name: openshift_aws_elb
+ vars:
+ r_openshift_aws_elb_instances: aws_instances_to_put_in_elb
+ r_openshift_aws_elb_name: elb_name
+ r_openshift_aws_elb_security_groups: security_group_names
+ r_openshift_aws_elb_region: aws_region
+ r_openshift_aws_elb_health_check: "{{ elb_health_check_definition }}"
+ r_openshift_aws_elb_listeners: "{{ elb_listeners_definition }}"
+```
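+
+The tasks discover the instances to register through
+`r_openshift_aws_elb_instance_filter` (an `ec2_remote_facts` filter dict) and
+select listeners from the listener map by `r_openshift_aws_elb_type` and
+`r_openshift_aws_elb_direction` for master/infra ELBs, so a fuller master
+invocation looks roughly like this (values illustrative):
+```yaml
+ r_openshift_aws_elb_type: master
+ r_openshift_aws_elb_direction: external
+ r_openshift_aws_elb_name: mycluster-master-external
+ r_openshift_aws_elb_scheme: internet-facing
+ r_openshift_aws_elb_subnet_name: us-east-1c
+ r_openshift_aws_elb_instance_filter:
+ "tag:clusterid": mycluster
+ "tag:host-type": master
+ instance-state-name: running
+```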
+
+
+License
+-------
+
+Apache 2.0
+
+Author Information
+------------------
+
+Openshift
diff --git a/roles/openshift_aws_elb/defaults/main.yml b/roles/openshift_aws_elb/defaults/main.yml
new file mode 100644
index 000000000..ed5d38079
--- /dev/null
+++ b/roles/openshift_aws_elb/defaults/main.yml
@@ -0,0 +1,33 @@
+---
+r_openshift_aws_elb_health_check:
+ ping_protocol: tcp
+ ping_port: 443
+ response_timeout: 5
+ interval: 30
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+
+r_openshift_aws_elb_cert_arn: ''
+
+r_openshift_aws_elb_listeners:
+ master:
+ external:
+ - protocol: tcp
+ load_balancer_port: 80
+ instance_protocol: ssl
+ instance_port: 443
+ - protocol: ssl
+ load_balancer_port: 443
+ instance_protocol: ssl
+ instance_port: 443
+ # ssl certificate required for https or ssl
+ ssl_certificate_id: "{{ r_openshift_aws_elb_cert_arn }}"
+ internal:
+ - protocol: tcp
+ load_balancer_port: 80
+ instance_protocol: tcp
+ instance_port: 80
+ - protocol: tcp
+ load_balancer_port: 443
+ instance_protocol: tcp
+ instance_port: 443
diff --git a/roles/openshift_aws_elb/meta/main.yml b/roles/openshift_aws_elb/meta/main.yml
new file mode 100644
index 000000000..58be652a5
--- /dev/null
+++ b/roles/openshift_aws_elb/meta/main.yml
@@ -0,0 +1,12 @@
+---
+galaxy_info:
+ author: OpenShift
+ description: Openshift ELB provisioning
+ company: Red Hat, Inc
+ license: ASL 2.0
+ min_ansible_version: 1.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+dependencies: []
diff --git a/roles/openshift_aws_elb/tasks/main.yml b/roles/openshift_aws_elb/tasks/main.yml
new file mode 100644
index 000000000..64ec18545
--- /dev/null
+++ b/roles/openshift_aws_elb/tasks/main.yml
@@ -0,0 +1,57 @@
+---
+- name: fetch the instances to add to the ELB
+ ec2_remote_facts:
+ region: "{{ r_openshift_aws_elb_region }}"
+ filters: "{{ r_openshift_aws_elb_instance_filter }}"
+ register: instancesout
+
+- name: fetch the default subnet id
+ ec2_vpc_subnet_facts:
+ region: "{{ r_openshift_aws_elb_region }}"
+ filters:
+ "tag:Name": "{{ r_openshift_aws_elb_subnet_name }}"
+ register: subnetout
+
+- name: show the listeners that will be applied
+ debug:
+ msg: "{{ r_openshift_aws_elb_listeners[r_openshift_aws_elb_type][r_openshift_aws_elb_direction]
+ if 'master' in r_openshift_aws_elb_type or 'infra' in r_openshift_aws_elb_type
+ else r_openshift_aws_elb_listeners }}"
+
+- name: "Create ELB {{ r_openshift_aws_elb_name }}"
+ ec2_elb_lb:
+ name: "{{ r_openshift_aws_elb_name }}"
+ state: present
+ security_group_names: "{{ r_openshift_aws_elb_security_groups }}"
+ idle_timeout: "{{ r_openshift_aws_elb_idle_timout }}"
+ region: "{{ r_openshift_aws_elb_region }}"
+ subnets:
+ - "{{ subnetout.subnets[0].id }}"
+ health_check: "{{ r_openshift_aws_elb_health_check }}"
+ listeners: "{{ r_openshift_aws_elb_listeners[r_openshift_aws_elb_type][r_openshift_aws_elb_direction]
+ if 'master' in r_openshift_aws_elb_type or 'infra' in r_openshift_aws_elb_type
+ else r_openshift_aws_elb_listeners }}"
+ scheme: "{{ r_openshift_aws_elb_scheme }}"
+ tags:
+ KubernetesCluster: "{{ r_openshift_aws_elb_clusterid }}"
+ register: new_elb
+
+# It is necessary to ignore_errors here because the instances are not in 'ready'
+# state when first added to the ELB
+- name: "Add instances to ELB {{ r_openshift_aws_elb_name }}"
+ ec2_elb:
+ instance_id: "{{ item.id }}"
+ ec2_elbs: "{{ r_openshift_aws_elb_name }}"
+ state: present
+ region: "{{ r_openshift_aws_elb_region }}"
+ wait: False
+ with_items: "{{ instancesout.instances }}"
+ ignore_errors: True
+ retries: 10
+ register: elb_call
+ until: elb_call|succeeded
+
+- debug:
+ msg: "{{ item }}"
+ with_items:
+ - "{{ new_elb }}"
diff --git a/roles/openshift_aws_iam_kms/README.md b/roles/openshift_aws_iam_kms/README.md
new file mode 100644
index 000000000..9468e785c
--- /dev/null
+++ b/roles/openshift_aws_iam_kms/README.md
@@ -0,0 +1,43 @@
+openshift_aws_iam_kms
+=========
+
+Ansible role to create AWS IAM KMS keys for encryption
+
+Requirements
+------------
+
+Ansible Modules:
+
+oo_iam_kms
+
+Role Variables
+--------------
+
+- r_openshift_aws_iam_kms_region: AWS region to create KMS key
+- r_openshift_aws_iam_kms_alias: Alias name to assign to created KMS key
+
+Dependencies
+------------
+
+lib_utils
+
+Example Playbook
+----------------
+```yaml
+- include_role:
+ name: openshift_aws_iam_kms
+ vars:
+ r_openshift_aws_iam_kms_region: 'us-east-1'
+ r_openshift_aws_iam_kms_alias: 'alias/clusterABC_kms'
+```
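+
+Note that AWS requires KMS alias names to carry the `alias/` prefix, as shown
+in the example above.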
+
+
+License
+-------
+
+Apache 2.0
+
+Author Information
+------------------
+
+Openshift
diff --git a/roles/lib_utils/tasks/main.yml b/roles/openshift_aws_iam_kms/defaults/main.yml
index ed97d539c..ed97d539c 100644
--- a/roles/lib_utils/tasks/main.yml
+++ b/roles/openshift_aws_iam_kms/defaults/main.yml
diff --git a/roles/openshift_aws_iam_kms/meta/main.yml b/roles/openshift_aws_iam_kms/meta/main.yml
new file mode 100644
index 000000000..e29aaf96b
--- /dev/null
+++ b/roles/openshift_aws_iam_kms/meta/main.yml
@@ -0,0 +1,13 @@
+---
+galaxy_info:
+ author: OpenShift
+ description: AWS IAM KMS setup and management
+ company: Red Hat, Inc
+ license: ASL 2.0
+ min_ansible_version: 1.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+dependencies:
+- lib_utils
diff --git a/roles/openshift_aws_iam_kms/tasks/main.yml b/roles/openshift_aws_iam_kms/tasks/main.yml
new file mode 100644
index 000000000..32aac2666
--- /dev/null
+++ b/roles/openshift_aws_iam_kms/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+- fail:
+ msg: "{{ item.name }} needs to be defined."
+ when: item.cond | bool
+ with_items:
+ - name: "{{ r_openshift_aws_iam_kms_alias }}"
+ cond: "{{ r_openshift_aws_iam_kms_alias is undefined }}"
+ - name: "{{ r_openshift_aws_iam_kms_region }}"
+ cond: "{{ r_openshift_aws_iam_kms_region is undefined }}"
+
+- name: Create IAM KMS key with alias
+ oo_iam_kms:
+ state: present
+ alias: "{{ r_openshift_aws_iam_kms_alias }}"
+ region: "{{ r_openshift_aws_iam_kms_region }}"
+ register: created_kms
+
+- debug: var=created_kms.results
diff --git a/roles/openshift_aws_launch_config/README.md b/roles/openshift_aws_launch_config/README.md
new file mode 100644
index 000000000..52b7e83b6
--- /dev/null
+++ b/roles/openshift_aws_launch_config/README.md
@@ -0,0 +1,72 @@
+openshift_aws_launch_config
+=========
+
+Ansible role to create an AWS launch config for a scale group.
+
+This includes the AMI, volumes, user_data, etc.
+
+Requirements
+------------
+
+Ansible Modules:
+
+
+Role Variables
+--------------
+- r_openshift_aws_launch_config_name: "{{ launch_config_name }}"
+- r_openshift_aws_launch_config_clusterid: "{{ clusterid }}"
+- r_openshift_aws_launch_config_region: "{{ region }}"
+- r_openshift_aws_launch_config: "{{ node_group_config }}"
+```yaml
+ master:
+ instance_type: m4.xlarge
+ ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced
+ volumes:
+ - device_name: /dev/sdb
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: False
+ health_check:
+ period: 60
+ type: EC2
+ min_size: 3
+ max_size: 3
+ desired_size: 3
+ tags:
+ host-type: master
+ sub-host-type: default
+ wait_for_instances: True
+```
+- r_openshift_aws_launch_config_type: compute
+- r_openshift_aws_launch_config_custom_image: ami-xxxxx
+- r_openshift_aws_launch_config_bootstrap_token: <string of kubeconfig>
+
+Dependencies
+------------
+
+
+Example Playbook
+----------------
+```yaml
+ - name: create compute nodes config
+ include_role:
+ name: openshift_aws_launch_config
+ vars:
+ r_openshift_aws_launch_config_name: "{{ launch_config_name }}"
+ r_openshift_aws_launch_config_clusterid: "{{ clusterid }}"
+ r_openshift_aws_launch_config_region: "{{ region }}"
+ r_openshift_aws_launch_config: "{{ node_group_config }}"
+ r_openshift_aws_launch_config_type: compute
+ r_openshift_aws_launch_config_custom_image: ami-1234
+ r_openshift_aws_launch_config_bootstrap_token: abcd
+```
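+
+Note on image selection: the tasks use
+`r_openshift_aws_launch_config_custom_image` only when its value contains
+`ami-`; otherwise the launch config falls back to the `ami` set for the node
+type inside `r_openshift_aws_launch_config`. A sketch of the fallback case:
+```yaml
+ r_openshift_aws_launch_config_custom_image: '' # no 'ami-' substring, ignored
+ r_openshift_aws_launch_config:
+ compute:
+ ami: ami-cdeec8b6 # used instead
+```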
+
+License
+-------
+
+Apache 2.0
+
+Author Information
+------------------
+
+Openshift
diff --git a/roles/openshift_aws_launch_config/defaults/main.yml b/roles/openshift_aws_launch_config/defaults/main.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/roles/openshift_aws_launch_config/defaults/main.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/openshift_aws_launch_config/meta/main.yml b/roles/openshift_aws_launch_config/meta/main.yml
new file mode 100644
index 000000000..e61670cc2
--- /dev/null
+++ b/roles/openshift_aws_launch_config/meta/main.yml
@@ -0,0 +1,12 @@
+---
+galaxy_info:
+ author: OpenShift
+ description: OpenShift AWS launch configuration creation
+ company: Red Hat, Inc
+ license: ASL 2.0
+ min_ansible_version: 2.3
+ platforms:
+ - name: EL
+ versions:
+ - 7
+dependencies: []
diff --git a/roles/openshift_aws_launch_config/tasks/main.yml b/roles/openshift_aws_launch_config/tasks/main.yml
new file mode 100644
index 000000000..437cf1f71
--- /dev/null
+++ b/roles/openshift_aws_launch_config/tasks/main.yml
@@ -0,0 +1,50 @@
+---
+- name: fail when params are not set
+ fail:
+ msg: Please specify the role parameters.
+ when: >
+ r_openshift_aws_launch_config_clusterid is undefined
+ or r_openshift_aws_launch_config_type is undefined
+ or r_openshift_aws_launch_config_region is undefined
+ or r_openshift_aws_launch_config is undefined
+
+- name: fetch the security groups for launch config
+ ec2_group_facts:
+ filters:
+ group-name:
+ - "{{ r_openshift_aws_launch_config_clusterid }}" # default sg
+ - "{{ r_openshift_aws_launch_config_clusterid }}_{{ r_openshift_aws_launch_config_type }}" # node type sg
+ - "{{ r_openshift_aws_launch_config_clusterid }}_{{ r_openshift_aws_launch_config_type }}_k8s" # node type sg k8s
+ region: "{{ r_openshift_aws_launch_config_region }}"
+ register: ec2sgs
+
+# Create the scale group config
+- name: Create the node scale group config
+ ec2_lc:
+ name: "{{ r_openshift_aws_launch_config_name }}"
+ region: "{{ r_openshift_aws_launch_config_region }}"
+ image_id: "{{ r_openshift_aws_launch_config_custom_image if 'ami-' in r_openshift_aws_launch_config_custom_image else r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].ami }}"
+ instance_type: "{{ r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].instance_type }}"
+ security_groups: "{{ ec2sgs.security_groups | map(attribute='group_id')| list }}"
+ user_data: |-
+ #cloud-config
+ {% if r_openshift_aws_launch_config_type != 'master' %}
+ write_files:
+ - path: /root/csr_kubeconfig
+ owner: root:root
+ permissions: '0640'
+ content: {{ r_openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }}
+ - path: /root/openshift_settings
+ owner: root:root
+ permissions: '0640'
+ content:
+ openshift_type: "{{ r_openshift_aws_launch_config_type }}"
+ runcmd:
+ - [ systemctl, enable, atomic-openshift-node]
+ - [ systemctl, start, atomic-openshift-node]
+ {% endif %}
+ key_name: "{{ r_openshift_aws_launch_config.ssh_key_name }}"
+ ebs_optimized: False
+ volumes: "{{ r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].volumes }}"
+ assign_public_ip: True
+ register: test
diff --git a/roles/openshift_aws_launch_config/templates/cloud-init.j2 b/roles/openshift_aws_launch_config/templates/cloud-init.j2
new file mode 100644
index 000000000..1a1e29550
--- /dev/null
+++ b/roles/openshift_aws_launch_config/templates/cloud-init.j2
@@ -0,0 +1,9 @@
+{% if r_openshift_aws_launch_config_bootstrap_token is defined and r_openshift_aws_launch_config_bootstrap_token != '' %}
+#cloud-config
+write_files:
+- path: /root/csr_kubeconfig
+ owner: root:root
+ permissions: '0640'
+ content: |-
+ {{ r_openshift_aws_launch_config_bootstrap_token }}
+{% endif %}
diff --git a/roles/openshift_aws_node_group/README.md b/roles/openshift_aws_node_group/README.md
new file mode 100644
index 000000000..c32c57bc5
--- /dev/null
+++ b/roles/openshift_aws_node_group/README.md
@@ -0,0 +1,77 @@
+openshift_aws_node_group
+=========
+
+Ansible role to create an AWS node group.
+
+This includes the security group, launch config, and scale group.
+
+Requirements
+------------
+
+Ansible Modules:
+
+
+Role Variables
+--------------
+- r_openshift_aws_node_group_name: myscalegroup
+- r_openshift_aws_node_group_clusterid: myclusterid
+- r_openshift_aws_node_group_region: us-east-1
+- r_openshift_aws_node_group_lc_name: launch_config
+- r_openshift_aws_node_group_type: master|infra|compute
+- r_openshift_aws_node_group_config: "{{ node_group_config }}"
+```yaml
+master:
+ instance_type: m4.xlarge
+ ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced
+ volumes:
+ - device_name: /dev/sdb
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: False
+ health_check:
+ period: 60
+ type: EC2
+ min_size: 3
+ max_size: 3
+ desired_size: 3
+ tags:
+ host-type: master
+ sub-host-type: default
+ wait_for_instances: True
+```
+- r_openshift_aws_node_group_subnet_name: "{{ subnet_name }}"
+
+```yaml
+us-east-1a # name of subnet
+```
+
+Dependencies
+------------
+
+
+Example Playbook
+----------------
+```yaml
+ - name: "create {{ openshift_build_node_type }} node groups"
+ include_role:
+ name: openshift_aws_node_group
+ vars:
+ r_openshift_aws_node_group_name: "{{ clusterid }} openshift compute"
+ r_openshift_aws_node_group_lc_name: "{{ launch_config_name }}"
+ r_openshift_aws_node_group_clusterid: "{{ clusterid }}"
+ r_openshift_aws_node_group_region: "{{ region }}"
+ r_openshift_aws_node_group_config: "{{ node_group_config }}"
+ r_openshift_aws_node_group_type: compute
+ r_openshift_aws_node_group_subnet_name: "{{ subnet_name }}"
+```
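+
+The per-type config may also carry optional `termination_policy` and `elbs`
+keys; the tasks pass them through to `ec2_asg` and omit them when absent. A
+sketch (values illustrative):
+```yaml
+compute:
+ elbs:
+ - mycluster-compute-elb
+ termination_policy: OldestInstance
+```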
+
+License
+-------
+
+Apache 2.0
+
+Author Information
+------------------
+
+Openshift
diff --git a/roles/openshift_aws_node_group/defaults/main.yml b/roles/openshift_aws_node_group/defaults/main.yml
new file mode 100644
index 000000000..44c5116a1
--- /dev/null
+++ b/roles/openshift_aws_node_group/defaults/main.yml
@@ -0,0 +1,58 @@
+---
+r_openshift_aws_node_group_type: master
+
+r_openshift_aws_node_group_config:
+ tags:
+ clusterid: "{{ r_openshift_aws_node_group_clusterid }}"
+ master:
+ instance_type: m4.xlarge
+ ami: "{{ r_openshift_aws_node_group_ami }}"
+ volumes:
+ - device_name: /dev/sdb
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: False
+ health_check:
+ period: 60
+ type: EC2
+ min_size: 3
+ max_size: 3
+ desired_size: 3
+ tags:
+ host-type: master
+ sub-host-type: default
+ wait_for_instances: True
+ compute:
+ instance_type: m4.xlarge
+ ami: "{{ r_openshift_aws_node_group_ami }}"
+ volumes:
+ - device_name: /dev/sdb
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: True
+ health_check:
+ period: 60
+ type: EC2
+ min_size: 3
+ max_size: 100
+ desired_size: 3
+ tags:
+ host-type: node
+ sub-host-type: compute
+ infra:
+ instance_type: m4.xlarge
+ ami: "{{ r_openshift_aws_node_group_ami }}"
+ volumes:
+ - device_name: /dev/sdb
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: True
+ health_check:
+ period: 60
+ type: EC2
+ min_size: 2
+ max_size: 20
+ desired_size: 2
+ tags:
+ host-type: node
+ sub-host-type: infra
diff --git a/roles/openshift_aws_node_group/tasks/main.yml b/roles/openshift_aws_node_group/tasks/main.yml
new file mode 100644
index 000000000..6f5364b03
--- /dev/null
+++ b/roles/openshift_aws_node_group/tasks/main.yml
@@ -0,0 +1,32 @@
+---
+- name: validate role inputs
+ fail:
+ msg: Please pass in the required role variables
+ when: >
+ r_openshift_aws_node_group_clusterid is not defined
+ or r_openshift_aws_node_group_region is not defined
+ or r_openshift_aws_node_group_subnet_name is not defined
+
+- name: fetch the subnet to use in scale group
+ ec2_vpc_subnet_facts:
+ region: "{{ r_openshift_aws_node_group_region }}"
+ filters:
+ "tag:Name": "{{ r_openshift_aws_node_group_subnet_name }}"
+ register: subnetout
+
+- name: Create the scale group
+ ec2_asg:
+ name: "{{ r_openshift_aws_node_group_name }}"
+ launch_config_name: "{{ r_openshift_aws_node_group_lc_name }}"
+ health_check_period: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].health_check.period }}"
+ health_check_type: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].health_check.type }}"
+ min_size: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].min_size }}"
+ max_size: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].max_size }}"
+ desired_capacity: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].desired_size }}"
+ region: "{{ r_openshift_aws_node_group_region }}"
+ termination_policies: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].termination_policy if 'termination_policy' in r_openshift_aws_node_group_config[r_openshift_aws_node_group_type] else omit }}"
+ load_balancers: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].elbs if 'elbs' in r_openshift_aws_node_group_config[r_openshift_aws_node_group_type] else omit }}"
+ wait_for_instances: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].wait_for_instances | default(False)}}"
+ vpc_zone_identifier: "{{ subnetout.subnets[0].id }}"
+ tags:
+ - "{{ r_openshift_aws_node_group_config.tags | combine(r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].tags) }}"
diff --git a/roles/openshift_aws_s3/README.md b/roles/openshift_aws_s3/README.md
new file mode 100644
index 000000000..afafe61cf
--- /dev/null
+++ b/roles/openshift_aws_s3/README.md
@@ -0,0 +1,43 @@
+openshift_aws_s3
+=========
+
+Ansible role to create an S3 bucket
+
+Requirements
+------------
+
+Ansible Modules:
+
+
+Role Variables
+--------------
+
+- r_openshift_aws_s3_clusterid: myclusterid
+- r_openshift_aws_s3_region: us-east-1
+- r_openshift_aws_s3_mode: create|delete
+
+Dependencies
+------------
+
+
+Example Playbook
+----------------
+```yaml
+- name: create an s3 bucket
+ include_role:
+ name: openshift_aws_s3
+ vars:
+ r_openshift_aws_s3_clusterid: mycluster
+ r_openshift_aws_s3_region: us-east-1
+ r_openshift_aws_s3_mode: create
+```
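+
+`r_openshift_aws_s3_mode` is passed straight through to the `s3` module's
+`mode` parameter, so deleting the bucket is the same call with `delete` (a
+sketch):
+```yaml
+- include_role:
+ name: openshift_aws_s3
+ vars:
+ r_openshift_aws_s3_clusterid: mycluster
+ r_openshift_aws_s3_region: us-east-1
+ r_openshift_aws_s3_mode: delete
+```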
+
+License
+-------
+
+Apache 2.0
+
+Author Information
+------------------
+
+Openshift
diff --git a/roles/openshift_aws_s3/tasks/main.yml b/roles/openshift_aws_s3/tasks/main.yml
new file mode 100644
index 000000000..46bd781bd
--- /dev/null
+++ b/roles/openshift_aws_s3/tasks/main.yml
@@ -0,0 +1,6 @@
+---
+- name: Create an s3 bucket
+ s3:
+ bucket: "{{ r_openshift_aws_s3_clusterid }}"
+ mode: "{{ r_openshift_aws_s3_mode }}"
+ region: "{{ r_openshift_aws_s3_region }}"
diff --git a/roles/openshift_aws_sg/README.md b/roles/openshift_aws_sg/README.md
new file mode 100644
index 000000000..eeb76bbb6
--- /dev/null
+++ b/roles/openshift_aws_sg/README.md
@@ -0,0 +1,59 @@
+openshift_aws_sg
+=========
+
+Ansible role to create AWS security groups
+
+Requirements
+------------
+
+Ansible Modules:
+
+
+Role Variables
+--------------
+
+- r_openshift_aws_sg_clusterid: myclusterid
+- r_openshift_aws_sg_region: us-east-1
+- r_openshift_aws_sg_type: master|infra|compute
+```yaml
+# defaults/main.yml
+ default:
+ name: "{{ r_openshift_aws_sg_clusterid }}"
+ desc: "{{ r_openshift_aws_sg_clusterid }} default"
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 0.0.0.0/0
+ - proto: all
+ from_port: all
+ to_port: all
+ group_name: "{{ r_openshift_aws_sg_clusterid }}"
+```
+
+
+Dependencies
+------------
+
+
+Example Playbook
+----------------
+```yaml
+- name: create security groups for master
+ include_role:
+ name: openshift_aws_sg
+ vars:
+ r_openshift_aws_sg_clusterid: mycluster
+ r_openshift_aws_sg_region: us-east-1
+ r_openshift_aws_sg_type: master
+```
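+
+Besides the per-type group, the role always creates the cluster default group
+and a companion `<name>_k8s` group that gets the `KubernetesCluster` tag. The
+rules come from the `r_openshift_aws_sg_sg` dict in defaults/main.yml, so they
+can be overridden; e.g. restricting SSH on the default group (a sketch):
+```yaml
+r_openshift_aws_sg_sg:
+ default:
+ name: "{{ r_openshift_aws_sg_clusterid }}"
+ desc: "{{ r_openshift_aws_sg_clusterid }} default"
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 10.0.0.0/8
+```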
+
+License
+-------
+
+Apache 2.0
+
+Author Information
+------------------
+
+Openshift
diff --git a/roles/openshift_aws_sg/defaults/main.yml b/roles/openshift_aws_sg/defaults/main.yml
new file mode 100644
index 000000000..9c480d337
--- /dev/null
+++ b/roles/openshift_aws_sg/defaults/main.yml
@@ -0,0 +1,48 @@
+---
+r_openshift_aws_sg_sg:
+ default:
+ name: "{{ r_openshift_aws_sg_clusterid }}"
+ desc: "{{ r_openshift_aws_sg_clusterid }} default"
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 0.0.0.0/0
+ - proto: all
+ from_port: all
+ to_port: all
+ group_name: "{{ r_openshift_aws_sg_clusterid }}"
+ master:
+ name: "{{ r_openshift_aws_sg_clusterid }}_master"
+ desc: "{{ r_openshift_aws_sg_clusterid }} master instances"
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 443
+ to_port: 443
+ cidr_ip: 0.0.0.0/0
+ compute:
+ name: "{{ r_openshift_aws_sg_clusterid }}_compute"
+ desc: "{{ r_openshift_aws_sg_clusterid }} compute node instances"
+ infra:
+ name: "{{ r_openshift_aws_sg_clusterid }}_infra"
+ desc: "{{ r_openshift_aws_sg_clusterid }} infra node instances"
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 443
+ to_port: 443
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 30000
+ to_port: 32000
+ cidr_ip: 0.0.0.0/0
+ etcd:
+ name: "{{ r_openshift_aws_sg_clusterid }}_etcd"
+ desc: "{{ r_openshift_aws_sg_clusterid }} etcd instances"
diff --git a/roles/openshift_aws_sg/tasks/main.yml b/roles/openshift_aws_sg/tasks/main.yml
new file mode 100644
index 000000000..2294fdcc9
--- /dev/null
+++ b/roles/openshift_aws_sg/tasks/main.yml
@@ -0,0 +1,53 @@
+---
+- name: Validate role inputs
+ fail:
+ msg: Please ensure the correct variables are passed
+ when: >
+ r_openshift_aws_sg_region is undefined
+ or r_openshift_aws_sg_clusterid is undefined
+
+
+- name: Fetch the VPC for vpc.id
+ ec2_vpc_net_facts:
+ region: "{{ r_openshift_aws_sg_region }}"
+ filters:
+ "tag:Name": "{{ r_openshift_aws_sg_clusterid }}"
+ register: vpcout
+
+- name: Create default security group for cluster
+ ec2_group:
+ name: "{{ r_openshift_aws_sg_sg.default.name }}"
+ description: "{{ r_openshift_aws_sg_sg.default.desc }}"
+ region: "{{ r_openshift_aws_sg_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ rules: "{{ r_openshift_aws_sg_sg.default.rules | default(omit, True)}}"
+ register: sg_default_created
+
+- name: create the node group sgs
+ ec2_group:
+ name: "{{ item.name}}"
+ description: "{{ item.desc }}"
+ rules: "{{ item.rules if 'rules' in item else [] }}"
+ region: "{{ r_openshift_aws_sg_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ register: sg_create
+ with_items:
+ - "{{ r_openshift_aws_sg_sg[r_openshift_aws_sg_type]}}"
+
+- name: create the k8s sgs for the node group
+ ec2_group:
+ name: "{{ item.name }}_k8s"
+ description: "{{ item.desc }} for k8s"
+ region: "{{ r_openshift_aws_sg_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ register: k8s_sg_create
+ with_items:
+ - "{{ r_openshift_aws_sg_sg[r_openshift_aws_sg_type] }}"
+
+- name: tag sg groups with proper tags
+ ec2_tag:
+ tags:
+ KubernetesCluster: "{{ r_openshift_aws_sg_clusterid }}"
+ resource: "{{ item.group_id }}"
+ region: "{{ r_openshift_aws_sg_region }}"
+ with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws_ssh_keys/README.md b/roles/openshift_aws_ssh_keys/README.md
new file mode 100644
index 000000000..4f8667918
--- /dev/null
+++ b/roles/openshift_aws_ssh_keys/README.md
@@ -0,0 +1,49 @@
+openshift_aws_ssh_keys
+=========
+
+Ansible role for installing SSH keys
+
+Requirements
+------------
+
+Ansible Modules:
+
+
+Role Variables
+--------------
+
+- r_openshift_aws_ssh_keys_users: list of dicts of users (each with key_name and pub_key)
+- r_openshift_aws_ssh_keys_region: ec2_region to install the keys
+
+Dependencies
+------------
+
+
+Example Playbook
+----------------
+```yaml
+users:
+- key_name: user1
+ pub_key: <user1 ssh public key>
+- key_name: user2
+ pub_key: <user2 ssh public key>
+
+region: us-east-1
+
+- include_role:
+ name: openshift_aws_ssh_keys
+ vars:
+ r_openshift_aws_ssh_keys_users: "{{ users }}"
+ r_openshift_aws_ssh_keys_region: "{{ region }}"
+```
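+
+The underlying `ec2_key` task runs with `no_log: True`, so the public key
+material is kept out of the Ansible output.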
+
+
+License
+-------
+
+Apache 2.0
+
+Author Information
+------------------
+
+Openshift
diff --git a/roles/openshift_aws_ssh_keys/tasks/main.yml b/roles/openshift_aws_ssh_keys/tasks/main.yml
new file mode 100644
index 000000000..232cf20ed
--- /dev/null
+++ b/roles/openshift_aws_ssh_keys/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- name: Add the public keys for the users
+ ec2_key:
+ name: "{{ item.key_name }}"
+ key_material: "{{ item.pub_key }}"
+ region: "{{ r_openshift_aws_ssh_keys_region }}"
+ with_items: "{{ r_openshift_aws_ssh_keys_users }}"
+ no_log: True
diff --git a/roles/openshift_aws_vpc/README.md b/roles/openshift_aws_vpc/README.md
new file mode 100644
index 000000000..d88cf0581
--- /dev/null
+++ b/roles/openshift_aws_vpc/README.md
@@ -0,0 +1,62 @@
+openshift_aws_vpc
+=========
+
+Ansible role to create a default AWS VPC
+
+Requirements
+------------
+
+Ansible Modules:
+
+
+Role Variables
+--------------
+
+- r_openshift_aws_vpc_clusterid: "{{ clusterid }}"
+- r_openshift_aws_vpc_cidr: 172.31.48.0/20
+- r_openshift_aws_vpc_subnets: "{{ subnets }}"
+```yaml
+ subnets:
+ us-east-1: # These are us-east-1 region defaults. Ensure this matches your region
+ - cidr: 172.31.48.0/20
+ az: "us-east-1c"
+ - cidr: 172.31.32.0/20
+ az: "us-east-1e"
+ - cidr: 172.31.16.0/20
+ az: "us-east-1a"
+```
+- r_openshift_aws_vpc_region: "{{ region }}"
+- r_openshift_aws_vpc_tags: dict of tags to apply to vpc
+- r_openshift_aws_vpc_name: "{{ vpc_name | default(clusterid) }}"
+
+Dependencies
+------------
+
+
+Example Playbook
+----------------
+
+```yaml
+ - name: create default vpc
+ include_role:
+ name: openshift_aws_vpc
+ vars:
+ r_openshift_aws_vpc_clusterid: mycluster
+ r_openshift_aws_vpc_cidr: 172.31.48.0/20
+ r_openshift_aws_vpc_subnets: "{{ subnets }}"
+ r_openshift_aws_vpc_region: us-east-1
+ r_openshift_aws_vpc_tags: {}
+ r_openshift_aws_vpc_name: mycluster
+
+```
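+
+The tasks look up subnets via
+`r_openshift_aws_vpc_subnets[r_openshift_aws_vpc_region]`, so the top-level
+key of the subnet dict must match `r_openshift_aws_vpc_region` exactly (a
+sketch for another region):
+```yaml
+ subnets:
+ us-west-2:
+ - cidr: 172.31.48.0/20
+ az: "us-west-2a"
+```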
+
+
+License
+-------
+
+Apache 2.0
+
+Author Information
+------------------
+
+Openshift
diff --git a/roles/openshift_aws_vpc/defaults/main.yml b/roles/openshift_aws_vpc/defaults/main.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/roles/openshift_aws_vpc/defaults/main.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/openshift_aws_vpc/tasks/main.yml b/roles/openshift_aws_vpc/tasks/main.yml
new file mode 100644
index 000000000..cfe08dae5
--- /dev/null
+++ b/roles/openshift_aws_vpc/tasks/main.yml
@@ -0,0 +1,53 @@
+---
+- name: Create AWS VPC
+ ec2_vpc_net:
+ state: present
+ cidr_block: "{{ r_openshift_aws_vpc_cidr }}"
+ dns_support: True
+ dns_hostnames: True
+ region: "{{ r_openshift_aws_vpc_region }}"
+ name: "{{ r_openshift_aws_vpc_clusterid }}"
+ tags:
+ Name: "{{ r_openshift_aws_vpc_clusterid }}"
+ register: vpc
+
+- name: Sleep to avoid a race condition when creating the vpc
+ pause:
+ seconds: 5
+ when: vpc.changed
+
+- name: assign the vpc igw
+ ec2_vpc_igw:
+ region: "{{ r_openshift_aws_vpc_region }}"
+ vpc_id: "{{ vpc.vpc.id }}"
+ register: igw
+
+- name: assign the vpc subnets
+ ec2_vpc_subnet:
+ region: "{{ r_openshift_aws_vpc_region }}"
+ vpc_id: "{{ vpc.vpc.id }}"
+ cidr: "{{ item.cidr }}"
+ az: "{{ item.az }}"
+ resource_tags:
+ Name: "{{ item.az }}"
+ with_items: "{{ r_openshift_aws_vpc_subnets[r_openshift_aws_vpc_region] }}"
+
+- name: Grab the route tables from our VPC
+ ec2_vpc_route_table_facts:
+ region: "{{ r_openshift_aws_vpc_region }}"
+ filters:
+ vpc-id: "{{ vpc.vpc.id }}"
+ register: route_table
+
+- name: update the route table in the vpc
+ ec2_vpc_route_table:
+ lookup: id
+ route_table_id: "{{ route_table.route_tables[0].id }}"
+ vpc_id: "{{ vpc.vpc.id }}"
+ region: "{{ r_openshift_aws_vpc_region }}"
+ tags:
+ Name: "{{ r_openshift_aws_vpc_name }}"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: "{{ igw.gateway_id }}"
+ register: route_table_out
diff --git a/roles/openshift_cfme/defaults/main.yml b/roles/openshift_cfme/defaults/main.yml
index 393bee1f3..b82c2e602 100644
--- a/roles/openshift_cfme/defaults/main.yml
+++ b/roles/openshift_cfme/defaults/main.yml
@@ -27,8 +27,6 @@ openshift_cfme_pv_data:
# Tuning parameter to use more than 5 images at once from an ImageStream
openshift_cfme_maxImagesBulkImportedPerRepository: 100
-# Hostname/IP of the NFS server. Currently defaults to first master
-openshift_cfme_nfs_server: "{{ groups.nfs.0 }}"
# TODO: Refactor '_install_app' variable. This is just for testing but
# maybe in the future it should control the entire yes/no for CFME.
#
diff --git a/roles/openshift_cfme/tasks/nfs.yml b/roles/openshift_cfme/tasks/nfs.yml
index 8db45492e..ca04628a8 100644
--- a/roles/openshift_cfme/tasks/nfs.yml
+++ b/roles/openshift_cfme/tasks/nfs.yml
@@ -1,6 +1,13 @@
---
# Tasks to statically provision NFS volumes
# Include if not using dynamic volume provisioning
+
+- name: Set openshift_cfme_nfs_server fact
+ when: openshift_cfme_nfs_server is not defined
+ set_fact:
+ # Hostname/IP of the NFS server. Currently defaults to the first NFS host
+ openshift_cfme_nfs_server: "{{ oo_nfs_to_config.0 }}"
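+
+# The fact above is only set when openshift_cfme_nfs_server is not already
+# defined; to point CFME at an external NFS server, define it in the
+# inventory instead (hypothetical hostname shown):
+#
+# openshift_cfme_nfs_server: nfs.example.com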
+
- name: Ensure the /exports/ directory exists
file:
path: /exports/
diff --git a/roles/openshift_cfme/templates/miq-pv-db.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-db.yaml.j2
index b8c3bb277..280f3e97a 100644
--- a/roles/openshift_cfme/templates/miq-pv-db.yaml.j2
+++ b/roles/openshift_cfme/templates/miq-pv-db.yaml.j2
@@ -8,6 +8,6 @@ spec:
accessModes:
- ReadWriteOnce
nfs:
- path: /exports/miq-pv01
+ path: {{ openshift_cfme_nfs_directory }}/miq-pv01
server: {{ openshift_cfme_nfs_server }}
persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_cfme/templates/miq-pv-region.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-region.yaml.j2
index 7218773f0..fe80dffa5 100644
--- a/roles/openshift_cfme/templates/miq-pv-region.yaml.j2
+++ b/roles/openshift_cfme/templates/miq-pv-region.yaml.j2
@@ -8,6 +8,6 @@ spec:
accessModes:
- ReadWriteOnce
nfs:
- path: /exports/miq-pv02
+ path: {{ openshift_cfme_nfs_directory }}/miq-pv02
server: {{ openshift_cfme_nfs_server }}
persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_cfme/templates/miq-pv-server.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-server.yaml.j2
index 7b40b6c69..f84b67ea9 100644
--- a/roles/openshift_cfme/templates/miq-pv-server.yaml.j2
+++ b/roles/openshift_cfme/templates/miq-pv-server.yaml.j2
@@ -8,6 +8,6 @@ spec:
accessModes:
- ReadWriteOnce
nfs:
- path: /exports/miq-pv03
+ path: {{ openshift_cfme_nfs_directory }}/miq-pv03
server: {{ openshift_cfme_nfs_server }}
persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_cli/meta/main.yml b/roles/openshift_cli/meta/main.yml
index c1de367d9..04a1ce873 100644
--- a/roles/openshift_cli/meta/main.yml
+++ b/roles/openshift_cli/meta/main.yml
@@ -15,4 +15,4 @@ dependencies:
- role: openshift_docker
when: not skip_docker_role | default(False) | bool
- role: openshift_common
-- role: openshift_cli_facts
+- role: openshift_facts
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index c716a0860..9e61805f9 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -1,6 +1,6 @@
---
- set_fact:
- l_use_crio: "{{ openshift_docker_use_crio | default(false) }}"
+ l_use_crio: "{{ openshift_use_crio | default(false) }}"
- name: Install clients
package: name={{ openshift.common.service_type }}-clients state=present
diff --git a/roles/openshift_cli_facts/meta/main.yml b/roles/openshift_cli_facts/meta/main.yml
deleted file mode 100644
index 59acde215..000000000
--- a/roles/openshift_cli_facts/meta/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-galaxy_info:
- author: Jason DeTiberus
- description: OpenShift CLI Facts
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 1.9
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
-dependencies:
-- role: openshift_facts
diff --git a/roles/openshift_cli_facts/tasks/main.yml b/roles/openshift_cli_facts/tasks/main.yml
deleted file mode 100644
index dd1ed8965..000000000
--- a/roles/openshift_cli_facts/tasks/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# TODO: move this to a new 'cli' role
-- openshift_facts:
- role: common
- local_facts:
- cli_image: "{{ osm_image | default(None) }}"
diff --git a/roles/openshift_clock/defaults/main.yml b/roles/openshift_clock/defaults/main.yml
new file mode 100644
index 000000000..a94f67199
--- /dev/null
+++ b/roles/openshift_clock/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+openshift_clock_enabled: True
diff --git a/roles/openshift_clock/meta/main.yml b/roles/openshift_clock/meta/main.yml
index 3e175beb0..d1e86d826 100644
--- a/roles/openshift_clock/meta/main.yml
+++ b/roles/openshift_clock/meta/main.yml
@@ -11,5 +11,4 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies:
-- { role: openshift_facts }
+dependencies: []
diff --git a/roles/openshift_clock/tasks/main.yaml b/roles/openshift_clock/tasks/main.yaml
index 3911201ea..f8b02524a 100644
--- a/roles/openshift_clock/tasks/main.yaml
+++ b/roles/openshift_clock/tasks/main.yaml
@@ -1,14 +1,15 @@
---
-- name: Set clock facts
- openshift_facts:
- role: clock
- local_facts:
- enabled: "{{ openshift_clock_enabled | default(None) }}"
+- name: Determine if chrony is installed
+ command: rpm -q chrony
+ failed_when: false
+ register: chrony_installed
- name: Install ntp package
package: name=ntp state=present
- when: openshift.clock.enabled | bool and not openshift.clock.chrony_installed | bool
+ when:
+ - openshift_clock_enabled | bool
+ - chrony_installed.rc != 0
- name: Start and enable ntpd/chronyd
- shell: timedatectl set-ntp true
- when: openshift.clock.enabled | bool
+ command: timedatectl set-ntp true
+ when: openshift_clock_enabled | bool
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index 51313a258..a0bd6c860 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -40,8 +40,8 @@
when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool
- fail:
- msg: openshift_hostname must be 64 characters or less
- when: openshift_hostname is defined and openshift_hostname | length > 64
+ msg: openshift_hostname must be 63 characters or less
+ when: openshift_hostname is defined and openshift_hostname | length > 63
- name: Set common Cluster facts
openshift_facts:
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
index 516d7dc29..334150f63 100644
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ b/roles/openshift_docker_facts/tasks/main.yml
@@ -17,7 +17,7 @@
hosted_registry_insecure: "{{ openshift_docker_hosted_registry_insecure | default(openshift.docker.hosted_registry_insecure | default(False)) }}"
hosted_registry_network: "{{ openshift_docker_hosted_registry_network | default(None) }}"
use_system_container: "{{ openshift_docker_use_system_container | default(False) }}"
- use_crio: "{{ openshift_docker_use_crio | default(False) }}"
+ use_crio: "{{ openshift_use_crio | default(False) }}"
- role: node
local_facts:
sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index 3a8ce55c4..ca3f219d8 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -40,5 +40,6 @@ popd
wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/dotnet_imagestreams.json -O ${EXAMPLES_BASE}/image-streams/dotnet_imagestreams.json
wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/templates/dotnet-example.json -O ${EXAMPLES_BASE}/quickstart-templates/dotnet-example.json
wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/templates/dotnet-pgsql-persistent.json -O ${EXAMPLES_BASE}/quickstart-templates/dotnet-pgsql-persistent.json
+wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/templates/dotnet-runtime-example.json -O ${EXAMPLES_BASE}/quickstart-templates/dotnet-runtime-example.json
git diff files/examples
diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml
index 3bc6c5813..fd57a864c 100644
--- a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml
+++ b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml
@@ -10,6 +10,12 @@ metadata:
iconClass: "icon-rails"
objects:
- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${NAME}-secrets"
+ stringData:
+ pg-password: "${DATABASE_PASSWORD}"
+- apiVersion: v1
kind: Service
metadata:
annotations:
@@ -148,7 +154,10 @@ objects:
value: "${DATABASE_USER}"
-
name: "POSTGRESQL_PASSWORD"
- value: "${DATABASE_PASSWORD}"
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: "pg-password"
-
name: "POSTGRESQL_DATABASE"
value: "${DATABASE_NAME}"
@@ -345,7 +354,10 @@ objects:
value: "${DATABASE_USER}"
-
name: "POSTGRESQL_PASSWORD"
- value: "${DATABASE_PASSWORD}"
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: "pg-password"
-
name: "POSTGRESQL_DATABASE"
value: "${DATABASE_NAME}"
@@ -386,7 +398,8 @@ parameters:
displayName: "PostgreSQL Password"
required: true
description: "Password for the PostgreSQL user."
- value: "smartvm"
+ from: "[a-zA-Z0-9]{8}"
+ generate: expression
-
name: "DATABASE_NAME"
required: true
diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml
index 3bc6c5813..fd57a864c 100644
--- a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml
@@ -10,6 +10,12 @@ metadata:
iconClass: "icon-rails"
objects:
- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${NAME}-secrets"
+ stringData:
+ pg-password: "${DATABASE_PASSWORD}"
+- apiVersion: v1
kind: Service
metadata:
annotations:
@@ -148,7 +154,10 @@ objects:
value: "${DATABASE_USER}"
-
name: "POSTGRESQL_PASSWORD"
- value: "${DATABASE_PASSWORD}"
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: "pg-password"
-
name: "POSTGRESQL_DATABASE"
value: "${DATABASE_NAME}"
@@ -345,7 +354,10 @@ objects:
value: "${DATABASE_USER}"
-
name: "POSTGRESQL_PASSWORD"
- value: "${DATABASE_PASSWORD}"
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: "pg-password"
-
name: "POSTGRESQL_DATABASE"
value: "${DATABASE_NAME}"
@@ -386,7 +398,8 @@ parameters:
displayName: "PostgreSQL Password"
required: true
description: "Password for the PostgreSQL user."
- value: "smartvm"
+ from: "[a-zA-Z0-9]{8}"
+ generate: expression
-
name: "DATABASE_NAME"
required: true
diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/OWNERS b/roles/openshift_examples/files/examples/v3.6/db-templates/OWNERS
new file mode 100644
index 000000000..cbdc20f41
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/OWNERS
@@ -0,0 +1,12 @@
+reviewers:
+ - bparees
+ - gabemontero
+ - mfojtik
+ - dinhxuanvu
+ - jim-minter
+ - spadgett
+approvers:
+ - bparees
+ - mfojtik
+ - spadgett
+ - jupierce
diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json
index 536f7275e..6500ed0d3 100644
--- a/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json
@@ -27,13 +27,15 @@
"annotations": {
"template.openshift.io/expose-username": "{.data['database-user']}",
"template.openshift.io/expose-password": "{.data['database-password']}",
- "template.openshift.io/expose-root_password": "{.data['database-root-password']}"
+ "template.openshift.io/expose-root_password": "{.data['database-root-password']}",
+ "template.openshift.io/expose-database_name": "{.data['database-name']}"
}
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
"database-password" : "${MYSQL_PASSWORD}",
- "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}",
+ "database-name" : "${MYSQL_DATABASE}"
}
},
{
@@ -61,7 +63,10 @@
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -151,7 +156,12 @@
},
{
"name": "MYSQL_DATABASE",
- "value": "${MYSQL_DATABASE}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-name"
+ }
+ }
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json
index 3b7fdccce..4378fa4a0 100644
--- a/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json
@@ -27,13 +27,15 @@
"annotations": {
"template.openshift.io/expose-username": "{.data['database-user']}",
"template.openshift.io/expose-password": "{.data['database-password']}",
- "template.openshift.io/expose-root_password": "{.data['database-root-password']}"
+ "template.openshift.io/expose-root_password": "{.data['database-root-password']}",
+ "template.openshift.io/expose-database_name": "{.data['database-name']}"
}
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
"database-password" : "${MYSQL_PASSWORD}",
- "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}",
+ "database-name" : "${MYSQL_DATABASE}"
}
},
{
@@ -78,7 +80,10 @@
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -168,7 +173,12 @@
},
{
"name": "MYSQL_DATABASE",
- "value": "${MYSQL_DATABASE}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-name"
+ }
+ }
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json
index ee274194f..7271a2c69 100644
--- a/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json
@@ -3,7 +3,6 @@
"apiVersion": "v1",
"metadata": {
"name": "mongodb-ephemeral",
- "creationTimestamp": null,
"annotations": {
"openshift.io/display-name": "MongoDB (Ephemeral)",
"description": "MongoDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
@@ -28,13 +27,15 @@
"annotations": {
"template.openshift.io/expose-username": "{.data['database-user']}",
"template.openshift.io/expose-password": "{.data['database-password']}",
- "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}"
+ "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}",
+ "template.openshift.io/expose-database_name": "{.data['database-name']}"
}
},
"stringData" : {
"database-user" : "${MONGODB_USER}",
"database-password" : "${MONGODB_PASSWORD}",
- "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}"
+ "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}",
+ "database-name" : "${MONGODB_DATABASE}"
}
},
{
@@ -42,7 +43,6 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null,
"annotations": {
"template.openshift.io/expose-uri": "mongodb://{.spec.clusterIP}:{.spec.ports[?(.name==\"mongo\")].port}"
}
@@ -72,7 +72,9 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -104,7 +106,6 @@
},
"template": {
"metadata": {
- "creationTimestamp": null,
"labels": {
"name": "${DATABASE_SERVICE_NAME}"
}
@@ -164,7 +165,12 @@
},
{
"name": "MONGODB_DATABASE",
- "value": "${MONGODB_DATABASE}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-name"
+ }
+ }
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json
index e5ba43669..d70d2263f 100644
--- a/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json
@@ -3,7 +3,6 @@
"apiVersion": "v1",
"metadata": {
"name": "mongodb-persistent",
- "creationTimestamp": null,
"annotations": {
"openshift.io/display-name": "MongoDB (Persistent)",
"description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
@@ -28,13 +27,15 @@
"annotations": {
"template.openshift.io/expose-username": "{.data['database-user']}",
"template.openshift.io/expose-password": "{.data['database-password']}",
- "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}"
+ "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}",
+ "template.openshift.io/expose-database_name": "{.data['database-name']}"
}
},
"stringData" : {
"database-user" : "${MONGODB_USER}",
"database-password" : "${MONGODB_PASSWORD}",
- "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}"
+ "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}",
+ "database-name" : "${MONGODB_DATABASE}"
}
},
{
@@ -42,7 +43,6 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null,
"annotations": {
"template.openshift.io/expose-uri": "mongodb://{.spec.clusterIP}:{.spec.ports[?(.name==\"mongo\")].port}"
}
@@ -89,7 +89,9 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -121,7 +123,6 @@
},
"template": {
"metadata": {
- "creationTimestamp": null,
"labels": {
"name": "${DATABASE_SERVICE_NAME}"
}
@@ -181,7 +182,12 @@
},
{
"name": "MONGODB_DATABASE",
- "value": "${MONGODB_DATABASE}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-name"
+ }
+ }
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json
index 969e62ac5..54785993c 100644
--- a/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json
@@ -19,7 +19,7 @@
"template": "mysql-ephemeral-template"
},
"objects": [
- {
+ {
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
@@ -27,13 +27,15 @@
"annotations": {
"template.openshift.io/expose-username": "{.data['database-user']}",
"template.openshift.io/expose-password": "{.data['database-password']}",
- "template.openshift.io/expose-root_password": "{.data['database-root-password']}"
+ "template.openshift.io/expose-root_password": "{.data['database-root-password']}",
+ "template.openshift.io/expose-database_name": "{.data['database-name']}"
}
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
"database-password" : "${MYSQL_PASSWORD}",
- "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}",
+ "database-name" : "${MYSQL_DATABASE}"
}
},
{
@@ -41,7 +43,6 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null,
"annotations": {
"template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mysql\")].port}"
}
@@ -71,7 +72,9 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -103,7 +106,6 @@
},
"template": {
"metadata": {
- "creationTimestamp": null,
"labels": {
"name": "${DATABASE_SERVICE_NAME}"
}
@@ -164,7 +166,12 @@
},
{
"name": "MYSQL_DATABASE",
- "value": "${MYSQL_DATABASE}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-name"
+ }
+ }
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json
index 4f39d41a5..2bd84b106 100644
--- a/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json
@@ -27,13 +27,15 @@
"annotations": {
"template.openshift.io/expose-username": "{.data['database-user']}",
"template.openshift.io/expose-password": "{.data['database-password']}",
- "template.openshift.io/expose-root_password": "{.data['database-root-password']}"
+ "template.openshift.io/expose-root_password": "{.data['database-root-password']}",
+ "template.openshift.io/expose-database_name": "{.data['database-name']}"
}
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
"database-password" : "${MYSQL_PASSWORD}",
- "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}",
+ "database-name" : "${MYSQL_DATABASE}"
}
},
{
@@ -78,7 +80,10 @@
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -168,7 +173,12 @@
},
{
"name": "MYSQL_DATABASE",
- "value": "${MYSQL_DATABASE}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-name"
+ }
+ }
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json
index c37102cb0..849c9d83f 100644
--- a/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json
@@ -3,7 +3,6 @@
"apiVersion": "v1",
"metadata": {
"name": "postgresql-ephemeral",
- "creationTimestamp": null,
"annotations": {
"openshift.io/display-name": "PostgreSQL (Ephemeral)",
"description": "PostgreSQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
@@ -27,12 +26,14 @@
"name": "${DATABASE_SERVICE_NAME}",
"annotations": {
"template.openshift.io/expose-username": "{.data['database-user']}",
- "template.openshift.io/expose-password": "{.data['database-password']}"
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-database_name": "{.data['database-name']}"
}
},
"stringData" : {
"database-user" : "${POSTGRESQL_USER}",
- "database-password" : "${POSTGRESQL_PASSWORD}"
+ "database-password" : "${POSTGRESQL_PASSWORD}",
+ "database-name" : "${POSTGRESQL_DATABASE}"
}
},
{
@@ -40,7 +41,6 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null,
"annotations": {
"template.openshift.io/expose-uri": "postgres://{.spec.clusterIP}:{.spec.ports[?(.name==\"postgresql\")].port}"
}
@@ -70,7 +70,9 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -102,7 +104,6 @@
},
"template": {
"metadata": {
- "creationTimestamp": null,
"labels": {
"name": "${DATABASE_SERVICE_NAME}"
}
@@ -153,7 +154,12 @@
},
{
"name": "POSTGRESQL_DATABASE",
- "value": "${POSTGRESQL_DATABASE}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-name"
+ }
+ }
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json
index 32dc93a95..b622baa01 100644
--- a/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json
@@ -3,7 +3,6 @@
"apiVersion": "v1",
"metadata": {
"name": "postgresql-persistent",
- "creationTimestamp": null,
"annotations": {
"openshift.io/display-name": "PostgreSQL (Persistent)",
"description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
@@ -27,12 +26,14 @@
"name": "${DATABASE_SERVICE_NAME}",
"annotations": {
"template.openshift.io/expose-username": "{.data['database-user']}",
- "template.openshift.io/expose-password": "{.data['database-password']}"
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-database_name": "{.data['database-name']}"
}
},
"stringData" : {
"database-user" : "${POSTGRESQL_USER}",
- "database-password" : "${POSTGRESQL_PASSWORD}"
+ "database-password" : "${POSTGRESQL_PASSWORD}",
+ "database-name" : "${POSTGRESQL_DATABASE}"
}
},
{
@@ -40,7 +41,6 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null,
"annotations": {
"template.openshift.io/expose-uri": "postgres://{.spec.clusterIP}:{.spec.ports[?(.name==\"postgresql\")].port}"
}
@@ -87,7 +87,9 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -119,7 +121,6 @@
},
"template": {
"metadata": {
- "creationTimestamp": null,
"labels": {
"name": "${DATABASE_SERVICE_NAME}"
}
@@ -170,7 +171,12 @@
},
{
"name": "POSTGRESQL_DATABASE",
- "value": "${POSTGRESQL_DATABASE}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-name"
+ }
+ }
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json
index 6bb683e52..15bdd079b 100644
--- a/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json
@@ -3,7 +3,6 @@
"apiVersion": "v1",
"metadata": {
"name": "redis-ephemeral",
- "creationTimestamp": null,
"annotations": {
"openshift.io/display-name": "Redis (Ephemeral)",
"description": "Redis in-memory data structure store, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
@@ -38,7 +37,6 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null,
"annotations": {
"template.openshift.io/expose-uri": "redis://{.spec.clusterIP}:{.spec.ports[?(.name==\"redis\")].port}"
}
@@ -68,7 +66,9 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -100,7 +100,6 @@
},
"template": {
"metadata": {
- "creationTimestamp": null,
"labels": {
"name": "${DATABASE_SERVICE_NAME}"
}
diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json
index 9e8be2309..1e31b02e0 100644
--- a/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json
@@ -3,7 +3,6 @@
"apiVersion": "v1",
"metadata": {
"name": "redis-persistent",
- "creationTimestamp": null,
"annotations": {
"openshift.io/display-name": "Redis (Persistent)",
"description": "Redis in-memory data structure store, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
@@ -38,7 +37,6 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null,
"annotations": {
"template.openshift.io/expose-uri": "redis://{.spec.clusterIP}:{.spec.ports[?(.name==\"redis\")].port}"
}
@@ -85,7 +83,9 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -117,7 +117,6 @@
},
"template": {
"metadata": {
- "creationTimestamp": null,
"labels": {
"name": "${DATABASE_SERVICE_NAME}"
}
diff --git a/roles/openshift_examples/files/examples/v3.6/image-streams/OWNERS b/roles/openshift_examples/files/examples/v3.6/image-streams/OWNERS
new file mode 100644
index 000000000..6ddf77f12
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/image-streams/OWNERS
@@ -0,0 +1,14 @@
+reviewers:
+ - bparees
+ - sspeiche
+ - mfojtik
+ - liggitt
+ - jcantrill
+ - hhorak
+ - csrwng
+approvers:
+ - bparees
+ - mfojtik
+ - liggitt
+ - jcantrill
+ - csrwng
diff --git a/roles/openshift_examples/files/examples/v3.6/image-streams/dotnet_imagestreams.json b/roles/openshift_examples/files/examples/v3.6/image-streams/dotnet_imagestreams.json
index 857ffa980..ee753966f 100644
--- a/roles/openshift_examples/files/examples/v3.6/image-streams/dotnet_imagestreams.json
+++ b/roles/openshift_examples/files/examples/v3.6/image-streams/dotnet_imagestreams.json
@@ -14,7 +14,7 @@
"metadata": {
"name": "dotnet",
"annotations": {
- "openshift.io/display-name": ".NET Core"
+ "openshift.io/display-name": ".NET Core Builder Images"
}
},
"spec": {
@@ -23,17 +23,35 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": ".NET Core (Latest)",
- "description": "Build and run .NET Core applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/1.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of .NET Core available on OpenShift, including major versions updates.",
+          "description": "Build and run .NET Core applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/2.0/build/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of .NET Core available on OpenShift, including major version updates.",
"iconClass": "icon-dotnet",
"tags": "builder,.net,dotnet,dotnetcore",
"supports":"dotnet",
"sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git",
"sampleContextDir": "app",
- "sampleRef": "dotnetcore-1.1"
+ "sampleRef": "dotnetcore-2.0"
},
"from": {
"kind": "ImageStreamTag",
- "name": "1.1"
+ "name": "2.0"
+ }
+ },
+ {
+ "name": "2.0",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core 2.0",
+ "description": "Build and run .NET Core 2.0 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/2.0/build/README.md.",
+ "iconClass": "icon-dotnet",
+ "tags": "builder,.net,dotnet,dotnetcore,rh-dotnet20",
+ "supports":"dotnet:2.0,dotnet",
+ "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git",
+ "sampleContextDir": "app",
+ "sampleRef": "dotnetcore-2.0",
+ "version": "2.0"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/dotnet/dotnet-20-rhel7:2.0"
}
},
{
@@ -74,6 +92,49 @@
}
]
}
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dotnet-runtime",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core Runtime Images"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core Runtime (Latest)",
+            "description": "Run .NET Core applications on RHEL 7. For more information about using this image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/2.0/runtime/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of .NET Core Runtime available on OpenShift, including major version updates.",
+ "iconClass": "icon-dotnet",
+ "tags": "runtime,.net-runtime,dotnet-runtime,dotnetcore-runtime",
+ "supports":"dotnet-runtime"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "2.0"
+ }
+ },
+ {
+ "name": "2.0",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core 2.0 Runtime",
+ "description": "Run .NET Core applications on RHEL 7. For more information about using this image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/2.0/runtime/README.md.",
+ "iconClass": "icon-dotnet",
+ "tags": "runtime,.net-runtime,dotnet-runtime,dotnetcore-runtime",
+ "supports":"dotnet-runtime",
+ "version": "2.0"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/dotnet/dotnet-20-runtime-rhel7:2.0"
+ }
+ }
+ ]
+ }
}
]
}
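
The new 2.0 entries illustrate the two reference kinds these image streams use: latest is an indirection onto another tag in the same stream, while versioned tags import directly from an external registry. A trimmed sketch of the builder stream:

apiVersion: v1
kind: ImageStream
metadata:
  name: dotnet
spec:
  tags:
  - name: latest
    from:
      kind: ImageStreamTag       # indirection: follows this stream's own 2.0 tag
      name: "2.0"
  - name: "2.0"
    from:
      kind: DockerImage          # imported directly from the external registry
      name: registry.access.redhat.com/dotnet/dotnet-20-rhel7:2.0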
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/OWNERS b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/OWNERS
new file mode 100644
index 000000000..a26e484d6
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/OWNERS
@@ -0,0 +1,12 @@
+reviewers:
+ - bparees
+ - gabemontero
+ - coreydaley
+ - dinhxuanvu
+ - sspeiche
+ - mfojtik
+ - jupierce
+approvers:
+ - bparees
+ - mfojtik
+ - jupierce
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json
index 6d987ee33..289f809fa 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json
@@ -89,7 +89,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to build the application"
+ "description": "Defines how to build the application",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
@@ -148,7 +149,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to deploy the application server"
+ "description": "Defines how to deploy the application server",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json
index fb2ef206e..0562982b3 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json
@@ -89,7 +89,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to build the application"
+ "description": "Defines how to build the application",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
@@ -148,7 +149,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to deploy the application server"
+ "description": "Defines how to deploy the application server",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json
index 7ffb25e14..7a3875d09 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json
@@ -87,7 +87,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to build the application"
+ "description": "Defines how to build the application",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
@@ -146,7 +147,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to deploy the application server"
+ "description": "Defines how to deploy the application server",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json
index d787e376b..399ec72a8 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json
@@ -87,7 +87,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to build the application"
+ "description": "Defines how to build the application",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
@@ -146,7 +147,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to deploy the application server"
+ "description": "Defines how to deploy the application server",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json
index a2070207b..e37f7a492 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json
@@ -87,7 +87,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to build the application"
+ "description": "Defines how to build the application",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
@@ -146,7 +147,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to deploy the application server"
+ "description": "Defines how to deploy the application server",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json
index 0d33c6e0e..965c2ebfe 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json
@@ -87,7 +87,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to build the application"
+ "description": "Defines how to build the application",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
@@ -146,7 +147,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to deploy the application server"
+ "description": "Defines how to deploy the application server",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json
index af46579c8..f1fef3093 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json
@@ -249,7 +249,7 @@
"displayName": ".NET builder",
"required": true,
"description": "The image stream tag which is used to build the code.",
- "value": "dotnet:1.0"
+ "value": "dotnet:2.0"
},
{
"name": "NAMESPACE",
@@ -269,7 +269,7 @@
"name": "SOURCE_REPOSITORY_REF",
"displayName": "Git Reference",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
- "value": "dotnetcore-1.0"
+ "value": "dotnetcore-2.0"
},
{
"name": "CONTEXT_DIR",
@@ -299,7 +299,7 @@
{
"name": "DOTNET_STARTUP_PROJECT",
"displayName": "Startup Project",
- "description": "Set this to the folder containing your startup project.",
+ "description": "Set this to a project file (e.g. csproj) or a folder containing a single project file.",
"value": "app"
},
{
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json
index a2b59c2d3..c83132152 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json
@@ -455,7 +455,7 @@
"displayName": ".NET builder",
"required": true,
"description": "The image stream tag which is used to build the code.",
- "value": "dotnet:1.1"
+ "value": "dotnet:2.0"
},
{
"name": "NAMESPACE",
@@ -475,7 +475,7 @@
"name": "SOURCE_REPOSITORY_REF",
"displayName": "Git Reference",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
- "value": "rel/1.1-example"
+ "value": "rel/2.0-example"
},
{
"name": "CONTEXT_DIR",
@@ -485,7 +485,7 @@
{
"name": "DOTNET_STARTUP_PROJECT",
"displayName": "Startup Project",
- "description": "Set this to the folder containing your startup project.",
+ "description": "Set this to a project file (e.g. csproj) or a folder containing a single project file.",
"value": "samples/MusicStore"
},
{
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-runtime-example.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-runtime-example.json
new file mode 100644
index 000000000..e1dccf290
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-runtime-example.json
@@ -0,0 +1,412 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dotnet-runtime-example",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core Runtime Example",
+      "description": "An example .NET Core Runtime application.",
+ "tags": "quickstart,dotnet,.net",
+ "iconClass": "icon-dotnet",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/redhat-developer/s2i-dotnetcore",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "objects": [
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application runtime image"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}-build",
+ "annotations": {
+ "description": "Keeps track of changes in the application builder image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}-build",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "${DOTNET_BUILD_IMAGE_STREAM_TAG}"
+ },
+ "env": [
+ {
+ "name": "DOTNET_STARTUP_PROJECT",
+ "value": "${DOTNET_STARTUP_PROJECT}"
+ },
+ {
+ "name": "DOTNET_ASSEMBLY_NAME",
+ "value": "${DOTNET_ASSEMBLY_NAME}"
+ },
+ {
+ "name": "DOTNET_NPM_TOOLS",
+ "value": "${DOTNET_NPM_TOOLS}"
+ },
+ {
+ "name": "DOTNET_TEST_PROJECTS",
+ "value": "${DOTNET_TEST_PROJECTS}"
+ },
+ {
+ "name": "DOTNET_CONFIGURATION",
+ "value": "${DOTNET_CONFIGURATION}"
+ },
+ {
+ "name": "DOTNET_RESTORE_SOURCES",
+ "value": "${DOTNET_RESTORE_SOURCES}"
+ },
+ {
+ "name": "DOTNET_PACK",
+ "value": "true"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}-build:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}-runtime",
+ "annotations": {
+ "description": "Defines how to chain the runtime image from the source build image"
+ }
+ },
+ "spec": {
+ "source": {
+ "dockerfile": "FROM ${DOTNET_RUNTIME_IMAGE_STREAM_TAG}\nADD app.tar.gz .",
+ "images": [
+ {
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}-build:latest"
+ },
+ "paths": [
+ {
+ "sourcePath": "/opt/app-root/app.tar.gz",
+ "destinationDir": "."
+ }
+ ]
+ }
+ ]
+ },
+ "strategy": {
+ "type": "Docker",
+ "dockerStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "${DOTNET_RUNTIME_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}-build:latest"
+ }
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}-runtime",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "dotnet-runtime-app"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "dotnet-runtime-app",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "livenessProbe": {
+ "httpGet": {
+ "path": "/",
+ "port": 8080,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 40,
+ "timeoutSeconds": 15
+ },
+ "readinessProbe": {
+ "httpGet": {
+ "path": "/",
+ "port": 8080,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 10,
+ "timeoutSeconds": 30
+ },
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "env": []
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "dotnet-runtime-example"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "DOTNET_RUNTIME_IMAGE_STREAM_TAG",
+ "displayName": ".NET Runtime Imagestream Tag",
+ "description": "The image stream tag which is used to run the application.",
+ "required": true,
+ "value": "dotnet-runtime:2.0"
+ },
+ {
+ "name": "DOTNET_BUILD_IMAGE_STREAM_TAG",
+ "displayName": ".NET builder",
+ "required": true,
+ "description": "The image stream tag which is used to build the application.",
+ "value": "dotnet:2.0"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "required": true,
+ "value": "openshift"
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+      "description": "The exposed hostname that will route to the .NET Core service; if left blank, a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "description": "The URL of the repository with your application source code.",
+ "required": true,
+ "value": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
+ "value": "dotnetcore-2.0"
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to use a subdirectory of the source code repository"
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "displayName": "Generic Webhook Secret",
+ "description": "A secret string used to configure the Generic webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DOTNET_STARTUP_PROJECT",
+ "displayName": "Startup Project",
+ "description": "Set this to the folder containing your startup project.",
+ "value": "app"
+ },
+ {
+ "name": "DOTNET_ASSEMBLY_NAME",
+ "displayName": "Startup Assembly",
+ "description": "Set this when the assembly name is overridden in the project file."
+ },
+ {
+ "name": "DOTNET_NPM_TOOLS",
+ "displayName": "Npm Tools",
+ "description": "Set this to a space separated list of npm tools needed to publish.",
+ "value": "bower gulp"
+ },
+ {
+ "name": "DOTNET_TEST_PROJECTS",
+ "displayName": "Test projects",
+ "description": "Set this to a space separated list of test projects to run before publishing."
+ },
+ {
+ "name": "DOTNET_CONFIGURATION",
+ "displayName": "Configuration",
+ "description": "Set this to configuration (Release/Debug).",
+ "value": "Release"
+ },
+ {
+ "name": "DOTNET_RESTORE_SOURCES",
+ "displayName": "NuGet package sources",
+ "description": "Set this to override the NuGet.config sources."
+ }
+ ]
+}
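
dotnet-runtime-example.json chains two builds: the s2i BuildConfig sets DOTNET_PACK=true so the builder image publishes /opt/app-root/app.tar.gz, and a second Docker-strategy BuildConfig copies that archive out of the builder image and layers it onto the slimmer runtime image. The image-source input is the key piece; a trimmed sketch with illustrative names in place of the template parameters:

# fragment of the runtime BuildConfig: an image-source input pulls the packed
# artifact out of the builder image, and the inline Dockerfile layers it onto
# the runtime base (names illustrative)
source:
  dockerfile: |
    FROM dotnet-runtime:2.0
    ADD app.tar.gz .
  images:
  - from:
      kind: ImageStreamTag
      name: app-build:latest     # output of the first (s2i) build
    paths:
    - sourcePath: /opt/app-root/app.tar.gz
      destinationDir: "."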
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/httpd.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/httpd.json
index ac671cc06..6cf9d76eb 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/httpd.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/httpd.json
@@ -74,7 +74,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to build the application"
+ "description": "Defines how to build the application",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
@@ -130,7 +131,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to deploy the application server"
+ "description": "Defines how to deploy the application server",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json
index ce96684a9..62f43bc0b 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json
@@ -3,7 +3,6 @@
"apiVersion": "v1",
"metadata": {
"name": "jenkins-ephemeral",
- "creationTimestamp": null,
"annotations": {
"openshift.io/display-name": "Jenkins (Ephemeral)",
"description": "Jenkins service, without persistent storage.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
@@ -22,7 +21,6 @@
"apiVersion": "v1",
"metadata": {
"name": "${JENKINS_SERVICE_NAME}",
- "creationTimestamp": null,
"annotations": {
"template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
}
@@ -43,7 +41,9 @@
"apiVersion": "v1",
"metadata": {
"name": "${JENKINS_SERVICE_NAME}",
- "creationTimestamp": null
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -75,7 +75,6 @@
},
"template": {
"metadata": {
- "creationTimestamp": null,
"labels": {
"name": "${JENKINS_SERVICE_NAME}"
}
@@ -221,8 +220,7 @@
"annotations": {
"service.alpha.openshift.io/dependencies": "[{\"name\": \"${JNLP_SERVICE_NAME}\", \"namespace\": \"\", \"kind\": \"Service\"}]",
"service.openshift.io/infrastructure": "true"
- },
- "creationTimestamp": null
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json
index 34b2b920b..e9068e455 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json
@@ -3,7 +3,6 @@
"apiVersion": "v1",
"metadata": {
"name": "jenkins-persistent",
- "creationTimestamp": null,
"annotations": {
"openshift.io/display-name": "Jenkins (Persistent)",
"description": "Jenkins service, with persistent storage.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
@@ -22,7 +21,6 @@
"apiVersion": "v1",
"metadata": {
"name": "${JENKINS_SERVICE_NAME}",
- "creationTimestamp": null,
"annotations": {
"template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
}
@@ -60,7 +58,9 @@
"apiVersion": "v1",
"metadata": {
"name": "${JENKINS_SERVICE_NAME}",
- "creationTimestamp": null
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -92,7 +92,6 @@
},
"template": {
"metadata": {
- "creationTimestamp": null,
"labels": {
"name": "${JENKINS_SERVICE_NAME}"
}
@@ -238,8 +237,7 @@
"annotations": {
"service.alpha.openshift.io/dependencies": "[{\"name\": \"${JNLP_SERVICE_NAME}\", \"namespace\": \"\", \"kind\": \"Service\"}]",
"service.openshift.io/infrastructure": "true"
- },
- "creationTimestamp": null
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json
index a9c365361..df3704b9f 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json
@@ -87,7 +87,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to build the application"
+ "description": "Defines how to build the application",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
@@ -152,7 +153,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to deploy the application server"
+ "description": "Defines how to deploy the application server",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json
index 53a6147d5..eb6ab33d9 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json
@@ -87,7 +87,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to build the application"
+ "description": "Defines how to build the application",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
@@ -152,7 +153,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to deploy the application server"
+ "description": "Defines how to deploy the application server",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json
index f07a43071..59e2e41ea 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json
@@ -93,7 +93,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to build the application"
+ "description": "Defines how to build the application",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
@@ -152,7 +153,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to deploy the application server"
+ "description": "Defines how to deploy the application server",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json
index a7992c988..b3d080a91 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json
@@ -93,7 +93,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to build the application"
+ "description": "Defines how to build the application",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
@@ -152,7 +153,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to deploy the application server"
+ "description": "Defines how to deploy the application server",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-ephemeral-template.json
index 536f7275e..6500ed0d3 100644
--- a/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-ephemeral-template.json
@@ -27,13 +27,15 @@
"annotations": {
"template.openshift.io/expose-username": "{.data['database-user']}",
"template.openshift.io/expose-password": "{.data['database-password']}",
- "template.openshift.io/expose-root_password": "{.data['database-root-password']}"
+ "template.openshift.io/expose-root_password": "{.data['database-root-password']}",
+ "template.openshift.io/expose-database_name": "{.data['database-name']}"
}
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
"database-password" : "${MYSQL_PASSWORD}",
- "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}",
+ "database-name" : "${MYSQL_DATABASE}"
}
},
{
@@ -61,7 +63,10 @@
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -151,7 +156,12 @@
},
{
"name": "MYSQL_DATABASE",
- "value": "${MYSQL_DATABASE}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-name"
+ }
+ }
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-persistent-template.json
index 3b7fdccce..4378fa4a0 100644
--- a/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-persistent-template.json
@@ -27,13 +27,15 @@
"annotations": {
"template.openshift.io/expose-username": "{.data['database-user']}",
"template.openshift.io/expose-password": "{.data['database-password']}",
- "template.openshift.io/expose-root_password": "{.data['database-root-password']}"
+ "template.openshift.io/expose-root_password": "{.data['database-root-password']}",
+ "template.openshift.io/expose-database_name": "{.data['database-name']}"
}
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
"database-password" : "${MYSQL_PASSWORD}",
- "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}",
+ "database-name" : "${MYSQL_DATABASE}"
}
},
{
@@ -78,7 +80,10 @@
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -168,7 +173,12 @@
},
{
"name": "MYSQL_DATABASE",
- "value": "${MYSQL_DATABASE}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-name"
+ }
+ }
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-ephemeral-template.json
index ee274194f..7271a2c69 100644
--- a/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-ephemeral-template.json
@@ -3,7 +3,6 @@
"apiVersion": "v1",
"metadata": {
"name": "mongodb-ephemeral",
- "creationTimestamp": null,
"annotations": {
"openshift.io/display-name": "MongoDB (Ephemeral)",
"description": "MongoDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
@@ -28,13 +27,15 @@
"annotations": {
"template.openshift.io/expose-username": "{.data['database-user']}",
"template.openshift.io/expose-password": "{.data['database-password']}",
- "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}"
+ "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}",
+ "template.openshift.io/expose-database_name": "{.data['database-name']}"
}
},
"stringData" : {
"database-user" : "${MONGODB_USER}",
"database-password" : "${MONGODB_PASSWORD}",
- "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}"
+ "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}",
+ "database-name" : "${MONGODB_DATABASE}"
}
},
{
@@ -42,7 +43,6 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null,
"annotations": {
"template.openshift.io/expose-uri": "mongodb://{.spec.clusterIP}:{.spec.ports[?(.name==\"mongo\")].port}"
}
@@ -72,7 +72,9 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -104,7 +106,6 @@
},
"template": {
"metadata": {
- "creationTimestamp": null,
"labels": {
"name": "${DATABASE_SERVICE_NAME}"
}
@@ -164,7 +165,12 @@
},
{
"name": "MONGODB_DATABASE",
- "value": "${MONGODB_DATABASE}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-name"
+ }
+ }
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-persistent-template.json
index e5ba43669..d70d2263f 100644
--- a/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-persistent-template.json
@@ -3,7 +3,6 @@
"apiVersion": "v1",
"metadata": {
"name": "mongodb-persistent",
- "creationTimestamp": null,
"annotations": {
"openshift.io/display-name": "MongoDB (Persistent)",
"description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
@@ -28,13 +27,15 @@
"annotations": {
"template.openshift.io/expose-username": "{.data['database-user']}",
"template.openshift.io/expose-password": "{.data['database-password']}",
- "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}"
+ "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}",
+ "template.openshift.io/expose-database_name": "{.data['database-name']}"
}
},
"stringData" : {
"database-user" : "${MONGODB_USER}",
"database-password" : "${MONGODB_PASSWORD}",
- "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}"
+ "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}",
+ "database-name" : "${MONGODB_DATABASE}"
}
},
{
@@ -42,7 +43,6 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null,
"annotations": {
"template.openshift.io/expose-uri": "mongodb://{.spec.clusterIP}:{.spec.ports[?(.name==\"mongo\")].port}"
}
@@ -89,7 +89,9 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -121,7 +123,6 @@
},
"template": {
"metadata": {
- "creationTimestamp": null,
"labels": {
"name": "${DATABASE_SERVICE_NAME}"
}
@@ -181,7 +182,12 @@
},
{
"name": "MONGODB_DATABASE",
- "value": "${MONGODB_DATABASE}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-name"
+ }
+ }
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-ephemeral-template.json
index 969e62ac5..54785993c 100644
--- a/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-ephemeral-template.json
@@ -19,7 +19,7 @@
"template": "mysql-ephemeral-template"
},
"objects": [
- {
+ {
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
@@ -27,13 +27,15 @@
"annotations": {
"template.openshift.io/expose-username": "{.data['database-user']}",
"template.openshift.io/expose-password": "{.data['database-password']}",
- "template.openshift.io/expose-root_password": "{.data['database-root-password']}"
+ "template.openshift.io/expose-root_password": "{.data['database-root-password']}",
+ "template.openshift.io/expose-database_name": "{.data['database-name']}"
}
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
"database-password" : "${MYSQL_PASSWORD}",
- "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}",
+ "database-name" : "${MYSQL_DATABASE}"
}
},
{
@@ -41,7 +43,6 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null,
"annotations": {
"template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mysql\")].port}"
}
@@ -71,7 +72,9 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -103,7 +106,6 @@
},
"template": {
"metadata": {
- "creationTimestamp": null,
"labels": {
"name": "${DATABASE_SERVICE_NAME}"
}
@@ -164,7 +166,12 @@
},
{
"name": "MYSQL_DATABASE",
- "value": "${MYSQL_DATABASE}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-name"
+ }
+ }
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-persistent-template.json
index 4f39d41a5..2bd84b106 100644
--- a/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-persistent-template.json
@@ -27,13 +27,15 @@
"annotations": {
"template.openshift.io/expose-username": "{.data['database-user']}",
"template.openshift.io/expose-password": "{.data['database-password']}",
- "template.openshift.io/expose-root_password": "{.data['database-root-password']}"
+ "template.openshift.io/expose-root_password": "{.data['database-root-password']}",
+ "template.openshift.io/expose-database_name": "{.data['database-name']}"
}
},
"stringData" : {
"database-user" : "${MYSQL_USER}",
"database-password" : "${MYSQL_PASSWORD}",
- "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}",
+ "database-name" : "${MYSQL_DATABASE}"
}
},
{
@@ -78,7 +80,10 @@
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
- "name": "${DATABASE_SERVICE_NAME}"
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -168,7 +173,12 @@
},
{
"name": "MYSQL_DATABASE",
- "value": "${MYSQL_DATABASE}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-name"
+ }
+ }
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-ephemeral-template.json
index c37102cb0..849c9d83f 100644
--- a/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-ephemeral-template.json
@@ -3,7 +3,6 @@
"apiVersion": "v1",
"metadata": {
"name": "postgresql-ephemeral",
- "creationTimestamp": null,
"annotations": {
"openshift.io/display-name": "PostgreSQL (Ephemeral)",
"description": "PostgreSQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
@@ -27,12 +26,14 @@
"name": "${DATABASE_SERVICE_NAME}",
"annotations": {
"template.openshift.io/expose-username": "{.data['database-user']}",
- "template.openshift.io/expose-password": "{.data['database-password']}"
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-database_name": "{.data['database-name']}"
}
},
"stringData" : {
"database-user" : "${POSTGRESQL_USER}",
- "database-password" : "${POSTGRESQL_PASSWORD}"
+ "database-password" : "${POSTGRESQL_PASSWORD}",
+ "database-name" : "${POSTGRESQL_DATABASE}"
}
},
{
@@ -40,7 +41,6 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null,
"annotations": {
"template.openshift.io/expose-uri": "postgres://{.spec.clusterIP}:{.spec.ports[?(.name==\"postgresql\")].port}"
}
@@ -70,7 +70,9 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -102,7 +104,6 @@
},
"template": {
"metadata": {
- "creationTimestamp": null,
"labels": {
"name": "${DATABASE_SERVICE_NAME}"
}
@@ -153,7 +154,12 @@
},
{
"name": "POSTGRESQL_DATABASE",
- "value": "${POSTGRESQL_DATABASE}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-name"
+ }
+ }
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-persistent-template.json
index 32dc93a95..b622baa01 100644
--- a/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-persistent-template.json
@@ -3,7 +3,6 @@
"apiVersion": "v1",
"metadata": {
"name": "postgresql-persistent",
- "creationTimestamp": null,
"annotations": {
"openshift.io/display-name": "PostgreSQL (Persistent)",
"description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
@@ -27,12 +26,14 @@
"name": "${DATABASE_SERVICE_NAME}",
"annotations": {
"template.openshift.io/expose-username": "{.data['database-user']}",
- "template.openshift.io/expose-password": "{.data['database-password']}"
+ "template.openshift.io/expose-password": "{.data['database-password']}",
+ "template.openshift.io/expose-database_name": "{.data['database-name']}"
}
},
"stringData" : {
"database-user" : "${POSTGRESQL_USER}",
- "database-password" : "${POSTGRESQL_PASSWORD}"
+ "database-password" : "${POSTGRESQL_PASSWORD}",
+ "database-name" : "${POSTGRESQL_DATABASE}"
}
},
{
@@ -40,7 +41,6 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null,
"annotations": {
"template.openshift.io/expose-uri": "postgres://{.spec.clusterIP}:{.spec.ports[?(.name==\"postgresql\")].port}"
}
@@ -87,7 +87,9 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -119,7 +121,6 @@
},
"template": {
"metadata": {
- "creationTimestamp": null,
"labels": {
"name": "${DATABASE_SERVICE_NAME}"
}
@@ -170,7 +171,12 @@
},
{
"name": "POSTGRESQL_DATABASE",
- "value": "${POSTGRESQL_DATABASE}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-name"
+ }
+ }
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/redis-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/redis-ephemeral-template.json
index 6bb683e52..15bdd079b 100644
--- a/roles/openshift_examples/files/examples/v3.7/db-templates/redis-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/db-templates/redis-ephemeral-template.json
@@ -3,7 +3,6 @@
"apiVersion": "v1",
"metadata": {
"name": "redis-ephemeral",
- "creationTimestamp": null,
"annotations": {
"openshift.io/display-name": "Redis (Ephemeral)",
"description": "Redis in-memory data structure store, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
@@ -38,7 +37,6 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null,
"annotations": {
"template.openshift.io/expose-uri": "redis://{.spec.clusterIP}:{.spec.ports[?(.name==\"redis\")].port}"
}
@@ -68,7 +66,9 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -100,7 +100,6 @@
},
"template": {
"metadata": {
- "creationTimestamp": null,
"labels": {
"name": "${DATABASE_SERVICE_NAME}"
}
diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/redis-persistent-template.json
index 9e8be2309..1e31b02e0 100644
--- a/roles/openshift_examples/files/examples/v3.7/db-templates/redis-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/db-templates/redis-persistent-template.json
@@ -3,7 +3,6 @@
"apiVersion": "v1",
"metadata": {
"name": "redis-persistent",
- "creationTimestamp": null,
"annotations": {
"openshift.io/display-name": "Redis (Persistent)",
"description": "Redis in-memory data structure store, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
@@ -38,7 +37,6 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null,
"annotations": {
"template.openshift.io/expose-uri": "redis://{.spec.clusterIP}:{.spec.ports[?(.name==\"redis\")].port}"
}
@@ -85,7 +83,9 @@
"apiVersion": "v1",
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
- "creationTimestamp": null
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -117,7 +117,6 @@
},
"template": {
"metadata": {
- "creationTimestamp": null,
"labels": {
"name": "${DATABASE_SERVICE_NAME}"
}
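The `template.alpha.openshift.io/wait-for-ready` annotation added to each DeploymentConfig above marks the object as one whose readiness should gate template instantiation (it is honored by the template instantiation machinery). A minimal sketch of checking the same readiness by hand, assuming the default DATABASE_SERVICE_NAME of `redis` and an `oc` client on PATH:

    import subprocess

    # Poll the rollout of the DeploymentConfig created by the template; this
    # is an illustrative stand-in, not part of the template itself.
    subprocess.run(["oc", "rollout", "status", "dc/redis"], check=True)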
diff --git a/roles/openshift_examples/files/examples/v3.7/image-streams/OWNERS b/roles/openshift_examples/files/examples/v3.7/image-streams/OWNERS
index 4ccb64c74..6ddf77f12 100644
--- a/roles/openshift_examples/files/examples/v3.7/image-streams/OWNERS
+++ b/roles/openshift_examples/files/examples/v3.7/image-streams/OWNERS
@@ -1,7 +1,6 @@
reviewers:
- bparees
- sspeiche
- - oatmealraisin
- mfojtik
- liggitt
- jcantrill
diff --git a/roles/openshift_examples/files/examples/v3.7/image-streams/dotnet_imagestreams.json b/roles/openshift_examples/files/examples/v3.7/image-streams/dotnet_imagestreams.json
index 857ffa980..ee753966f 100644
--- a/roles/openshift_examples/files/examples/v3.7/image-streams/dotnet_imagestreams.json
+++ b/roles/openshift_examples/files/examples/v3.7/image-streams/dotnet_imagestreams.json
@@ -14,7 +14,7 @@
"metadata": {
"name": "dotnet",
"annotations": {
- "openshift.io/display-name": ".NET Core"
+ "openshift.io/display-name": ".NET Core Builder Images"
}
},
"spec": {
@@ -23,17 +23,35 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": ".NET Core (Latest)",
- "description": "Build and run .NET Core applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/1.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of .NET Core available on OpenShift, including major versions updates.",
+ "description": "Build and run .NET Core applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/2.0/build/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of .NET Core available on OpenShift, including major versions updates.",
"iconClass": "icon-dotnet",
"tags": "builder,.net,dotnet,dotnetcore",
"supports":"dotnet",
"sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git",
"sampleContextDir": "app",
- "sampleRef": "dotnetcore-1.1"
+ "sampleRef": "dotnetcore-2.0"
},
"from": {
"kind": "ImageStreamTag",
- "name": "1.1"
+ "name": "2.0"
+ }
+ },
+ {
+ "name": "2.0",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core 2.0",
+ "description": "Build and run .NET Core 2.0 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/2.0/build/README.md.",
+ "iconClass": "icon-dotnet",
+ "tags": "builder,.net,dotnet,dotnetcore,rh-dotnet20",
+ "supports":"dotnet:2.0,dotnet",
+ "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git",
+ "sampleContextDir": "app",
+ "sampleRef": "dotnetcore-2.0",
+ "version": "2.0"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/dotnet/dotnet-20-rhel7:2.0"
}
},
{
@@ -74,6 +92,49 @@
}
]
}
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dotnet-runtime",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core Runtime Images"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core Runtime (Latest)",
+ "description": "Run .NET Core applications on RHEL 7. For more information about using this image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/2.0/runtime/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of .NET Core Runtime available on OpenShift, including major versions updates.",
+ "iconClass": "icon-dotnet",
+ "tags": "runtime,.net-runtime,dotnet-runtime,dotnetcore-runtime",
+ "supports":"dotnet-runtime"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "2.0"
+ }
+ },
+ {
+ "name": "2.0",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core 2.0 Runtime",
+ "description": "Run .NET Core applications on RHEL 7. For more information about using this image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/2.0/runtime/README.md.",
+ "iconClass": "icon-dotnet",
+ "tags": "runtime,.net-runtime,dotnet-runtime,dotnetcore-runtime",
+ "supports":"dotnet-runtime",
+ "version": "2.0"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/dotnet/dotnet-20-runtime-rhel7:2.0"
+ }
+ }
+ ]
+ }
}
]
}
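In the image streams above, the `latest` tag points at the `2.0` ImageStreamTag, which in turn points at a concrete DockerImage. A minimal sketch of following that chain, assuming the file keeps its ImageStreamList shape (an `items` array at the top level):

    import json

    def resolve(stream, tag_name):
        """Follow ImageStreamTag references until a DockerImage is reached."""
        tags = {t["name"]: t for t in stream["spec"]["tags"]}
        ref = tags[tag_name]["from"]
        if ref["kind"] == "ImageStreamTag":
            return resolve(stream, ref["name"])
        return ref["name"]

    with open("dotnet_imagestreams.json") as f:
        doc = json.load(f)
    dotnet = next(i for i in doc["items"] if i["metadata"]["name"] == "dotnet")
    # Expected: registry.access.redhat.com/dotnet/dotnet-20-rhel7:2.0
    print(resolve(dotnet, "latest"))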
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql-persistent.json
index 6d987ee33..289f809fa 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql-persistent.json
@@ -89,7 +89,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to build the application"
+ "description": "Defines how to build the application",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
@@ -148,7 +149,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to deploy the application server"
+ "description": "Defines how to deploy the application server",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql.json
index fb2ef206e..0562982b3 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql.json
@@ -89,7 +89,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to build the application"
+ "description": "Defines how to build the application",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
@@ -148,7 +149,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to deploy the application server"
+ "description": "Defines how to deploy the application server",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql-persistent.json
index 7ffb25e14..7a3875d09 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql-persistent.json
@@ -87,7 +87,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to build the application"
+ "description": "Defines how to build the application",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
@@ -146,7 +147,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to deploy the application server"
+ "description": "Defines how to deploy the application server",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql.json
index d787e376b..399ec72a8 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql.json
@@ -87,7 +87,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to build the application"
+ "description": "Defines how to build the application",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
@@ -146,7 +147,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to deploy the application server"
+ "description": "Defines how to deploy the application server",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql-persistent.json
index a2070207b..e37f7a492 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql-persistent.json
@@ -87,7 +87,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to build the application"
+ "description": "Defines how to build the application",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
@@ -146,7 +147,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to deploy the application server"
+ "description": "Defines how to deploy the application server",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql.json
index 0d33c6e0e..965c2ebfe 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql.json
@@ -87,7 +87,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to build the application"
+ "description": "Defines how to build the application",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
@@ -146,7 +147,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to deploy the application server"
+ "description": "Defines how to deploy the application server",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-example.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-example.json
index af46579c8..f1fef3093 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-example.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-example.json
@@ -249,7 +249,7 @@
"displayName": ".NET builder",
"required": true,
"description": "The image stream tag which is used to build the code.",
- "value": "dotnet:1.0"
+ "value": "dotnet:2.0"
},
{
"name": "NAMESPACE",
@@ -269,7 +269,7 @@
"name": "SOURCE_REPOSITORY_REF",
"displayName": "Git Reference",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
- "value": "dotnetcore-1.0"
+ "value": "dotnetcore-2.0"
},
{
"name": "CONTEXT_DIR",
@@ -299,7 +299,7 @@
{
"name": "DOTNET_STARTUP_PROJECT",
"displayName": "Startup Project",
- "description": "Set this to the folder containing your startup project.",
+ "description": "Set this to a project file (e.g. csproj) or a folder containing a single project file.",
"value": "app"
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-pgsql-persistent.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-pgsql-persistent.json
index a2b59c2d3..c83132152 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-pgsql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-pgsql-persistent.json
@@ -455,7 +455,7 @@
"displayName": ".NET builder",
"required": true,
"description": "The image stream tag which is used to build the code.",
- "value": "dotnet:1.1"
+ "value": "dotnet:2.0"
},
{
"name": "NAMESPACE",
@@ -475,7 +475,7 @@
"name": "SOURCE_REPOSITORY_REF",
"displayName": "Git Reference",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
- "value": "rel/1.1-example"
+ "value": "rel/2.0-example"
},
{
"name": "CONTEXT_DIR",
@@ -485,7 +485,7 @@
{
"name": "DOTNET_STARTUP_PROJECT",
"displayName": "Startup Project",
- "description": "Set this to the folder containing your startup project.",
+ "description": "Set this to a project file (e.g. csproj) or a folder containing a single project file.",
"value": "samples/MusicStore"
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-runtime-example.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-runtime-example.json
new file mode 100644
index 000000000..e1dccf290
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dotnet-runtime-example.json
@@ -0,0 +1,412 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dotnet-runtime-example",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core Runtime Example",
+ "description": "An example .NET Core Runtime example application.",
+ "tags": "quickstart,dotnet,.net",
+ "iconClass": "icon-dotnet",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/redhat-developer/s2i-dotnetcore",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "objects": [
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application runtime image"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}-build",
+ "annotations": {
+ "description": "Keeps track of changes in the application builder image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}-build",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "${DOTNET_BUILD_IMAGE_STREAM_TAG}"
+ },
+ "env": [
+ {
+ "name": "DOTNET_STARTUP_PROJECT",
+ "value": "${DOTNET_STARTUP_PROJECT}"
+ },
+ {
+ "name": "DOTNET_ASSEMBLY_NAME",
+ "value": "${DOTNET_ASSEMBLY_NAME}"
+ },
+ {
+ "name": "DOTNET_NPM_TOOLS",
+ "value": "${DOTNET_NPM_TOOLS}"
+ },
+ {
+ "name": "DOTNET_TEST_PROJECTS",
+ "value": "${DOTNET_TEST_PROJECTS}"
+ },
+ {
+ "name": "DOTNET_CONFIGURATION",
+ "value": "${DOTNET_CONFIGURATION}"
+ },
+ {
+ "name": "DOTNET_RESTORE_SOURCES",
+ "value": "${DOTNET_RESTORE_SOURCES}"
+ },
+ {
+ "name": "DOTNET_PACK",
+ "value": "true"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}-build:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}-runtime",
+ "annotations": {
+ "description": "Defines how to chain the runtime image from the source build image"
+ }
+ },
+ "spec": {
+ "source": {
+ "dockerfile": "FROM ${DOTNET_RUNTIME_IMAGE_STREAM_TAG}\nADD app.tar.gz .",
+ "images": [
+ {
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}-build:latest"
+ },
+ "paths": [
+ {
+ "sourcePath": "/opt/app-root/app.tar.gz",
+ "destinationDir": "."
+ }
+ ]
+ }
+ ]
+ },
+ "strategy": {
+ "type": "Docker",
+ "dockerStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "${DOTNET_RUNTIME_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}-build:latest"
+ }
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}-runtime",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "dotnet-runtime-app"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "dotnet-runtime-app",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "livenessProbe": {
+ "httpGet": {
+ "path": "/",
+ "port": 8080,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 40,
+ "timeoutSeconds": 15
+ },
+ "readinessProbe": {
+ "httpGet": {
+ "path": "/",
+ "port": 8080,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 10,
+ "timeoutSeconds": 30
+ },
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "env": []
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "dotnet-runtime-example"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "DOTNET_RUNTIME_IMAGE_STREAM_TAG",
+ "displayName": ".NET Runtime Imagestream Tag",
+ "description": "The image stream tag which is used to run the application.",
+ "required": true,
+ "value": "dotnet-runtime:2.0"
+ },
+ {
+ "name": "DOTNET_BUILD_IMAGE_STREAM_TAG",
+ "displayName": ".NET builder",
+ "required": true,
+ "description": "The image stream tag which is used to build the application.",
+ "value": "dotnet:2.0"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "required": true,
+ "value": "openshift"
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the .NET Core service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "description": "The URL of the repository with your application source code.",
+ "required": true,
+ "value": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
+ "value": "dotnetcore-2.0"
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to use a subdirectory of the source code repository"
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "displayName": "Generic Webhook Secret",
+ "description": "A secret string used to configure the Generic webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DOTNET_STARTUP_PROJECT",
+ "displayName": "Startup Project",
+ "description": "Set this to the folder containing your startup project.",
+ "value": "app"
+ },
+ {
+ "name": "DOTNET_ASSEMBLY_NAME",
+ "displayName": "Startup Assembly",
+ "description": "Set this when the assembly name is overridden in the project file."
+ },
+ {
+ "name": "DOTNET_NPM_TOOLS",
+ "displayName": "Npm Tools",
+ "description": "Set this to a space separated list of npm tools needed to publish.",
+ "value": "bower gulp"
+ },
+ {
+ "name": "DOTNET_TEST_PROJECTS",
+ "displayName": "Test projects",
+ "description": "Set this to a space separated list of test projects to run before publishing."
+ },
+ {
+ "name": "DOTNET_CONFIGURATION",
+ "displayName": "Configuration",
+ "description": "Set this to configuration (Release/Debug).",
+ "value": "Release"
+ },
+ {
+ "name": "DOTNET_RESTORE_SOURCES",
+ "displayName": "NuGet package sources",
+ "description": "Set this to override the NuGet.config sources."
+ }
+ ]
+}
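The new template wires two BuildConfigs together: `${NAME}-build` produces an `app.tar.gz` artifact on the builder image, and `${NAME}-runtime` layers that artifact onto the slimmer runtime image via a Docker build. A hypothetical usage sketch (the application name below is an assumption, not a template default):

    import subprocess

    # Instantiate the template with `oc new-app`, overriding parameters
    # with -p; unset parameters fall back to the defaults defined above.
    subprocess.run([
        "oc", "new-app", "--template=dotnet-runtime-example",
        "-p", "NAME=music-store",
        "-p", "DOTNET_STARTUP_PROJECT=app",
    ], check=True)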
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/httpd.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/httpd.json
index ac671cc06..6cf9d76eb 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/httpd.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/httpd.json
@@ -74,7 +74,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to build the application"
+ "description": "Defines how to build the application",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
@@ -130,7 +131,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to deploy the application server"
+ "description": "Defines how to deploy the application server",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-ephemeral-template.json
index ce96684a9..62f43bc0b 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-ephemeral-template.json
@@ -3,7 +3,6 @@
"apiVersion": "v1",
"metadata": {
"name": "jenkins-ephemeral",
- "creationTimestamp": null,
"annotations": {
"openshift.io/display-name": "Jenkins (Ephemeral)",
"description": "Jenkins service, without persistent storage.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
@@ -22,7 +21,6 @@
"apiVersion": "v1",
"metadata": {
"name": "${JENKINS_SERVICE_NAME}",
- "creationTimestamp": null,
"annotations": {
"template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
}
@@ -43,7 +41,9 @@
"apiVersion": "v1",
"metadata": {
"name": "${JENKINS_SERVICE_NAME}",
- "creationTimestamp": null
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -75,7 +75,6 @@
},
"template": {
"metadata": {
- "creationTimestamp": null,
"labels": {
"name": "${JENKINS_SERVICE_NAME}"
}
@@ -221,8 +220,7 @@
"annotations": {
"service.alpha.openshift.io/dependencies": "[{\"name\": \"${JNLP_SERVICE_NAME}\", \"namespace\": \"\", \"kind\": \"Service\"}]",
"service.openshift.io/infrastructure": "true"
- },
- "creationTimestamp": null
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-persistent-template.json
index 34b2b920b..e9068e455 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-persistent-template.json
@@ -3,7 +3,6 @@
"apiVersion": "v1",
"metadata": {
"name": "jenkins-persistent",
- "creationTimestamp": null,
"annotations": {
"openshift.io/display-name": "Jenkins (Persistent)",
"description": "Jenkins service, with persistent storage.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
@@ -22,7 +21,6 @@
"apiVersion": "v1",
"metadata": {
"name": "${JENKINS_SERVICE_NAME}",
- "creationTimestamp": null,
"annotations": {
"template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}"
}
@@ -60,7 +58,9 @@
"apiVersion": "v1",
"metadata": {
"name": "${JENKINS_SERVICE_NAME}",
- "creationTimestamp": null
+ "annotations": {
+ "template.alpha.openshift.io/wait-for-ready": "true"
+ }
},
"spec": {
"strategy": {
@@ -92,7 +92,6 @@
},
"template": {
"metadata": {
- "creationTimestamp": null,
"labels": {
"name": "${JENKINS_SERVICE_NAME}"
}
@@ -238,8 +237,7 @@
"annotations": {
"service.alpha.openshift.io/dependencies": "[{\"name\": \"${JNLP_SERVICE_NAME}\", \"namespace\": \"\", \"kind\": \"Service\"}]",
"service.openshift.io/infrastructure": "true"
- },
- "creationTimestamp": null
+ }
},
"spec": {
"ports": [
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb-persistent.json
index a9c365361..df3704b9f 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb-persistent.json
@@ -87,7 +87,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to build the application"
+ "description": "Defines how to build the application",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
@@ -152,7 +153,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to deploy the application server"
+ "description": "Defines how to deploy the application server",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb.json
index 53a6147d5..eb6ab33d9 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb.json
@@ -87,7 +87,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to build the application"
+ "description": "Defines how to build the application",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
@@ -152,7 +153,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to deploy the application server"
+ "description": "Defines how to deploy the application server",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql-persistent.json
index f07a43071..59e2e41ea 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql-persistent.json
@@ -93,7 +93,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to build the application"
+ "description": "Defines how to build the application",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
@@ -152,7 +153,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to deploy the application server"
+ "description": "Defines how to deploy the application server",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql.json
index a7992c988..b3d080a91 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql.json
@@ -93,7 +93,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to build the application"
+ "description": "Defines how to build the application",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
@@ -152,7 +153,8 @@
"metadata": {
"name": "${NAME}",
"annotations": {
- "description": "Defines how to deploy the application server"
+ "description": "Defines how to deploy the application server",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 844d77255..cf78b4a75 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -193,7 +193,9 @@ def hostname_valid(hostname):
"""
if (not hostname or
hostname.startswith('localhost') or
- hostname.endswith('localdomain')):
+ hostname.endswith('localdomain') or
+ # OpenShift will not allow a node with more than 63 chars in name.
+ len(hostname) > 63):
return False
return True
@@ -1907,7 +1909,6 @@ class OpenShiftFacts(object):
"""
known_roles = ['builddefaults',
'buildoverrides',
- 'clock',
'cloudprovider',
'common',
'docker',
@@ -2097,13 +2098,6 @@ class OpenShiftFacts(object):
docker['service_name'] = 'docker'
defaults['docker'] = docker
- if 'clock' in roles:
- exit_code, _, _ = module.run_command(['rpm', '-q', 'chrony']) # noqa: F405
- chrony_installed = bool(exit_code == 0)
- defaults['clock'] = dict(
- enabled=True,
- chrony_installed=chrony_installed)
-
if 'cloudprovider' in roles:
defaults['cloudprovider'] = dict(kind=None)
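The new length guard rejects node names longer than 63 characters, matching the limit the inline comment cites for OpenShift node names (a DNS label cap). A self-contained sketch of the rule as amended above:

    def hostname_valid(hostname):
        """Mirror of the validity rule above: reject empty names,
        localhost*, *.localdomain, and names over 63 characters."""
        if (not hostname or
                hostname.startswith('localhost') or
                hostname.endswith('localdomain') or
                len(hostname) > 63):
            return False
        return True

    assert hostname_valid('node1.example.com')
    assert not hostname_valid('a' * 64)  # one past the 63-character limit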
diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
index 05e53333d..8d35db6b5 100644
--- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py
+++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
@@ -1,76 +1,74 @@
"""
Ansible action plugin to execute health checks in OpenShift clusters.
"""
-# pylint: disable=wrong-import-position,missing-docstring,invalid-name
import sys
import os
+import traceback
from collections import defaultdict
+from ansible.plugins.action import ActionBase
+from ansible.module_utils.six import string_types
+
try:
from __main__ import display
except ImportError:
+    # pylint: disable=ungrouped-imports; this is the standard way to import
+    # the default display object in Ansible action plugins.
from ansible.utils.display import Display
display = Display()
-from ansible.plugins.action import ActionBase
-from ansible.module_utils.six import string_types
-
# Augment sys.path so that we can import checks from a directory relative to
# this callback plugin.
sys.path.insert(1, os.path.dirname(os.path.dirname(__file__)))
+# pylint: disable=wrong-import-position; the import statement must come after
+# the manipulation of sys.path.
from openshift_checks import OpenShiftCheck, OpenShiftCheckException, load_checks # noqa: E402
class ActionModule(ActionBase):
+ """Action plugin to execute health checks."""
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
task_vars = task_vars or {}
- # vars are not supportably available in the callback plugin,
- # so record any it will need in the result.
+ # callback plugins cannot read Ansible vars, but we would like
+ # zz_failure_summary to have access to certain values. We do so by
+ # storing the information we need in the result.
result['playbook_context'] = task_vars.get('r_openshift_health_checker_playbook_context')
- if "openshift" not in task_vars:
- result["failed"] = True
- result["msg"] = "'openshift' is undefined, did 'openshift_facts' run?"
- return result
-
try:
known_checks = self.load_known_checks(tmp, task_vars)
args = self._task.args
requested_checks = normalize(args.get('checks', []))
+
+ if not requested_checks:
+ result['failed'] = True
+ result['msg'] = list_known_checks(known_checks)
+ return result
+
resolved_checks = resolve_checks(requested_checks, known_checks.values())
- except OpenShiftCheckException as e:
+ except OpenShiftCheckException as exc:
result["failed"] = True
- result["msg"] = str(e)
+ result["msg"] = str(exc)
+ return result
+
+ if "openshift" not in task_vars:
+ result["failed"] = True
+ result["msg"] = "'openshift' is undefined, did 'openshift_facts' run?"
return result
result["checks"] = check_results = {}
user_disabled_checks = normalize(task_vars.get('openshift_disable_check', []))
- for check_name in resolved_checks:
- display.banner("CHECK [{} : {}]".format(check_name, task_vars["ansible_host"]))
- check = known_checks[check_name]
-
- if not check.is_active():
- r = dict(skipped=True, skipped_reason="Not active for this host")
- elif check_name in user_disabled_checks:
- r = dict(skipped=True, skipped_reason="Disabled by user request")
- else:
- try:
- r = check.run()
- except OpenShiftCheckException as e:
- r = dict(
- failed=True,
- msg=str(e),
- )
-
+ for name in resolved_checks:
+ display.banner("CHECK [{} : {}]".format(name, task_vars["ansible_host"]))
+ check = known_checks[name]
+ check_results[name] = run_check(name, check, user_disabled_checks)
if check.changed:
- r["changed"] = True
- check_results[check_name] = r
+ check_results[name]["changed"] = True
result["changed"] = any(r.get("changed") for r in check_results.values())
if any(r.get("failed") for r in check_results.values()):
@@ -80,22 +78,55 @@ class ActionModule(ActionBase):
return result
def load_known_checks(self, tmp, task_vars):
+ """Find all existing checks and return a mapping of names to instances."""
load_checks()
known_checks = {}
for cls in OpenShiftCheck.subclasses():
- check_name = cls.name
- if check_name in known_checks:
- other_cls = known_checks[check_name].__class__
+ name = cls.name
+ if name in known_checks:
+ other_cls = known_checks[name].__class__
raise OpenShiftCheckException(
- "non-unique check name '{}' in: '{}.{}' and '{}.{}'".format(
- check_name,
- cls.__module__, cls.__name__,
- other_cls.__module__, other_cls.__name__))
- known_checks[check_name] = cls(execute_module=self._execute_module, tmp=tmp, task_vars=task_vars)
+ "duplicate check name '{}' in: '{}' and '{}'"
+ "".format(name, full_class_name(cls), full_class_name(other_cls))
+ )
+ known_checks[name] = cls(execute_module=self._execute_module, tmp=tmp, task_vars=task_vars)
return known_checks
+def list_known_checks(known_checks):
+ """Return text listing the existing checks and tags."""
+ # TODO: we could include a description of each check by taking it from a
+ # check class attribute (e.g., __doc__) when building the message below.
+ msg = (
+ 'This playbook is meant to run health checks, but no checks were '
+ 'requested. Set the `openshift_checks` variable to a comma-separated '
+ 'list of check names or a YAML list. Available checks:\n {}'
+ ).format('\n '.join(sorted(known_checks)))
+
+ tags = describe_tags(known_checks.values())
+
+ msg += (
+ '\n\nTags can be used as a shortcut to select multiple '
+ 'checks. Available tags and the checks they select:\n {}'
+ ).format('\n '.join(tags))
+
+ return msg
+
+
+def describe_tags(check_classes):
+ """Return a sorted list of strings describing tags and the checks they include."""
+ tag_checks = defaultdict(list)
+ for cls in check_classes:
+ for tag in cls.tags:
+ tag_checks[tag].append(cls.name)
+ tags = [
+ '@{} = {}'.format(tag, ','.join(sorted(checks)))
+ for tag, checks in tag_checks.items()
+ ]
+ return sorted(tags)
+
+
def resolve_checks(names, all_checks):
"""Returns a set of resolved check names.
@@ -123,6 +154,12 @@ def resolve_checks(names, all_checks):
if unknown_tag_names:
msg.append('Unknown tag names: {}.'.format(', '.join(sorted(unknown_tag_names))))
msg.append('Make sure there is no typo in the playbook and no files are missing.')
+ # TODO: implement a "Did you mean ...?" when the input is similar to a
+ # valid check or tag.
+ msg.append('Known checks:')
+ msg.append(' {}'.format('\n '.join(sorted(known_check_names))))
+ msg.append('Known tags:')
+ msg.append(' {}'.format('\n '.join(describe_tags(all_checks))))
raise OpenShiftCheckException('\n'.join(msg))
tag_to_checks = defaultdict(set)
@@ -146,3 +183,32 @@ def normalize(checks):
if isinstance(checks, string_types):
checks = checks.split(',')
return [name.strip() for name in checks if name.strip()]
+
+
+def run_check(name, check, user_disabled_checks):
+ """Run a single check if enabled and return a result dict."""
+ if name in user_disabled_checks:
+ return dict(skipped=True, skipped_reason="Disabled by user request")
+
+ # pylint: disable=broad-except; capturing exceptions broadly is intentional,
+ # to isolate arbitrary failures in one check from others.
+ try:
+ is_active = check.is_active()
+ except Exception as exc:
+ reason = "Could not determine if check should be run, exception: {}".format(exc)
+ return dict(skipped=True, skipped_reason=reason, exception=traceback.format_exc())
+
+ if not is_active:
+ return dict(skipped=True, skipped_reason="Not active for this host")
+
+ try:
+ return check.run()
+ except OpenShiftCheckException as exc:
+ return dict(failed=True, msg=str(exc))
+ except Exception as exc:
+ return dict(failed=True, msg=str(exc), exception=traceback.format_exc())
+
+
+def full_class_name(cls):
+ """Return the name of a class prefixed with its module name."""
+ return '{}.{}'.format(cls.__module__, cls.__name__)
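The refactored plugin funnels every check through `run_check()`, which converts any exception, including unexpected ones, into a per-check result dict instead of aborting the whole play. A minimal sketch of that isolation pattern with a stand-in check class (not a real check from this repo):

    import traceback

    class BoomCheck(object):
        """Stand-in for an OpenShiftCheck subclass that misbehaves."""
        def is_active(self):
            return True
        def run(self):
            raise RuntimeError("boom")

    def run_check(name, check, user_disabled_checks):
        if name in user_disabled_checks:
            return dict(skipped=True, skipped_reason="Disabled by user request")
        try:
            if not check.is_active():
                return dict(skipped=True, skipped_reason="Not active for this host")
            return check.run()
        except Exception as exc:  # deliberate broad catch, as in the plugin
            return dict(failed=True, msg=str(exc), exception=traceback.format_exc())

    result = run_check("boom", BoomCheck(), [])
    assert result["failed"] and result["msg"] == "boom"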
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
index d10200719..349655966 100644
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -1,161 +1,223 @@
-"""
-Ansible callback plugin to give a nicely formatted summary of failures.
-"""
+"""Ansible callback plugin to print a nicely formatted summary of failures.
-# Reason: In several locations below we disable pylint protected-access
-# for Ansible objects that do not give us any public way
-# to access the full details we need to report check failures.
-# Status: disabled permanently or until Ansible object has a public API.
-# This does leave the code more likely to be broken by future Ansible changes.
+The file / module name is prefixed with `zz_` so that Ansible loads this plugin
+last, making its output the last thing that users see.
+"""
-from pprint import pformat
+from collections import defaultdict
+import traceback
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
from ansible.utils.color import stringc
+FAILED_NO_MSG = u'Failed without returning a message.'
+
+
class CallbackModule(CallbackBase):
- """
- This callback plugin stores task results and summarizes failures.
- The file name is prefixed with `zz_` to make this plugin be loaded last by
- Ansible, thus making its output the last thing that users see.
- """
+ """This callback plugin stores task results and summarizes failures."""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'failure_summary'
CALLBACK_NEEDS_WHITELIST = False
- _playbook_file = None
def __init__(self):
super(CallbackModule, self).__init__()
self.__failures = []
+ self.__playbook_file = ''
def v2_playbook_on_start(self, playbook):
super(CallbackModule, self).v2_playbook_on_start(playbook)
- # re: playbook attrs see top comment # pylint: disable=protected-access
- self._playbook_file = playbook._file_name
+ # pylint: disable=protected-access; Ansible gives us no public API to
+ # get the file name of the current playbook from a callback plugin.
+ self.__playbook_file = playbook._file_name
def v2_runner_on_failed(self, result, ignore_errors=False):
super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
if not ignore_errors:
- self.__failures.append(dict(result=result, ignore_errors=ignore_errors))
+ self.__failures.append(result)
def v2_playbook_on_stats(self, stats):
super(CallbackModule, self).v2_playbook_on_stats(stats)
- if self.__failures:
- self._print_failure_details(self.__failures)
-
- def _print_failure_details(self, failures):
- """Print a summary of failed tasks or checks."""
- self._display.display(u'\nFailure summary:\n')
-
- width = len(str(len(failures)))
- initial_indent_format = u' {{:>{width}}}. '.format(width=width)
- initial_indent_len = len(initial_indent_format.format(0))
- subsequent_indent = u' ' * initial_indent_len
- subsequent_extra_indent = u' ' * (initial_indent_len + 10)
-
- for i, failure in enumerate(failures, 1):
- entries = _format_failure(failure)
- self._display.display(u'\n{}{}'.format(initial_indent_format.format(i), entries[0]))
- for entry in entries[1:]:
- entry = entry.replace(u'\n', u'\n' + subsequent_extra_indent)
- indented = u'{}{}'.format(subsequent_indent, entry)
- self._display.display(indented)
-
- failed_checks = set()
- playbook_context = None
- # re: result attrs see top comment # pylint: disable=protected-access
- for failure in failures:
- # Get context from check task result since callback plugins cannot access task vars.
- # NOTE: thus context is not known unless checks run. Failures prior to checks running
- # don't have playbook_context in the results. But we only use it now when checks fail.
- playbook_context = playbook_context or failure['result']._result.get('playbook_context')
- failed_checks.update(
- name
- for name, result in failure['result']._result.get('checks', {}).items()
- if result.get('failed')
- )
- if failed_checks:
- self._print_check_failure_summary(failed_checks, playbook_context)
-
- def _print_check_failure_summary(self, failed_checks, context):
- checks = ','.join(sorted(failed_checks))
- # The purpose of specifying context is to vary the output depending on what the user was
- # expecting to happen (based on which playbook they ran). The only use currently is to
- # vary the message depending on whether the user was deliberately running checks or was
- # trying to install/upgrade and checks are just included. Other use cases may arise.
- summary = ( # default to explaining what checks are in the first place
- '\n'
- 'The execution of "{playbook}"\n'
- 'includes checks designed to fail early if the requirements\n'
- 'of the playbook are not met. One or more of these checks\n'
- 'failed. To disregard these results, you may choose to\n'
- 'disable failing checks by setting an Ansible variable:\n\n'
- ' openshift_disable_check={checks}\n\n'
- 'Failing check names are shown in the failure details above.\n'
- 'Some checks may be configurable by variables if your requirements\n'
- 'are different from the defaults; consult check documentation.\n'
- 'Variables can be set in the inventory or passed on the\n'
- 'command line using the -e flag to ansible-playbook.\n\n'
- ).format(playbook=self._playbook_file, checks=checks)
- if context in ['pre-install', 'health']:
- summary = ( # user was expecting to run checks, less explanation needed
- '\n'
- 'You may choose to configure or disable failing checks by\n'
- 'setting Ansible variables. To disable those above:\n\n'
- ' openshift_disable_check={checks}\n\n'
- 'Consult check documentation for configurable variables.\n'
- 'Variables can be set in the inventory or passed on the\n'
- 'command line using the -e flag to ansible-playbook.\n\n'
- ).format(checks=checks)
- self._display.display(summary)
-
-
-# re: result attrs see top comment # pylint: disable=protected-access
-def _format_failure(failure):
+ # pylint: disable=broad-except; capturing exceptions broadly is
+ # intentional, to isolate arbitrary failures in this callback plugin.
+ try:
+ if self.__failures:
+ self._display.display(failure_summary(self.__failures, self.__playbook_file))
+ except Exception:
+ msg = stringc(
+ u'An error happened while generating a summary of failures:\n'
+ u'{}'.format(traceback.format_exc()), C.COLOR_WARN)
+ self._display.v(msg)
+
+
+def failure_summary(failures, playbook):
+ """Return a summary of failed tasks, including details on health checks."""
+ if not failures:
+ return u''
+
+ # NOTE: because we don't have access to task_vars from callback plugins, we
+ # store the playbook context in the task result when the
+ # openshift_health_check action plugin is used, and we use this context to
+ # customize the error message.
+ # pylint: disable=protected-access; Ansible gives us no sufficient public
+ # API on TaskResult objects.
+ context = next((
+ context for context in
+ (failure._result.get('playbook_context') for failure in failures)
+ if context
+ ), None)
+
+ failures = [failure_to_dict(failure) for failure in failures]
+ failures = deduplicate_failures(failures)
+
+ summary = [u'', u'', u'Failure summary:', u'']
+
+ width = len(str(len(failures)))
+ initial_indent_format = u' {{:>{width}}}. '.format(width=width)
+ initial_indent_len = len(initial_indent_format.format(0))
+ subsequent_indent = u' ' * initial_indent_len
+ subsequent_extra_indent = u' ' * (initial_indent_len + 10)
+
+ for i, failure in enumerate(failures, 1):
+ entries = format_failure(failure)
+ summary.append(u'\n{}{}'.format(initial_indent_format.format(i), entries[0]))
+ for entry in entries[1:]:
+ entry = entry.replace(u'\n', u'\n' + subsequent_extra_indent)
+ indented = u'{}{}'.format(subsequent_indent, entry)
+ summary.append(indented)
+
+ failed_checks = set()
+ for failure in failures:
+ failed_checks.update(name for name, message in failure['checks'])
+ if failed_checks:
+ summary.append(check_failure_footer(failed_checks, context, playbook))
+
+ return u'\n'.join(summary)
+
+
+def failure_to_dict(failed_task_result):
+ """Extract information out of a failed TaskResult into a dict.
+
+ The intent is to transform a TaskResult object into something easier to
+ manipulate. TaskResult is ansible.executor.task_result.TaskResult.
+ """
+ # pylint: disable=protected-access; Ansible gives us no sufficient public
+ # API on TaskResult objects.
+ _result = failed_task_result._result
+ return {
+ 'host': failed_task_result._host.get_name(),
+ 'play': play_name(failed_task_result._task),
+ 'task': failed_task_result.task_name,
+ 'msg': _result.get('msg', FAILED_NO_MSG),
+ 'checks': tuple(
+ (name, result.get('msg', FAILED_NO_MSG))
+ for name, result in sorted(_result.get('checks', {}).items())
+ if result.get('failed')
+ ),
+ }
+
+
+def play_name(obj):
+ """Given a task or block, return the name of its parent play.
+
+ This is loosely inspired by ansible.playbook.base.Base.dump_me.
+ """
+ # pylint: disable=protected-access; Ansible gives us no sufficient public
+ # API to implement this.
+ if not obj:
+ return ''
+ if hasattr(obj, '_play'):
+ return obj._play.get_name()
+ return play_name(getattr(obj, '_parent'))
+
+
+def deduplicate_failures(failures):
+ """Group together similar failures from different hosts.
+
+ Returns a new list of failures such that identical failures from different
+ hosts are grouped together in a single entry. The relative order of failures
+ is preserved.
+ """
+ groups = defaultdict(list)
+ for failure in failures:
+ group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))
+ groups[group_key].append(failure)
+ result = []
+ for failure in failures:
+ group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))
+ if group_key not in groups:
+ continue
+ failure['host'] = tuple(sorted(g_failure['host'] for g_failure in groups.pop(group_key)))
+ result.append(failure)
+ return result
+
+
+def format_failure(failure):
"""Return a list of pretty-formatted text entries describing a failure, including
relevant information about it. Expect that the list of text entries will be joined
by a newline separator when output to the user."""
- result = failure['result']
- host = result._host.get_name()
- play = _get_play(result._task)
- if play:
- play = play.get_name()
- task = result._task.get_name()
- msg = result._result.get('msg', u'???')
+ host = u', '.join(failure['host'])
+ play = failure['play']
+ task = failure['task']
+ msg = failure['msg']
+ checks = failure['checks']
fields = (
- (u'Host', host),
+ (u'Hosts', host),
(u'Play', play),
(u'Task', task),
(u'Message', stringc(msg, C.COLOR_ERROR)),
)
- if 'checks' in result._result:
- fields += ((u'Details', _format_failed_checks(result._result['checks'])),)
+ if checks:
+ fields += ((u'Details', format_failed_checks(checks)),)
row_format = '{:10}{}'
return [row_format.format(header + u':', body) for header, body in fields]
-def _format_failed_checks(checks):
+def format_failed_checks(checks):
"""Return pretty-formatted text describing checks that failed."""
- failed_check_msgs = []
- for check, body in checks.items():
- if body.get('failed', False): # only show the failed checks
- msg = body.get('msg', u"Failed without returning a message")
- failed_check_msgs.append('check "%s":\n%s' % (check, msg))
- if failed_check_msgs:
- return stringc("\n\n".join(failed_check_msgs), C.COLOR_ERROR)
- else: # something failed but no checks will admit to it, so dump everything
- return stringc(pformat(checks), C.COLOR_ERROR)
-
-
-# This is inspired by ansible.playbook.base.Base.dump_me.
-# re: play/task/block attrs see top comment # pylint: disable=protected-access
-def _get_play(obj):
- """Given a task or block, recursively try to find its parent play."""
- if hasattr(obj, '_play'):
- return obj._play
- if getattr(obj, '_parent'):
- return _get_play(obj._parent)
+ messages = []
+ for name, message in checks:
+ messages.append(u'check "{}":\n{}'.format(name, message))
+ return stringc(u'\n\n'.join(messages), C.COLOR_ERROR)
+
+
+def check_failure_footer(failed_checks, context, playbook):
+ """Return a textual explanation about checks depending on context.
+
+ The purpose of specifying context is to vary the output depending on what
+ the user was expecting to happen (based on which playbook they ran). The
+ only use currently is to vary the message depending on whether the user was
+ deliberately running checks or was trying to install/upgrade and checks are
+ just included. Other use cases may arise.
+ """
+ checks = ','.join(sorted(failed_checks))
+ summary = [u'']
+ if context in ['pre-install', 'health', 'adhoc']:
+ # User was expecting to run checks, less explanation needed.
+ summary.extend([
+ u'You may configure or disable checks by setting Ansible '
+ u'variables. To disable those above, set:',
+ u' openshift_disable_check={checks}'.format(checks=checks),
+ u'Consult check documentation for configurable variables.',
+ ])
+ else:
+ # User may not be familiar with the checks, explain what checks are in
+ # the first place.
+ summary.extend([
+ u'The execution of "{playbook}" includes checks designed to fail '
+ u'early if the requirements of the playbook are not met. One or '
+ u'more of these checks failed. To disregard these results, '
+ u'explicitly disable checks by setting an Ansible variable:'.format(playbook=playbook),
+ u' openshift_disable_check={checks}'.format(checks=checks),
+ u'Failing check names are shown in the failure details above. '
+ u'Some checks may be configurable by variables if your requirements '
+ u'are different from the defaults; consult check documentation.',
+ ])
+ summary.append(
+ u'Variables can be set in the inventory or passed on the command line '
+ u'using the -e flag to ansible-playbook.'
+ )
+ return u'\n'.join(summary)
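
A minimal sketch, not part of the patch, of how the new helpers compose; `collected` is a hypothetical list of the failed TaskResult objects the callback accumulates:

    failures = [failure_to_dict(result) for result in collected]
    failures = deduplicate_failures(failures)  # merge identical failures across hosts
    for failure in failures:
        for line in format_failure(failure):   # Hosts/Play/Task/Message rows
            print(line)
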
diff --git a/roles/openshift_health_checker/library/aos_version.py b/roles/openshift_health_checker/library/aos_version.py
index f9babebb9..db3c0b654 100644
--- a/roles/openshift_health_checker/library/aos_version.py
+++ b/roles/openshift_health_checker/library/aos_version.py
@@ -24,11 +24,17 @@ from ansible.module_utils.basic import AnsibleModule
# Python 3, we use six for cross compatibility in this module alone:
from ansible.module_utils.six import string_types
-IMPORT_EXCEPTION = None
+YUM_IMPORT_EXCEPTION = None
+DNF_IMPORT_EXCEPTION = None
try:
import yum # pylint: disable=import-error
except ImportError as err:
- IMPORT_EXCEPTION = err
+ YUM_IMPORT_EXCEPTION = err
+
+try:
+ import dnf # pylint: disable=import-error
+except ImportError as err:
+ DNF_IMPORT_EXCEPTION = err
class AosVersionException(Exception):
@@ -43,12 +49,20 @@ def main():
module = AnsibleModule(
argument_spec=dict(
package_list=dict(type="list", required=True),
+ package_mgr=dict(type="str", required=True),
),
supports_check_mode=True
)
- if IMPORT_EXCEPTION:
- module.fail_json(msg="aos_version module could not import yum: %s" % IMPORT_EXCEPTION)
+ # determine the package manager to use
+ package_mgr = module.params['package_mgr']
+ if package_mgr not in ('yum', 'dnf'):
+ module.fail_json(msg="package_mgr must be one of: yum, dnf")
+ pkg_mgr_exception = dict(yum=YUM_IMPORT_EXCEPTION, dnf=DNF_IMPORT_EXCEPTION)[package_mgr]
+ if pkg_mgr_exception:
+ module.fail_json(
+ msg="aos_version module could not import {}: {}".format(package_mgr, pkg_mgr_exception)
+ )
# determine the packages we will look for
package_list = module.params['package_list']
@@ -67,7 +81,7 @@ def main():
# get the list of packages available and complain if anything is wrong
try:
- pkgs = _retrieve_available_packages(expected_pkg_names)
+ pkgs = _retrieve_available_packages(package_mgr, expected_pkg_names)
if versioned_pkgs:
_check_precise_version_found(pkgs, _to_dict(versioned_pkgs))
_check_higher_version_found(pkgs, _to_dict(versioned_pkgs))
@@ -82,10 +96,7 @@ def _to_dict(pkg_list):
return {pkg["name"]: pkg for pkg in pkg_list}
-def _retrieve_available_packages(expected_pkgs):
- # search for package versions available for openshift pkgs
- yb = yum.YumBase() # pylint: disable=invalid-name
-
+def _retrieve_available_packages(pkg_mgr, expected_pkgs):
# The openshift excluder prevents unintended updates to openshift
# packages by setting yum excludes on those packages. See:
# https://wiki.centos.org/SpecialInterestGroup/PaaS/OpenShift-Origin-Control-Updates
@@ -94,17 +105,45 @@ def _retrieve_available_packages(expected_pkgs):
# attempt to determine what packages are available via yum they may
# be excluded. So, for our purposes here, disable excludes to see
# what will really be available during an install or upgrade.
- yb.conf.disable_excludes = ['all']
- try:
- pkgs = yb.pkgSack.returnPackages(patterns=expected_pkgs)
- except yum.Errors.PackageSackError as excinfo:
- # you only hit this if *none* of the packages are available
- raise AosVersionException('\n'.join([
- 'Unable to find any OpenShift packages.',
- 'Check your subscription and repo settings.',
- str(excinfo),
- ]))
+ if pkg_mgr == "yum":
+ # search for package versions available for openshift pkgs
+ yb = yum.YumBase() # pylint: disable=invalid-name
+
+ yb.conf.disable_excludes = ['all']
+
+ try:
+ pkgs = yb.rpmdb.returnPackages(patterns=expected_pkgs)
+ pkgs += yb.pkgSack.returnPackages(patterns=expected_pkgs)
+ except yum.Errors.PackageSackError as excinfo:
+ # you only hit this if *none* of the packages are available
+ raise AosVersionException('\n'.join([
+ 'Unable to find any OpenShift packages.',
+ 'Check your subscription and repo settings.',
+ str(excinfo),
+ ]))
+ elif pkg_mgr == "dnf":
+ dbase = dnf.Base() # pylint: disable=invalid-name
+
+ dbase.conf.disable_excludes = ['all']
+ dbase.read_all_repos()
+ dbase.fill_sack(load_system_repo=False, load_available_repos=True)
+
+ dquery = dbase.sack.query()
+ aquery = dquery.available()
+ iquery = dquery.installed()
+
+ available_pkgs = list(aquery.filter(name=expected_pkgs))
+ installed_pkgs = list(iquery.filter(name=expected_pkgs))
+ pkgs = available_pkgs + installed_pkgs
+
+ if not pkgs:
+ # pkgs list is empty, raise because no expected packages found
+ raise AosVersionException('\n'.join([
+ 'Unable to find any OpenShift packages.',
+ 'Check your subscription and repo settings.',
+ ]))
+
return pkgs
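
A standalone sketch of the dnf branch above, assuming python-dnf is installed; the package names are illustrative:

    import dnf

    base = dnf.Base()
    base.conf.disable_excludes = ['all']  # same excludes rationale as the yum path above
    base.read_all_repos()
    base.fill_sack(load_system_repo=False, load_available_repos=True)

    query = base.sack.query()
    names = ['origin', 'origin-master']  # illustrative package names
    # available() + installed() mirrors the yum pkgSack + rpmdb lookups above
    pkgs = list(query.available().filter(name=names)) + list(query.installed().filter(name=names))
    print([(p.name, p.version) for p in pkgs])
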
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index 07ec6f7ef..02ee1d0f9 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -242,7 +242,7 @@ def load_checks(path=None, subpkg=""):
modules = modules + load_checks(os.path.join(path, name), subpkg + "." + name)
continue
- if name.endswith(".py") and name not in LOADER_EXCLUDES:
+ if name.endswith(".py") and not name.startswith(".") and name not in LOADER_EXCLUDES:
modules.append(import_module(__package__ + subpkg + "." + name[:-3]))
return modules
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
index 6d1dea9ce..f302fd14b 100644
--- a/roles/openshift_health_checker/openshift_checks/disk_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -115,10 +115,7 @@ class DiskAvailability(OpenShiftCheck):
return {
'failed': True,
- 'msg': (
- 'Available disk space in "{}" ({:.1f} GB) '
- 'is below minimum recommended ({:.1f} GB)'
- ).format(path, free_gb, recommended_gb)
+ 'msg': msg,
}
return {}
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 85a922f86..866c74d7c 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -32,6 +32,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# we use python-docker-py to check local docker for images, and skopeo
# to look for images available remotely without waiting to pull them.
dependencies = ["python-docker-py", "skopeo"]
+ skopeo_img_check_command = "timeout 10 skopeo inspect --tls-verify=false"
def is_active(self):
"""Skip hosts with unsupported deployment types."""
@@ -67,8 +68,10 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
"failed": True,
"msg": (
"One or more required Docker images are not available:\n {}\n"
- "Configured registries: {}"
- ).format(",\n ".join(sorted(unavailable_images)), ", ".join(registries)),
+ "Configured registries: {}\n"
+ "Checked by: {}"
+ ).format(",\n ".join(sorted(unavailable_images)), ", ".join(registries),
+ self.skopeo_img_check_command),
}
return {}
@@ -168,7 +171,9 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
registries = [registry]
for registry in registries:
- args = {"_raw_params": "skopeo inspect --tls-verify=false docker://{}/{}".format(registry, image)}
+ args = {
+ "_raw_params": self.skopeo_img_check_command + " docker://{}/{}".format(registry, image)
+ }
result = self.execute_module("command", args)
if result.get("rc", 0) == 0 and not result.get("failed"):
return True
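
The skopeo invocation is now built from the single class-level `skopeo_img_check_command`, so the failure message can report the exact command that ran. Composing it for a hypothetical registry and image:

    skopeo_img_check_command = "timeout 10 skopeo inspect --tls-verify=false"
    registry, image = "registry.example.com", "openshift/origin-pod:v3.6"  # hypothetical values
    print(skopeo_img_check_command + " docker://{}/{}".format(registry, image))
    # timeout 10 skopeo inspect --tls-verify=false docker://registry.example.com/openshift/origin-pod:v3.6
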
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 8b780114f..0b795b6c4 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -46,6 +46,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
check_multi_minor_release = deployment_type in ['openshift-enterprise']
args = {
+ "package_mgr": self.get_var("ansible_pkg_mgr"),
"package_list": [
{
"name": "openvswitch",
diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py
index f5161d6f5..c109ebd24 100644
--- a/roles/openshift_health_checker/test/action_plugin_test.py
+++ b/roles/openshift_health_checker/test/action_plugin_test.py
@@ -80,7 +80,8 @@ def skipped(result):
None,
{},
])
-def test_action_plugin_missing_openshift_facts(plugin, task_vars):
+def test_action_plugin_missing_openshift_facts(plugin, task_vars, monkeypatch):
+ monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
assert failed(result, msg_has=['openshift_facts'])
@@ -94,7 +95,7 @@ def test_action_plugin_cannot_load_checks_with_the_same_name(plugin, task_vars,
result = plugin.run(tmp=None, task_vars=task_vars)
- assert failed(result, msg_has=['unique', 'duplicate_name', 'FakeCheck'])
+ assert failed(result, msg_has=['duplicate', 'duplicate_name', 'FakeCheck'])
def test_action_plugin_skip_non_active_checks(plugin, task_vars, monkeypatch):
@@ -217,24 +218,21 @@ def test_resolve_checks_ok(names, all_checks, expected):
assert resolve_checks(names, all_checks) == expected
-@pytest.mark.parametrize('names,all_checks,words_in_exception,words_not_in_exception', [
+@pytest.mark.parametrize('names,all_checks,words_in_exception', [
(
['testA', 'testB'],
[],
['check', 'name', 'testA', 'testB'],
- ['tag', 'group', '@'],
),
(
['@group'],
[],
['tag', 'name', 'group'],
- ['check', '@'],
),
(
['testA', 'testB', '@group'],
[],
['check', 'name', 'testA', 'testB', 'tag', 'group'],
- ['@'],
),
(
['testA', 'testB', '@group'],
@@ -244,13 +242,10 @@ def test_resolve_checks_ok(names, all_checks, expected):
fake_check('from_group_2', ['preflight', 'group']),
],
['check', 'name', 'testA', 'testB'],
- ['tag', 'group', '@'],
),
])
-def test_resolve_checks_failure(names, all_checks, words_in_exception, words_not_in_exception):
+def test_resolve_checks_failure(names, all_checks, words_in_exception):
with pytest.raises(Exception) as excinfo:
resolve_checks(names, all_checks)
for word in words_in_exception:
assert word in str(excinfo.value)
- for word in words_not_in_exception:
- assert word not in str(excinfo.value)
diff --git a/roles/openshift_health_checker/test/conftest.py b/roles/openshift_health_checker/test/conftest.py
index 3cbd65507..244a1f0fa 100644
--- a/roles/openshift_health_checker/test/conftest.py
+++ b/roles/openshift_health_checker/test/conftest.py
@@ -7,5 +7,6 @@ openshift_health_checker_path = os.path.dirname(os.path.dirname(__file__))
sys.path[1:1] = [
openshift_health_checker_path,
os.path.join(openshift_health_checker_path, 'action_plugins'),
+ os.path.join(openshift_health_checker_path, 'callback_plugins'),
os.path.join(openshift_health_checker_path, 'library'),
]
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index 6054d3f3e..e871f39f0 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -5,6 +5,7 @@ from openshift_checks.package_version import PackageVersion, OpenShiftCheckExcep
def task_vars_for(openshift_release, deployment_type):
return dict(
+ ansible_pkg_mgr='yum',
openshift=dict(common=dict(service_type=deployment_type)),
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
@@ -27,6 +28,7 @@ def test_openshift_version_not_supported():
def test_invalid_openshift_release_format():
task_vars = dict(
+ ansible_pkg_mgr='yum',
openshift=dict(common=dict(service_type='origin')),
openshift_image_tag='v0',
openshift_deployment_type='origin',
diff --git a/roles/openshift_health_checker/test/zz_failure_summary_test.py b/roles/openshift_health_checker/test/zz_failure_summary_test.py
new file mode 100644
index 000000000..0fc258133
--- /dev/null
+++ b/roles/openshift_health_checker/test/zz_failure_summary_test.py
@@ -0,0 +1,70 @@
+from zz_failure_summary import deduplicate_failures
+
+import pytest
+
+
+@pytest.mark.parametrize('failures,deduplicated', [
+ (
+ [
+ {
+ 'host': 'master1',
+ 'msg': 'One or more checks failed',
+ },
+ ],
+ [
+ {
+ 'host': ('master1',),
+ 'msg': 'One or more checks failed',
+ },
+ ],
+ ),
+ (
+ [
+ {
+ 'host': 'master1',
+ 'msg': 'One or more checks failed',
+ },
+ {
+ 'host': 'node1',
+ 'msg': 'One or more checks failed',
+ },
+ ],
+ [
+ {
+ 'host': ('master1', 'node1'),
+ 'msg': 'One or more checks failed',
+ },
+ ],
+ ),
+ (
+ [
+ {
+ 'host': 'node1',
+ 'msg': 'One or more checks failed',
+ 'checks': (('test_check', 'error message'),),
+ },
+ {
+ 'host': 'master2',
+ 'msg': 'Some error happened',
+ },
+ {
+ 'host': 'master1',
+ 'msg': 'One or more checks failed',
+ 'checks': (('test_check', 'error message'),),
+ },
+ ],
+ [
+ {
+ 'host': ('master1', 'node1'),
+ 'msg': 'One or more checks failed',
+ 'checks': (('test_check', 'error message'),),
+ },
+ {
+ 'host': ('master2',),
+ 'msg': 'Some error happened',
+ },
+ ],
+ ),
+])
+def test_deduplicate_failures(failures, deduplicated):
+ assert deduplicate_failures(failures) == deduplicated
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index 13cbfb14e..c26df3afa 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -1,9 +1,12 @@
---
-r_openshift_hosted_router_firewall_enabled: True
-r_openshift_hosted_router_use_firewalld: False
+r_openshift_hosted_router_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
-r_openshift_hosted_registry_firewall_enabled: True
-r_openshift_hosted_registry_use_firewalld: False
+r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+
+openshift_hosted_router_wait: "{{ not openshift_master_bootstrap_enabled | default(False) }}"
+openshift_hosted_registry_wait: "{{ not openshift_master_bootstrap_enabled | default(False) }}"
registry_volume_claim: 'registry-claim'
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index 6f012aed1..d73c290ff 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -61,6 +61,14 @@
openshift_hosted_registry_env_vars: "{{ openshift_hosted_registry_env_vars | combine({'OPENSHIFT_DEFAULT_REGISTRY':'docker-registry.default.svc:5000'}) }}"
when: openshift_push_via_dns | default(false) | bool
+- name: Update registry proxy settings for dc/docker-registry
+ set_fact:
+ openshift_hosted_registry_env_vars: "{{ {'HTTPS_PROXY': (openshift.common.https_proxy | default('')),
+ 'HTTP_PROXY': (openshift.common.http_proxy | default('')),
+ 'NO_PROXY': (openshift.common.no_proxy | default(''))}
+ | combine(openshift_hosted_registry_env_vars) }}"
+ when: (openshift.common.https_proxy | default(False)) or (openshift.common.http_proxy | default('')) != ''
+
- name: Create the registry service account
oc_serviceaccount:
name: "{{ openshift_hosted_registry_serviceaccount }}"
@@ -129,34 +137,36 @@
edits: "{{ openshift_hosted_registry_edits }}"
force: "{{ True|bool in openshift_hosted_registry_force }}"
-- name: Ensure OpenShift registry correctly rolls out (best-effort today)
- command: |
- oc rollout status deploymentconfig {{ openshift_hosted_registry_name }} \
- --namespace {{ openshift_hosted_registry_namespace }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig
- async: 600
- poll: 15
- failed_when: false
-
-- name: Determine the latest version of the OpenShift registry deployment
- command: |
- {{ openshift.common.client_binary }} get deploymentconfig {{ openshift_hosted_registry_name }} \
- --namespace {{ openshift_hosted_registry_namespace }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
- -o jsonpath='{ .status.latestVersion }'
- register: openshift_hosted_registry_latest_version
-
-- name: Sanity-check that the OpenShift registry rolled out correctly
- command: |
- {{ openshift.common.client_binary }} get replicationcontroller {{ openshift_hosted_registry_name }}-{{ openshift_hosted_registry_latest_version.stdout }} \
- --namespace {{ openshift_hosted_registry_namespace }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
- -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
- register: openshift_hosted_registry_rc_phase
- until: "'Running' not in openshift_hosted_registry_rc_phase.stdout"
- delay: 15
- retries: 40
- failed_when: "'Failed' in openshift_hosted_registry_rc_phase.stdout"
+- when: openshift_hosted_registry_wait
+ block:
+ - name: Ensure OpenShift registry correctly rolls out (best-effort today)
+ command: |
+ oc rollout status deploymentconfig {{ openshift_hosted_registry_name }} \
+ --namespace {{ openshift_hosted_registry_namespace }} \
+ --config {{ openshift.common.config_base }}/master/admin.kubeconfig
+ async: 600
+ poll: 15
+ failed_when: false
+
+ - name: Determine the latest version of the OpenShift registry deployment
+ command: |
+ {{ openshift.common.client_binary }} get deploymentconfig {{ openshift_hosted_registry_name }} \
+ --namespace {{ openshift_hosted_registry_namespace }} \
+ --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
+ -o jsonpath='{ .status.latestVersion }'
+ register: openshift_hosted_registry_latest_version
+
+ - name: Sanity-check that the OpenShift registry rolled out correctly
+ command: |
+ {{ openshift.common.client_binary }} get replicationcontroller {{ openshift_hosted_registry_name }}-{{ openshift_hosted_registry_latest_version.stdout }} \
+ --namespace {{ openshift_hosted_registry_namespace }} \
+ --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
+ -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
+ register: openshift_hosted_registry_rc_phase
+ until: "'Running' not in openshift_hosted_registry_rc_phase.stdout"
+ delay: 15
+ retries: 40
+ failed_when: "'Failed' in openshift_hosted_registry_rc_phase.stdout"
- include: storage/glusterfs.yml
when:
diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/registry/secure.yml
index a18e6eea9..a8a6f6fc8 100644
--- a/roles/openshift_hosted/tasks/registry/secure.yml
+++ b/roles/openshift_hosted/tasks/registry/secure.yml
@@ -37,6 +37,9 @@
hostnames:
- "{{ docker_registry_service.results.clusterip }}"
- "{{ docker_registry_route.results[0].spec.host }}"
+ - "{{ openshift_hosted_registry_name }}.default.svc"
+ - "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift.common.dns_domain }}"
+ - "{{ openshift_hosted_registry_routehost }}"
cert: "{{ docker_registry_cert_path }}"
key: "{{ docker_registry_key_path }}"
expire_days: "{{ openshift_hosted_registry_cert_expire_days if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool else omit }}"
diff --git a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml b/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
index 8aaba0f3c..8553a8098 100644
--- a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
@@ -2,7 +2,7 @@
- include: s3.yml
when: openshift.hosted.registry.storage.provider == 's3'
-- name: Ensure the resgistry secret exists
+- name: Ensure the registry secret exists
oc_secret:
name: "{{ registry_config_secret_name }}"
state: present
@@ -10,6 +10,19 @@
- path: /tmp/config.yml
data: "{{ lookup('template', 'registry_config.j2') }}"
register: registry_config_out
+ when: openshift_hosted_registry_storage_gcs_keyfile is not defined
+
+- name: Ensure the registry secret exists for GCS
+ oc_secret:
+ name: "{{ registry_config_secret_name }}"
+ state: present
+ contents:
+ - path: /tmp/config.yml
+ data: "{{ lookup('template', 'registry_config.j2') }}"
+ - path: /tmp/gcs.json
+ data: "{{ lookup('file', openshift_hosted_registry_storage_gcs_keyfile) | string }}"
+ register: registry_config_out
+ when: openshift_hosted_registry_storage_gcs_keyfile is defined
- name: Add secrets to registry service account
oc_serviceaccount_secret:
diff --git a/roles/openshift_hosted/tasks/registry/storage/registry_config_secret.j2 b/roles/openshift_hosted/tasks/registry/storage/registry_config_secret.j2
deleted file mode 120000
index b9e82c1ea..000000000
--- a/roles/openshift_hosted/tasks/registry/storage/registry_config_secret.j2
+++ /dev/null
@@ -1 +0,0 @@
-../../../templates/registry_config_secret.j2 \ No newline at end of file
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml
index f2661bcef..68ec7233e 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router/router.yml
@@ -94,36 +94,38 @@
stats_port: "{{ item.stats_port }}"
with_items: "{{ openshift_hosted_routers }}"
-- name: Ensure OpenShift router correctly rolls out (best-effort today)
- command: |
- {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \
- --namespace {{ item.namespace | default('default') }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig
- async: 600
- poll: 15
- with_items: "{{ openshift_hosted_routers }}"
- failed_when: false
+- when: openshift_hosted_router_wait
+ block:
+ - name: Ensure OpenShift router correctly rolls out (best-effort today)
+ command: |
+ {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \
+ --namespace {{ item.namespace | default('default') }} \
+ --config {{ openshift.common.config_base }}/master/admin.kubeconfig
+ async: 600
+ poll: 15
+ with_items: "{{ openshift_hosted_routers }}"
+ failed_when: false
-- name: Determine the latest version of the OpenShift router deployment
- command: |
- {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \
- --namespace {{ item.namespace }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
- -o jsonpath='{ .status.latestVersion }'
- register: openshift_hosted_routers_latest_version
- with_items: "{{ openshift_hosted_routers }}"
+ - name: Determine the latest version of the OpenShift router deployment
+ command: |
+ {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \
+ --namespace {{ item.namespace }} \
+ --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
+ -o jsonpath='{ .status.latestVersion }'
+ register: openshift_hosted_routers_latest_version
+ with_items: "{{ openshift_hosted_routers }}"
-- name: Poll for OpenShift router deployment success
- command: |
- {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
- --namespace {{ item.0.namespace }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
- -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
- register: openshift_hosted_router_rc_phase
- until: "'Running' not in openshift_hosted_router_rc_phase.stdout"
- delay: 15
- retries: 40
- failed_when: "'Failed' in openshift_hosted_router_rc_phase.stdout"
- with_together:
- - "{{ openshift_hosted_routers }}"
- - "{{ openshift_hosted_routers_latest_version.results }}"
+ - name: Poll for OpenShift router deployment success
+ command: |
+ {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
+ --namespace {{ item.0.namespace }} \
+ --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
+ -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
+ register: openshift_hosted_router_rc_phase
+ until: "'Running' not in openshift_hosted_router_rc_phase.stdout"
+ delay: 15
+ retries: 40
+ failed_when: "'Failed' in openshift_hosted_router_rc_phase.stdout"
+ with_together:
+ - "{{ openshift_hosted_routers }}"
+ - "{{ openshift_hosted_routers_latest_version.results }}"
diff --git a/roles/openshift_hosted/templates/registry_config.j2 b/roles/openshift_hosted/templates/registry_config.j2
index fc9272679..61da452de 100644
--- a/roles/openshift_hosted/templates/registry_config.j2
+++ b/roles/openshift_hosted/templates/registry_config.j2
@@ -60,7 +60,7 @@ storage:
gcs:
bucket: {{ openshift_hosted_registry_storage_gcs_bucket }}
{% if openshift_hosted_registry_storage_gcs_keyfile is defined %}
- keyfile: {{ openshift_hosted_registry_storage_gcs_keyfile }}
+ keyfile: /etc/registry/gcs.json
{% endif -%}
{% if openshift_hosted_registry_storage_gcs_rootdirectory is defined %}
rootdirectory: {{ openshift_hosted_registry_storage_gcs_rootdirectory }}
diff --git a/roles/openshift_hosted/templates/registry_config_secret.j2 b/roles/openshift_hosted/templates/registry_config_secret.j2
deleted file mode 100644
index ca68544ec..000000000
--- a/roles/openshift_hosted/templates/registry_config_secret.j2
+++ /dev/null
@@ -1,9 +0,0 @@
----
-apiVersion: v1
-kind: Secret
-metadata:
- name: registry-config
- annotations:
- provider: {{ openshift.hosted.registry.storage.provider }}
-data:
- config.yml: {{ registry_config }}
diff --git a/roles/openshift_loadbalancer/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml
index 3f6409233..41a2b12a2 100644
--- a/roles/openshift_loadbalancer/defaults/main.yml
+++ b/roles/openshift_loadbalancer/defaults/main.yml
@@ -1,6 +1,6 @@
---
-r_openshift_loadbalancer_firewall_enabled: True
-r_openshift_loadbalancer_use_firewalld: False
+r_openshift_loadbalancer_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_openshift_loadbalancer_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
haproxy_frontends:
- name: main
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 84ead3548..f283261c4 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -15,14 +15,25 @@ to the list of persisted [node labels](https://docs.openshift.org/latest/install
###Required vars:
- `openshift_logging_install_logging`: When `True` the `openshift_logging` role will install Aggregated Logging.
-- `openshift_logging_upgrade_logging`: When `True` the `openshift_logging` role will upgrade Aggregated Logging.
-When both `openshift_logging_install_logging` and `openshift_logging_upgrade_logging` are `False` the `openshift_logging` role will uninstall Aggregated Logging.
+When `openshift_logging_install_logging` is set to `False`, the `openshift_logging` role will uninstall Aggregated Logging.
###Optional vars:
-
+- `openshift_logging_purge_logging`: When `openshift_logging_install_logging` is set to 'False' to trigger uninstallation and `openshift_logging_purge_logging` is set to 'True', all logging persistent data, including PVCs, is completely and irreversibly removed. Defaults to 'False'.
- `openshift_logging_image_prefix`: The prefix for the logging images to use. Defaults to 'docker.io/openshift/origin-'.
+- `openshift_logging_curator_image_prefix`: The image prefix for the Curator image. Defaults to `openshift_logging_image_prefix`.
+- `openshift_logging_elasticsearch_image_prefix`: The image prefix for the Elasticsearch image. Defaults to `openshift_logging_image_prefix`.
+- `openshift_logging_fluentd_image_prefix`: The image prefix for the Fluentd image. Defaults to `openshift_logging_image_prefix`.
+- `openshift_logging_kibana_image_prefix`: The image prefix for the Kibana image. Defaults to `openshift_logging_image_prefix`.
+- `openshift_logging_kibana_proxy_image_prefix`: The image prefix for the Kibana proxy image. Defaults to `openshift_logging_image_prefix`.
+- `openshift_logging_mux_image_prefix`: The image prefix for the Mux image. Defaults to `openshift_logging_image_prefix`.
- `openshift_logging_image_version`: The image version for the logging images to use. Defaults to 'latest'.
+- `openshift_logging_curator_image_version`: The image version for the Curator image. Defaults to `openshift_logging_image_version`.
+- `openshift_logging_elasticsearch_image_version`: The image version for the Elasticsearch image. Defaults to `openshift_logging_image_version`.
+- `openshift_logging_fluentd_image_version`: The image version for the Fluentd image. Defaults to `openshift_logging_image_version`.
+- `openshift_logging_kibana_image_version`: The image version for the Kibana image. Defaults to `openshift_logging_image_version`.
+- `openshift_logging_kibana_proxy_image_version`: The image version for the Kibana proxy image. Defaults to `openshift_logging_image_version`.
+- `openshift_logging_mux_image_version`: The image version for the Mux image. Defaults to `openshift_logging_image_version`.
- `openshift_logging_use_ops`: If 'True', set up a second ES and Kibana cluster for infrastructure logs. Defaults to 'False'.
- `openshift_logging_master_url`: The URL for the Kubernetes master, this does not need to be public facing but should be accessible from within the cluster. Defaults to 'https://kubernetes.default.svc.{{openshift.common.dns_domain}}'.
- `openshift_logging_master_public_url`: The public facing URL for the Kubernetes master, this is used for Authentication redirection. Defaults to 'https://{{openshift.common.public_hostname}}:{{openshift.master.api_port}}'.
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 8b0f4cb62..716f0e002 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -8,6 +8,7 @@ openshift_logging_labels: {}
openshift_logging_label_key: ""
openshift_logging_label_value: ""
openshift_logging_install_logging: True
+openshift_logging_purge_logging: False
openshift_logging_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
openshift_logging_curator_default_days: 30
@@ -84,7 +85,7 @@ openshift_logging_es_ca: /etc/fluent/keys/ca
openshift_logging_es_client_cert: /etc/fluent/keys/cert
openshift_logging_es_client_key: /etc/fluent/keys/key
openshift_logging_es_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
-openshift_logging_es_cpu_limit: null
+openshift_logging_es_cpu_limit: 1000m
# the logging appenders for the root loggers to write ES logs. Valid values: 'file', 'console'
openshift_logging_es_log_appenders: ['file']
openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}"
@@ -125,7 +126,7 @@ openshift_logging_es_ops_ca: /etc/fluent/keys/ca
openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert
openshift_logging_es_ops_client_key: /etc/fluent/keys/key
openshift_logging_es_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
-openshift_logging_es_ops_cpu_limit: null
+openshift_logging_es_ops_cpu_limit: 1000m
openshift_logging_es_ops_memory_limit: "{{ openshift_hosted_logging_elasticsearch_ops_instance_ram | default('8Gi') }}"
openshift_logging_es_ops_pv_selector: "{{ openshift_hosted_loggingops_storage_labels | default('') }}"
openshift_logging_es_ops_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(False) }}"
diff --git a/roles/openshift_logging/tasks/annotate_ops_projects.yaml b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
new file mode 100644
index 000000000..fcb4c94d3
--- /dev/null
+++ b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
@@ -0,0 +1,17 @@
+---
+- oc_obj:
+ state: list
+ kind: project
+ name: "{{ item }}"
+ with_items: "{{ __default_logging_ops_projects }}"
+ register: __logging_ops_projects
+
+- name: Annotate Operations Projects
+ oc_edit:
+ kind: ns
+ name: "{{ item.item }}"
+ separator: '#'
+ content:
+ metadata#annotations#openshift.io/logging.ui.hostname: "{{ openshift_logging_kibana_ops_hostname }}"
+ with_items: "{{ __logging_ops_projects.results }}"
+ when: item.results.stderr is not defined
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index 6d023a02d..45298e345 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -14,6 +14,16 @@
- templates
- ds
+# remove all persistent volume claims as well if purge is set
+- name: delete logging pvc objects
+ oc_obj:
+ state: absent
+ kind: pvc
+ namespace: "{{ openshift_logging_namespace }}"
+ selector: "logging-infra"
+ when:
+ - openshift_logging_purge_logging | default(false) | bool
+
# delete the oauthclient
- name: delete oauthclient kibana-proxy
oc_obj:
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 464e8594f..a77df9986 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -132,6 +132,8 @@
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
+ openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
+ openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}"
@@ -161,6 +163,8 @@
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
+ openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
+ openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}"
@@ -181,8 +185,6 @@
openshift_logging_kibana_namespace: "{{ openshift_logging_namespace }}"
openshift_logging_kibana_master_url: "{{ openshift_logging_master_url }}"
openshift_logging_kibana_master_public_url: "{{ openshift_logging_master_public_url }}"
- openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix }}"
- openshift_logging_kibana_image_version: "{{ openshift_logging_image_version }}"
openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_replica_count }}"
openshift_logging_kibana_es_host: "{{ openshift_logging_es_host }}"
openshift_logging_kibana_es_port: "{{ openshift_logging_es_port }}"
@@ -197,8 +199,6 @@
openshift_logging_kibana_namespace: "{{ openshift_logging_namespace }}"
openshift_logging_kibana_master_url: "{{ openshift_logging_master_url }}"
openshift_logging_kibana_master_public_url: "{{ openshift_logging_master_public_url }}"
- openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix }}"
- openshift_logging_kibana_image_version: "{{ openshift_logging_image_version }}"
openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
openshift_logging_kibana_es_host: "{{ openshift_logging_es_ops_host }}"
openshift_logging_kibana_es_port: "{{ openshift_logging_es_ops_port }}"
@@ -216,6 +216,7 @@
when:
- openshift_logging_use_ops | bool
+- include: annotate_ops_projects.yaml
## Curator
- include_role:
@@ -226,8 +227,6 @@
openshift_logging_curator_es_host: "{{ openshift_logging_es_host }}"
openshift_logging_curator_es_port: "{{ openshift_logging_es_port }}"
openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}"
- openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}"
- openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}"
openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
- include_role:
@@ -239,8 +238,6 @@
openshift_logging_curator_es_port: "{{ openshift_logging_es_ops_port }}"
openshift_logging_curator_namespace: "{{ openshift_logging_namespace }}"
openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}"
- openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}"
- openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}"
openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
openshift_logging_curator_cpu_limit: "{{ openshift_logging_curator_ops_cpu_limit }}"
openshift_logging_curator_memory_limit: "{{ openshift_logging_curator_ops_memory_limit }}"
@@ -256,8 +253,6 @@
openshift_logging_mux_ops_host: "{{ ( openshift_logging_use_ops | bool ) | ternary('logging-es-ops', 'logging-es') }}"
openshift_logging_mux_namespace: "{{ openshift_logging_namespace }}"
openshift_logging_mux_master_url: "{{ openshift_logging_master_url }}"
- openshift_logging_mux_image_prefix: "{{ openshift_logging_image_prefix }}"
- openshift_logging_mux_image_version: "{{ openshift_logging_image_version }}"
openshift_logging_mux_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
when:
- openshift_logging_use_mux | bool
@@ -269,8 +264,6 @@
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
openshift_logging_fluentd_ops_host: "{{ ( openshift_logging_use_ops | bool ) | ternary('logging-es-ops', 'logging-es') }}"
- openshift_logging_fluentd_image_prefix: "{{ openshift_logging_image_prefix }}"
- openshift_logging_fluentd_image_version: "{{ openshift_logging_image_version }}"
openshift_logging_fluentd_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
openshift_logging_fluentd_master_url: "{{ openshift_logging_master_url }}"
openshift_logging_fluentd_namespace: "{{ openshift_logging_namespace }}"
diff --git a/roles/openshift_logging/vars/main.yaml b/roles/openshift_logging/vars/main.yaml
index e561b41e2..01809fddf 100644
--- a/roles/openshift_logging/vars/main.yaml
+++ b/roles/openshift_logging/vars/main.yaml
@@ -6,3 +6,5 @@ es_ops_node_quorum: "{{ (openshift_logging_es_ops_cluster_size | int/2 | round(0
es_ops_recover_expected_nodes: "{{openshift_logging_es_ops_cluster_size | int}}"
es_log_appenders: ['file', 'console']
+
+__default_logging_ops_projects: ['default', 'openshift', 'openshift-infra', 'kube-system']
diff --git a/roles/openshift_logging_curator/defaults/main.yml b/roles/openshift_logging_curator/defaults/main.yml
index 82ffb2f93..17807b644 100644
--- a/roles/openshift_logging_curator/defaults/main.yml
+++ b/roles/openshift_logging_curator/defaults/main.yml
@@ -1,7 +1,7 @@
---
### General logging settings
-openshift_logging_curator_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
-openshift_logging_curator_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
+openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}"
+openshift_logging_curator_image_version: "{{ openshift_logging_image_version | default('latest') }}"
openshift_logging_curator_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
openshift_logging_curator_master_url: "https://kubernetes.default.svc.cluster.local"
diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml
index 3113fb3c9..6e8fab2b5 100644
--- a/roles/openshift_logging_curator/tasks/main.yaml
+++ b/roles/openshift_logging_curator/tasks/main.yaml
@@ -86,7 +86,7 @@
component: "{{ curator_component }}"
logging_component: curator
deploy_name: "{{ curator_name }}"
- image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
+ image: "{{openshift_logging_curator_image_prefix}}logging-curator:{{openshift_logging_curator_image_version}}"
es_host: "{{ openshift_logging_curator_es_host }}"
es_port: "{{ openshift_logging_curator_es_port }}"
curator_cpu_limit: "{{ openshift_logging_curator_cpu_limit }}"
diff --git a/roles/openshift_logging_curator/templates/curator.j2 b/roles/openshift_logging_curator/templates/curator.j2
index 6431f86d9..e74918a40 100644
--- a/roles/openshift_logging_curator/templates/curator.j2
+++ b/roles/openshift_logging_curator/templates/curator.j2
@@ -44,6 +44,8 @@ spec:
cpu: "{{curator_cpu_limit}}"
{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
memory: "{{curator_memory_limit}}"
+ requests:
+ memory: "{{curator_memory_limit}}"
{% endif %}
env:
-
diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml
index 0690bf114..75bd479be 100644
--- a/roles/openshift_logging_elasticsearch/defaults/main.yml
+++ b/roles/openshift_logging_elasticsearch/defaults/main.yml
@@ -1,7 +1,7 @@
---
### Common settings
-openshift_logging_elasticsearch_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
-openshift_logging_elasticsearch_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
+openshift_logging_elasticsearch_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}"
+openshift_logging_elasticsearch_image_version: "{{ openshift_logging_image_version | default('latest') }}"
openshift_logging_elasticsearch_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
openshift_logging_elasticsearch_namespace: logging
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 28c3ffd96..1e800b1d6 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -229,7 +229,7 @@
dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
vars:
obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- size: "{{ openshift_logging_elasticsearch_pvc_size }}"
+ size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}"
@@ -243,7 +243,7 @@
dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
vars:
obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- size: "{{ openshift_logging_elasticsearch_pvc_size }}"
+ size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
when:
@@ -277,7 +277,7 @@
component: "{{ es_component }}"
logging_component: elasticsearch
deploy_name: "{{ es_deploy_name }}"
- image: "{{ openshift_logging_image_prefix }}logging-elasticsearch:{{ openshift_logging_image_version }}"
+ image: "{{ openshift_logging_elasticsearch_image_prefix }}logging-elasticsearch:{{ openshift_logging_elasticsearch_image_version }}"
es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit }}"
es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}"
es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}"
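
The new PVC size expression falls back to '10Gi' whenever the configured value is empty or whitespace-only; the same guard in plain Python, with a hypothetical input:

    pvc_size = "   "  # hypothetical inventory value
    size = "10Gi" if len(pvc_size.strip()) == 0 else pvc_size
    print(size)  # -> 10Gi
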
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index cbe6b89f2..5f2932541 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -48,7 +48,7 @@ spec:
cpu: "{{es_cpu_limit}}"
{% endif %}
requests:
- memory: "512Mi"
+ memory: "{{es_memory_limit}}"
ports:
-
containerPort: 9200
diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml
index a53bbd2df..30d3d854a 100644
--- a/roles/openshift_logging_fluentd/defaults/main.yml
+++ b/roles/openshift_logging_fluentd/defaults/main.yml
@@ -1,7 +1,7 @@
---
### General logging settings
-openshift_logging_fluentd_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
-openshift_logging_fluentd_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
+openshift_logging_fluentd_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}"
+openshift_logging_fluentd_image_version: "{{ openshift_logging_image_version | default('latest') }}"
openshift_logging_fluentd_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
openshift_logging_fluentd_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
openshift_logging_fluentd_namespace: logging
diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2
index 39dffba19..a4afb6618 100644
--- a/roles/openshift_logging_fluentd/templates/fluentd.j2
+++ b/roles/openshift_logging_fluentd/templates/fluentd.j2
@@ -28,7 +28,7 @@ spec:
{{ fluentd_nodeselector_key }}: "{{ fluentd_nodeselector_value }}"
containers:
- name: "{{ daemonset_container_name }}"
- image: "{{ openshift_logging_image_prefix }}{{ daemonset_name }}:{{ openshift_logging_image_version }}"
+ image: "{{ openshift_logging_fluentd_image_prefix }}{{ daemonset_name }}:{{ openshift_logging_fluentd_image_version }}"
imagePullPolicy: Always
securityContext:
privileged: true
@@ -36,6 +36,8 @@ spec:
limits:
cpu: {{ openshift_logging_fluentd_cpu_limit }}
memory: {{ openshift_logging_fluentd_memory_limit }}
+ requests:
+ memory: {{ openshift_logging_fluentd_memory_limit }}
volumeMounts:
- name: runlogjournal
mountPath: /run/log/journal
diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml
index b2556fd71..ee265bb14 100644
--- a/roles/openshift_logging_kibana/defaults/main.yml
+++ b/roles/openshift_logging_kibana/defaults/main.yml
@@ -2,8 +2,8 @@
### Common settings
openshift_logging_kibana_master_url: "https://kubernetes.default.svc.cluster.local"
openshift_logging_kibana_master_public_url: "https://kubernetes.default.svc.cluster.local"
-openshift_logging_kibana_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
-openshift_logging_kibana_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
+openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}"
+openshift_logging_kibana_image_version: "{{ openshift_logging_image_version | default('latest') }}"
openshift_logging_kibana_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
openshift_logging_kibana_namespace: logging
@@ -24,9 +24,11 @@ openshift_logging_kibana_edge_term_policy: Redirect
openshift_logging_kibana_ops_deployment: false
# Proxy settings
+openshift_logging_kibana_proxy_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}"
+openshift_logging_kibana_proxy_image_version: "{{ openshift_logging_image_version | default('latest') }}"
openshift_logging_kibana_proxy_debug: false
openshift_logging_kibana_proxy_cpu_limit: null
-openshift_logging_kibana_proxy_memory_limit: 96Mi
+openshift_logging_kibana_proxy_memory_limit: 256Mi
#The absolute path on the control node to the cert file to use
#for the public facing kibana certs
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
index 166f102f7..e17e8c1f2 100644
--- a/roles/openshift_logging_kibana/tasks/main.yaml
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -225,8 +225,8 @@
component: "{{ kibana_component }}"
logging_component: kibana
deploy_name: "{{ kibana_name }}"
- image: "{{ openshift_logging_image_prefix }}logging-kibana:{{ openshift_logging_image_version }}"
- proxy_image: "{{ openshift_logging_image_prefix }}logging-auth-proxy:{{ openshift_logging_image_version }}"
+ image: "{{ openshift_logging_kibana_image_prefix }}logging-kibana:{{ openshift_logging_kibana_image_version }}"
+ proxy_image: "{{ openshift_logging_kibana_proxy_image_prefix }}logging-auth-proxy:{{ openshift_logging_kibana_proxy_image_version }}"
es_host: "{{ openshift_logging_kibana_es_host }}"
es_port: "{{ openshift_logging_kibana_es_port }}"
kibana_cpu_limit: "{{ openshift_logging_kibana_cpu_limit }}"
diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2
index 512d99d06..da1386d3e 100644
--- a/roles/openshift_logging_kibana/templates/kibana.j2
+++ b/roles/openshift_logging_kibana/templates/kibana.j2
@@ -46,6 +46,8 @@ spec:
{% endif %}
{% if kibana_memory_limit is not none and kibana_memory_limit != "" %}
memory: "{{ kibana_memory_limit }}"
+ requests:
+ memory: "{{ kibana_memory_limit }}"
{% endif %}
{% endif %}
env:
@@ -82,6 +84,8 @@ spec:
{% endif %}
{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %}
memory: "{{ kibana_proxy_memory_limit }}"
+ requests:
+ memory: "{{ kibana_proxy_memory_limit }}"
{% endif %}
{% endif %}
ports:
diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml
index 7a3da9b4c..68412aec8 100644
--- a/roles/openshift_logging_mux/defaults/main.yml
+++ b/roles/openshift_logging_mux/defaults/main.yml
@@ -1,7 +1,7 @@
---
### General logging settings
-openshift_logging_mux_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
-openshift_logging_mux_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
+openshift_logging_mux_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}"
+openshift_logging_mux_image_version: "{{ openshift_logging_image_version | default('latest') }}"
openshift_logging_mux_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
openshift_logging_mux_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
openshift_logging_mux_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}"
diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml
index 8ec93de7d..2ec863afa 100644
--- a/roles/openshift_logging_mux/tasks/main.yaml
+++ b/roles/openshift_logging_mux/tasks/main.yaml
@@ -165,7 +165,7 @@
component: mux
logging_component: mux
deploy_name: "logging-{{ component }}"
- image: "{{ openshift_logging_image_prefix }}logging-fluentd:{{ openshift_logging_image_version }}"
+ image: "{{ openshift_logging_mux_image_prefix }}logging-fluentd:{{ openshift_logging_mux_image_version }}"
es_host: "{{ openshift_logging_mux_app_host }}"
es_port: "{{ openshift_logging_mux_app_port }}"
ops_host: "{{ openshift_logging_mux_ops_host }}"
diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
index 70afe5cee..ff18d3270 100644
--- a/roles/openshift_logging_mux/templates/mux.j2
+++ b/roles/openshift_logging_mux/templates/mux.j2
@@ -45,6 +45,8 @@ spec:
{% endif %}
{% if mux_memory_limit is not none %}
memory: "{{mux_memory_limit}}"
+ requests:
+ memory: "{{mux_memory_limit}}"
{% endif %}
{% endif %}
ports:
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index a4c178908..d70106276 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -1,6 +1,6 @@
---
-r_openshift_master_firewall_enabled: True
-r_openshift_master_use_firewalld: False
+r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
openshift_node_ips: []
r_openshift_master_clean_install: false
@@ -19,3 +19,8 @@ r_openshift_master_os_firewall_allow:
- service: etcd embedded
port: 4001/tcp
cond: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+
+oreg_url: ''
+oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
+oreg_auth_credentials_path: "{{ openshift.common.data_dir }}/.docker"
+oreg_auth_credentials_replace: False
diff --git a/roles/openshift_master/tasks/bootstrap.yml b/roles/openshift_master/tasks/bootstrap.yml
new file mode 100644
index 000000000..0013f5289
--- /dev/null
+++ b/roles/openshift_master/tasks/bootstrap.yml
@@ -0,0 +1,28 @@
+---
+
+- name: ensure the node-bootstrapper service account exists
+ oc_serviceaccount:
+ name: node-bootstrapper
+ namespace: openshift-infra
+ state: present
+ run_once: true
+
+- name: grant node-bootstrapper the correct permissions to bootstrap
+ oc_adm_policy_user:
+ namespace: openshift-infra
+ user: system:serviceaccount:openshift-infra:node-bootstrapper
+ resource_kind: cluster-role
+ resource_name: system:node-bootstrapper
+ state: present
+ run_once: true
+
+# TODO: create a module for this command.
+# oc_serviceaccounts_kubeconfig
+- name: create service account kubeconfig with csr rights
+ command: "oc serviceaccounts create-kubeconfig node-bootstrapper -n openshift-infra"
+ register: kubeconfig_out
+
+- name: put service account kubeconfig into a file on disk for bootstrap
+ copy:
+ content: "{{ kubeconfig_out.stdout }}"
+ dest: "{{ openshift_master_config_dir }}/bootstrap.kubeconfig"
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index a11471891..a06defdb9 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -177,9 +177,6 @@
local_facts:
no_proxy_etcd_host_ips: "{{ openshift_no_proxy_etcd_host_ips }}"
-- name: Remove the legacy master service if it exists
- include: clean_systemd_units.yml
-
- name: Install the systemd units
include: systemd_units.yml
@@ -218,6 +215,36 @@
- restart master api
- restart master controllers
+- name: modify controller args
+ yedit:
+ src: /etc/origin/master/master-config.yaml
+ edits:
+ - key: kubernetesMasterConfig.controllerArguments.cluster-signing-cert-file
+ value:
+ - /etc/origin/master/ca.crt
+ - key: kubernetesMasterConfig.controllerArguments.cluster-signing-key-file
+ value:
+ - /etc/origin/master/ca.key
+ notify:
+ - restart master controllers
+ when: openshift_master_bootstrap_enabled | default(False)
+
+- name: Check for credentials file for registry auth
+ stat:
+ path: "{{oreg_auth_credentials_path }}"
+ when:
+ - oreg_auth_user is defined
+ register: master_oreg_auth_credentials_stat
+
+- name: Create credentials for registry auth
+ command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ when:
+ - oreg_auth_user is defined
+ - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ notify:
+ - restart master api
+ - restart master controllers
+
- include: set_loopback_context.yml
when:
- openshift.common.version_gte_3_2_or_1_2
@@ -366,3 +393,7 @@
shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster
when:
- l_install_result | changed
+
+- name: node bootstrap settings
+ include: bootstrap.yml
+ when: openshift_master_bootstrap_enabled | default(False)
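All of the new master tasks above are opt-in. A minimal inventory sketch that would exercise both the bootstrap flow and the registry-auth tasks, with placeholder credentials:

```
openshift_master_bootstrap_enabled: True

# registry auth; the user and password values are illustrative placeholders
oreg_url: 'registry.example.com/openshift3/ose-${component}:${version}'
oreg_auth_user: 'example-user'
oreg_auth_password: 'example-password'
```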
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 72c231e52..c480d8223 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -3,6 +3,16 @@
# playbooks. For that reason the ha_svc variables are set via set_fact instead of
# the vars directory on the role.
+# This play may be consumed outside the role, we need to ensure that
+# openshift_master_config_dir is set.
+- name: Set openshift_master_config_dir if unset
+ set_fact:
+ openshift_master_config_dir: '/etc/origin/master'
+ when: openshift_master_config_dir is not defined
+
+- name: Remove the legacy master service if it exists
+ include: clean_systemd_units.yml
+
- name: Init HA Service Info
set_fact:
containerized_svc_dir: "/usr/lib/systemd/system"
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 973b3a619..a7dad5b1f 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -1,6 +1,64 @@
---
-r_openshift_node_firewall_enabled: True
-r_openshift_node_use_firewalld: False
+r_openshift_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+
+openshift_service_type: "{{ openshift.common.service_type }}"
+
+openshift_image_tag: ''
+
+openshift_node_ami_prep_packages:
+- "{{ openshift_service_type }}-master"
+- "{{ openshift_service_type }}-node"
+- "{{ openshift_service_type }}-docker-excluder"
+- "{{ openshift_service_type }}-sdn-ovs"
+- ansible
+- openvswitch
+- docker
+- etcd
+#- pcs
+- haproxy
+- dnsmasq
+- ntp
+- logrotate
+- httpd-tools
+- bind
+- firewalld
+- libselinux-python
+- conntrack-tools
+- openssl
+- cloud-init
+- iproute
+- python-dbus
+- PyYAML
+- yum-utils
+- python2-boto
+- python2-boto3
+- cloud-utils-growpart
+# gluster
+- glusterfs-fuse
+- heketi-client
+# nfs
+- nfs-utils
+- flannel
+- bash-completion
+# cockpit
+- cockpit-ws
+- cockpit-system
+- cockpit-bridge
+- cockpit-docker
+# iscsi
+- iscsi-initiator-utils
+# ceph
+- ceph-common
+# systemcontainer
+# - runc
+# - container-selinux
+# - atomic
+#
+openshift_deployment_type: origin
+
+openshift_node_bootstrap: False
+
r_openshift_node_os_firewall_deny: []
r_openshift_node_os_firewall_allow:
- service: Kubernetes kubelet
@@ -21,3 +79,8 @@ r_openshift_node_os_firewall_allow:
- service: Kubernetes service NodePort UDP
port: "{{ openshift_node_port_range | default('') }}/udp"
cond: "{{ openshift_node_port_range is defined }}"
+
+oreg_url: ''
+oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
+oreg_auth_credentials_path: "{{ openshift.common.data_dir }}/.docker"
+oreg_auth_credentials_replace: False
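Both the master and node roles now derive their firewall behavior from the global `os_firewall_*` variables instead of hard-coded role defaults, so one inventory setting flows through to every role. A sketch:

```
# disable firewall management for all roles that honor these defaults
os_firewall_enabled: False
# or keep it enabled and switch the backend to firewalld
os_firewall_use_firewalld: True
```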
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index f2c45a4bd..14ba48aba 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -27,6 +27,7 @@
when:
- (not skip_node_svc_handlers | default(False) | bool)
- not (node_service_status_changed | default(false) | bool)
+ - not openshift_node_bootstrap
- name: reload sysctl.conf
command: /sbin/sysctl -p
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index 06373de04..3db980514 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -19,6 +19,7 @@ dependencies:
- role: openshift_clock
- role: openshift_docker
- role: openshift_node_certificates
+ when: not openshift_node_bootstrap
- role: openshift_cloud_provider
- role: openshift_node_dnsmasq
when: openshift.common.use_dnsmasq | bool
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
new file mode 100644
index 000000000..cb1440283
--- /dev/null
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -0,0 +1,55 @@
+---
+- name: install needed rpm(s)
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items: "{{ openshift_node_ami_prep_packages }}"
+
+- name: create the directory for node
+ file:
+ state: directory
+ path: "/etc/systemd/system/{{ openshift_service_type }}-node.service.d"
+
+- name: lay down systemd override
+ copy:
+ dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service.d/override.conf"
+ content: |
+ [Unit]
+ After=cloud-init.service
+
+- name: update the sysconfig to have KUBECONFIG
+ lineinfile:
+ dest: "/etc/sysconfig/{{ openshift_service_type }}-node"
+ line: "KUBECONFIG=/root/csr_kubeconfig"
+ regexp: "^KUBECONFIG=.*"
+
+- name: update the ExecStart to have bootstrap
+ lineinfile:
+ dest: "/usr/lib/systemd/system/{{ openshift_service_type }}-node.service"
+ line: "{% raw %}ExecStart=/usr/bin/openshift start node --bootstrap --kubeconfig=${KUBECONFIG} $OPTIONS{% endraw %}"
+ regexp: "^ExecStart=.*"
+
+- name: "systemctl enable {{ openshift_service_type }}-node"
+ systemd:
+ name: "{{ item }}"
+ enabled: no
+ with_items:
+ - "{{ openshift_service_type }}-node.service"
+ - "{{ openshift_service_type }}-master.service"
+
+- name: Check for RPM generated config marker file .config_managed
+ stat:
+ path: /etc/origin/.config_managed
+ register: rpmgenerated_config
+
+- name: Remove RPM generated config files if present
+ file:
+ path: "/etc/origin/{{ item }}"
+ state: absent
+ when:
+ - rpmgenerated_config.stat.exists
+ - openshift_deployment_type in ['openshift-enterprise', 'atomic-enterprise']
+ with_items:
+ - master
+ - node
+ - .config_managed
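As with the master role, these bootstrap tasks only run when explicitly requested. A sketch of the inventory toggle, with the service type shown at its typical origin value:

```
openshift_node_bootstrap: True
# openshift_service_type defaults to openshift.common.service_type
# (e.g. 'origin'), so the unit edited above would be origin-node.service
```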
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
new file mode 100644
index 000000000..8210fd881
--- /dev/null
+++ b/roles/openshift_node/tasks/config.yml
@@ -0,0 +1,111 @@
+---
+- name: Install the systemd units
+ include: systemd_units.yml
+
+- name: Check for tuned package
+ command: rpm -q tuned
+ args:
+ warn: no
+ register: tuned_installed
+ changed_when: false
+ failed_when: false
+
+- name: Set atomic-guest tuned profile
+ command: "tuned-adm profile atomic-guest"
+ when: tuned_installed.rc == 0 and openshift.common.is_atomic | bool
+
+- name: Start and enable openvswitch service
+ systemd:
+ name: openvswitch.service
+ enabled: yes
+ state: started
+ daemon_reload: yes
+ when:
+ - openshift.common.is_containerized | bool
+ - openshift.common.use_openshift_sdn | default(true) | bool
+ register: ovs_start_result
+ until: not ovs_start_result | failed
+ retries: 3
+ delay: 30
+
+- set_fact:
+ ovs_service_status_changed: "{{ ovs_start_result | changed }}"
+
+- file:
+ dest: "{{ (openshift_node_kubelet_args|default({'config':None})).config}}"
+ state: directory
+ when: openshift_node_kubelet_args is defined and 'config' in openshift_node_kubelet_args
+
+# TODO: add the validate parameter when there is a validation command to run
+- name: Create the Node config
+ template:
+ dest: "{{ openshift.common.config_base }}/node/node-config.yaml"
+ src: node.yaml.v1.j2
+ backup: true
+ owner: root
+ group: root
+ mode: 0600
+ notify:
+ - restart node
+
+- name: Configure Node Environment Variables
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ regexp: "^{{ item.key }}="
+ line: "{{ item.key }}={{ item.value }}"
+ create: true
+ with_dict: "{{ openshift.node.env_vars | default({}) }}"
+ notify:
+ - restart node
+
+# Necessary because when you're on a node that's also a master the master will be
+# restarted after the node restarts docker and it will take up to 60 seconds for
+# systemd to start the master again
+- when: openshift.common.is_containerized | bool
+ block:
+ - name: Wait for master API to become available before proceeding
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent --tlsv1.2 --cacert {{ openshift.common.config_base }}/node/ca.crt
+ {{ openshift_node_master_api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
+
+ - name: Start and enable node dep
+ systemd:
+ daemon_reload: yes
+ name: "{{ openshift.common.service_type }}-node-dep"
+ enabled: yes
+ state: started
+
+- name: Start and enable node
+ systemd:
+ name: "{{ openshift.common.service_type }}-node"
+ enabled: yes
+ state: started
+ daemon_reload: yes
+ register: node_start_result
+ until: not node_start_result | failed
+ retries: 1
+ delay: 30
+ ignore_errors: true
+
+- name: Dump logs from node service if it failed
+ command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node
+ when: node_start_result | failed
+
+- name: Abort if node failed to start
+ fail:
+    msg: Node failed to start. Please inspect the logs and try again.
+ when: node_start_result | failed
+
+- set_fact:
+ node_service_status_changed: "{{ node_start_result | changed }}"
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
new file mode 100644
index 000000000..9bf4ed879
--- /dev/null
+++ b/roles/openshift_node/tasks/install.yml
@@ -0,0 +1,33 @@
+---
+# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
+# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
+- when: not openshift.common.is_containerized | bool
+ block:
+ - name: Install Node package
+ package:
+ name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ state: present
+
+ - name: Install sdn-ovs package
+ package:
+ name: "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ state: present
+ when:
+ - openshift.common.use_openshift_sdn | default(true) | bool
+
+ - name: Install conntrack-tools package
+ package:
+ name: "conntrack-tools"
+ state: present
+
+- when:
+ - openshift.common.is_containerized | bool
+ - not openshift.common.is_node_system_container | bool
+ block:
+ - name: Pre-pull node image when containerized
+ command: >
+ docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+
+ - include: config/install-node-docker-service-file.yml
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 3353a22e3..60a25dcc6 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -1,43 +1,15 @@
---
-# TODO: allow for overriding default ports where possible
- fail:
msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
when:
- (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']
- - not openshift_docker_use_crio | default(false)
+ - not openshift_use_crio | default(false)
- name: setup firewall
include: firewall.yml
static: yes
-- name: Set node facts
- openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- # Reset node labels to an empty dictionary.
- - role: node
- local_facts:
- labels: {}
- - role: node
- local_facts:
- annotations: "{{ openshift_node_annotations | default(none) }}"
- debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
- iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
- kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
- labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
- registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}"
- schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
- sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
- storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
- set_node_ip: "{{ openshift_set_node_ip | default(None) }}"
- node_image: "{{ osn_image | default(None) }}"
- ovs_image: "{{ osn_ovs_image | default(None) }}"
- proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}"
- local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}"
- dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}"
- env_vars: "{{ openshift_node_env_vars | default(None) }}"
-
+#### Disable Swap ####
# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
- name: Check for swap usage
command: grep "^[^#].*swap" /etc/fstab
@@ -46,9 +18,10 @@
failed_when: false
register: swap_result
-# Disable Swap Block
-- block:
-
+- when:
+ - swap_result.stdout_lines | length > 0
+ - openshift_disable_swap | default(true) | bool
+ block:
- name: Disable swap
command: swapoff --all
@@ -64,55 +37,17 @@
dest: /etc/fstab
line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines'
state: present
+#### End Disable Swap Block ####
- when:
- - swap_result.stdout_lines | length > 0
- - openshift_disable_swap | default(true) | bool
-# End Disable Swap Block
-
-# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
-# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
-- name: Install Node package
- package:
- name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
- state: present
- when: not openshift.common.is_containerized | bool
-
-- name: Check for tuned package
- command: rpm -q tuned
- args:
- warn: no
- register: tuned_installed
- changed_when: false
- failed_when: false
-
-- name: Set atomic-guest tuned profile
- command: "tuned-adm profile atomic-guest"
- when: tuned_installed.rc == 0 and openshift.common.is_atomic | bool
-
-- name: Install sdn-ovs package
- package:
- name: "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }}"
- state: present
- when:
- - openshift.common.use_openshift_sdn | default(true) | bool
- - not openshift.common.is_containerized | bool
+- name: include node installer
+ include: install.yml
- name: Restart cri-o
systemd:
name: cri-o
enabled: yes
state: restarted
- when: openshift_docker_use_crio | default(false)
-
-- name: Install conntrack-tools package
- package:
- name: "conntrack-tools"
- state: present
- when: not openshift.common.is_containerized | bool
-
-- name: Install the systemd units
- include: systemd_units.yml
+ when: openshift_use_crio | default(false)
# The atomic-openshift-node service will set this parameter on
# startup, but if the network service is restarted this setting is
@@ -126,37 +61,26 @@
notify:
- reload sysctl.conf
-- name: Start and enable openvswitch service
- systemd:
- name: openvswitch.service
- enabled: yes
- state: started
- daemon_reload: yes
- when:
- - openshift.common.is_containerized | bool
- - openshift.common.use_openshift_sdn | default(true) | bool
- register: ovs_start_result
- until: not ovs_start_result | failed
- retries: 3
- delay: 30
+- name: include bootstrap node config
+ include: bootstrap.yml
+ when: openshift_node_bootstrap
-- set_fact:
- ovs_service_status_changed: "{{ ovs_start_result | changed }}"
+- name: include standard node config
+ include: config.yml
+ when: not openshift_node_bootstrap
-- file:
- dest: "{{ (openshift_node_kubelet_args|default({'config':None})).config}}"
- state: directory
- when: openshift_node_kubelet_args is defined and 'config' in openshift_node_kubelet_args
+- name: Check for credentials file for registry auth
+ stat:
+ path: "{{oreg_auth_credentials_path }}"
+ when:
+ - oreg_auth_user is defined
+ register: node_oreg_auth_credentials_stat
-# TODO: add the validate parameter when there is a validation command to run
-- name: Create the Node config
- template:
- dest: "{{ openshift.common.config_base }}/node/node-config.yaml"
- src: node.yaml.v1.j2
- backup: true
- owner: root
- group: root
- mode: 0600
+- name: Create credentials for registry auth
+ command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ when:
+ - oreg_auth_user is defined
+ - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
notify:
- restart node
@@ -176,16 +100,7 @@
notify:
- restart node
-- name: Configure Node Environment Variables
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
- regexp: "^{{ item.key }}="
- line: "{{ item.key }}={{ item.value }}"
- create: true
- with_dict: "{{ openshift.node.env_vars | default({}) }}"
- notify:
- - restart node
-
+#### Storage class plugins here ####
- name: NFS storage plugin configuration
include: storage_plugins/nfs.yml
tags:
@@ -203,55 +118,7 @@
include: storage_plugins/iscsi.yml
when: "'iscsi' in openshift.node.storage_plugin_deps"
-# Necessary because when you're on a node that's also a master the master will be
-# restarted after the node restarts docker and it will take up to 60 seconds for
-# systemd to start the master again
-- name: Wait for master API to become available before proceeding
- # Using curl here since the uri module requires python-httplib2 and
- # wait_for port doesn't provide health information.
- command: >
- curl --silent --tlsv1.2 --cacert {{ openshift.common.config_base }}/node/ca.crt
- {{ openshift_node_master_api_url }}/healthz/ready
- args:
- # Disables the following warning:
- # Consider using get_url or uri module rather than running curl
- warn: no
- register: api_available_output
- until: api_available_output.stdout == 'ok'
- retries: 120
- delay: 1
- changed_when: false
- when: openshift.common.is_containerized | bool
-
-- name: Start and enable node dep
- systemd:
- daemon_reload: yes
- name: "{{ openshift.common.service_type }}-node-dep"
- enabled: yes
- state: started
- when: openshift.common.is_containerized | bool
-
-
-- name: Start and enable node
- systemd:
- name: "{{ openshift.common.service_type }}-node"
- enabled: yes
- state: started
- daemon_reload: yes
- register: node_start_result
- until: not node_start_result | failed
- retries: 1
- delay: 30
- ignore_errors: true
-
-- name: Dump logs from node service if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node
- when: node_start_result | failed
-
-- name: Abort if node failed to start
- fail:
- msg: Node failed to start please inspect the logs and try again
- when: node_start_result | failed
+##### END Storage #####
-- set_fact:
- node_service_status_changed: "{{ node_start_result | changed }}"
+- include: config/workaround-bz1331590-ovs-oom-fix.yml
+ when: openshift.common.use_openshift_sdn | default(true) | bool
diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml
index dc1df9185..e09063aa5 100644
--- a/roles/openshift_node/tasks/openvswitch_system_container.yml
+++ b/roles/openshift_node/tasks/openvswitch_system_container.yml
@@ -1,6 +1,6 @@
---
- set_fact:
- l_use_crio: "{{ openshift_docker_use_crio | default(false) }}"
+ l_use_crio: "{{ openshift_use_crio | default(false) }}"
- set_fact:
l_service_name: "cri-o"
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index b86bb1549..4687400cd 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -1,22 +1,6 @@
---
# This file is included both in the openshift_node role and in the upgrade
# playbooks.
-
-- include: config/install-node-deps-docker-service-file.yml
- when: openshift.common.is_containerized | bool
-
-- block:
- - name: Pre-pull node image
- command: >
- docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
-
- - include: config/install-node-docker-service-file.yml
- when:
- - openshift.common.is_containerized | bool
- - not openshift.common.is_node_system_container | bool
-
- name: Install Node service file
template:
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
@@ -26,24 +10,24 @@
- reload systemd units
- restart node
-- include: config/install-ovs-service-env-file.yml
- when: openshift.common.is_containerized | bool
+- when: openshift.common.is_containerized | bool
+ block:
+ - name: include node deps docker service file
+ include: config/install-node-deps-docker-service-file.yml
-- name: Install Node system container
- include: node_system_container.yml
- when:
- - openshift.common.is_containerized | bool
- - openshift.common.is_node_system_container | bool
+ - name: include ovs service environment file
+ include: config/install-ovs-service-env-file.yml
-- name: Install OpenvSwitch system containers
- include: openvswitch_system_container.yml
- when:
- - openshift.common.use_openshift_sdn | default(true) | bool
- - openshift.common.is_containerized | bool
- - openshift.common.is_openvswitch_system_container | bool
+ - name: Install Node system container
+ include: node_system_container.yml
+ when:
+ - openshift.common.is_node_system_container | bool
-- include: config/workaround-bz1331590-ovs-oom-fix.yml
- when: openshift.common.use_openshift_sdn | default(true) | bool
+ - name: Install OpenvSwitch system containers
+ include: openvswitch_system_container.yml
+ when:
+ - openshift.common.use_openshift_sdn | default(true) | bool
+ - openshift.common.is_openvswitch_system_container | bool
- block:
- name: Pre-pull openvswitch image
diff --git a/roles/openshift_node/tasks/tuned.yml b/roles/openshift_node/tasks/tuned.yml
new file mode 100644
index 000000000..425bf6a26
--- /dev/null
+++ b/roles/openshift_node/tasks/tuned.yml
@@ -0,0 +1,41 @@
+---
+- name: Check for tuned package
+ command: rpm -q tuned
+ args:
+ warn: no
+ register: tuned_installed
+ changed_when: false
+ failed_when: false
+
+- name: Tuned service setup
+ block:
+ - name: Set tuned OpenShift variables
+ set_fact:
+ openshift_tuned_guest_profile: "{{ 'atomic-guest' if openshift.common.is_atomic else 'virtual-guest' }}"
+ tuned_etc_directory: '/etc/tuned'
+ tuned_templates_source: '../templates/tuned'
+
+ - name: Ensure directory structure exists
+ file:
+ state: directory
+ dest: '{{ tuned_etc_directory }}/{{ item.path }}'
+ with_filetree: '{{ tuned_templates_source }}'
+ when: item.state == 'directory'
+
+ - name: Ensure files are populated from templates
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ tuned_etc_directory }}/{{ item.path }}'
+ with_filetree: '{{ tuned_templates_source }}'
+ when: item.state == 'file'
+
+ - name: Make tuned use the recommended tuned profile on restart
+ file: path=/etc/tuned/active_profile state=absent
+
+ - name: Restart tuned service
+ systemd:
+ state: restarted
+ daemon_reload: yes
+ name: tuned
+
+  when: tuned_installed.rc == 0
diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2
index 3d0ae3bbd..0856737f6 100644
--- a/roles/openshift_node/templates/node.service.j2
+++ b/roles/openshift_node/templates/node.service.j2
@@ -8,7 +8,7 @@ Wants={{ openshift.docker.service_name }}.service
Documentation=https://github.com/openshift/origin
Requires=dnsmasq.service
After=dnsmasq.service
-{% if openshift.docker.use_crio %}Wants=cri-o.service{% endif %}
+{% if openshift_use_crio|default(false) %}Wants=cri-o.service{% endif %}
[Service]
Type=notify
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 93f8658b4..f59aa6fb4 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -16,13 +16,11 @@ imageConfig:
latest: false
kind: NodeConfig
kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yaml(level=1) }}
-{% if openshift.docker.use_crio | default(False) %}
+{% if openshift_use_crio | default(False) %}
container-runtime:
- remote
container-runtime-endpoint:
- /var/run/crio.sock
- experimental-cri:
- - 'true'
image-service-endpoint:
- /var/run/crio.sock
node-labels:
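For reference, a sketch of roughly what the `kubeletArguments` block renders to when `openshift_use_crio` is true; note that the `experimental-cri` flag is no longer emitted:

```
kubeletArguments:
  container-runtime:
  - remote
  container-runtime-endpoint:
  - /var/run/crio.sock
  image-service-endpoint:
  - /var/run/crio.sock
```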
diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service
index c4580be1f..8734e7443 100644
--- a/roles/openshift_node/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node/templates/openshift.docker.node.dep.service
@@ -3,7 +3,7 @@ Requires={{ openshift.docker.service_name }}.service
After={{ openshift.docker.service_name }}.service
PartOf={{ openshift.common.service_type }}-node.service
Before={{ openshift.common.service_type }}-node.service
-{% if openshift.docker.use_crio %}Wants=cri-o.service{% endif %}
+{% if openshift_use_crio|default(false) %}Wants=cri-o.service{% endif %}
[Service]
ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
diff --git a/roles/openshift_node/templates/tuned/openshift-control-plane/tuned.conf b/roles/openshift_node/templates/tuned/openshift-control-plane/tuned.conf
new file mode 100644
index 000000000..f22f21065
--- /dev/null
+++ b/roles/openshift_node/templates/tuned/openshift-control-plane/tuned.conf
@@ -0,0 +1,25 @@
+#
+# tuned configuration
+#
+
+[main]
+summary=Optimize systems running OpenShift control plane
+include=openshift
+
+[sysctl]
+# ktune sysctl settings, maximizing i/o throughput
+#
+# Minimal preemption granularity for CPU-bound tasks:
+# (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
+kernel.sched_min_granularity_ns=10000000
+
+# The total time the scheduler will consider a migrated process
+# "cache hot" and thus less likely to be re-migrated
+# (system default is 500000, i.e. 0.5 ms)
+kernel.sched_migration_cost_ns=5000000
+
+# SCHED_OTHER wake-up granularity.
+#
+# Preemption granularity when tasks wake up. Lower the value to improve
+# wake-up latency and throughput for latency critical tasks.
+kernel.sched_wakeup_granularity_ns=4000000
diff --git a/roles/openshift_node/templates/tuned/openshift-node/tuned.conf b/roles/openshift_node/templates/tuned/openshift-node/tuned.conf
new file mode 100644
index 000000000..78c7d19c9
--- /dev/null
+++ b/roles/openshift_node/templates/tuned/openshift-node/tuned.conf
@@ -0,0 +1,10 @@
+#
+# tuned configuration
+#
+
+[main]
+summary=Optimize systems running OpenShift nodes
+include=openshift
+
+[sysctl]
+net.ipv4.tcp_fastopen=3
diff --git a/roles/openshift_node/templates/tuned/openshift/tuned.conf b/roles/openshift_node/templates/tuned/openshift/tuned.conf
new file mode 100644
index 000000000..68ac5dadb
--- /dev/null
+++ b/roles/openshift_node/templates/tuned/openshift/tuned.conf
@@ -0,0 +1,24 @@
+#
+# tuned configuration
+#
+
+[main]
+summary=Optimize systems running OpenShift (parent profile)
+include=${f:virt_check:{{ openshift_tuned_guest_profile }}:throughput-performance}
+
+[selinux]
+avc_cache_threshold=65536
+
+[net]
+nf_conntrack_hashsize=131072
+
+[sysctl]
+kernel.pid_max=131072
+net.netfilter.nf_conntrack_max=1048576
+fs.inotify.max_user_watches=65536
+net.ipv4.neigh.default.gc_thresh1=8192
+net.ipv4.neigh.default.gc_thresh2=32768
+net.ipv4.neigh.default.gc_thresh3=65536
+net.ipv6.neigh.default.gc_thresh1=8192
+net.ipv6.neigh.default.gc_thresh2=32768
+net.ipv6.neigh.default.gc_thresh3=65536
diff --git a/roles/openshift_node/templates/tuned/recommend.conf b/roles/openshift_node/templates/tuned/recommend.conf
new file mode 100644
index 000000000..5fa765798
--- /dev/null
+++ b/roles/openshift_node/templates/tuned/recommend.conf
@@ -0,0 +1,8 @@
+[openshift-node]
+/etc/origin/node/node-config.yaml=.*region=primary
+
+[openshift-control-plane,master]
+/etc/origin/master/master-config.yaml=.*
+
+[openshift-control-plane,node]
+/etc/origin/node/node-config.yaml=.*region=infra
diff --git a/roles/openshift_node_certificates/defaults/main.yml b/roles/openshift_node_certificates/defaults/main.yml
index 70a38b844..455f26f30 100644
--- a/roles/openshift_node_certificates/defaults/main.yml
+++ b/roles/openshift_node_certificates/defaults/main.yml
@@ -1,2 +1,3 @@
---
openshift_node_cert_expire_days: 730
+openshift_ca_host: ''
diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
index 4aab8f2e9..61d2a5b51 100755
--- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
+++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
@@ -46,9 +46,7 @@ if [[ $2 =~ ^(up|dhcp4-change|dhcp6-change)$ ]]; then
def_route=$(/sbin/ip route list match 0.0.0.0/0 | awk '{print $3 }')
def_route_int=$(/sbin/ip route get to ${def_route} | awk '{print $3}')
def_route_ip=$(/sbin/ip route get to ${def_route} | awk '{print $5}')
- if [[ ${DEVICE_IFACE} == ${def_route_int} && \
- -n "${IP4_NAMESERVERS}" && \
- "${IP4_NAMESERVERS}" != "${def_route_ip}" ]]; then
+ if [[ ${DEVICE_IFACE} == ${def_route_int} ]]; then
if [ ! -f /etc/dnsmasq.d/origin-dns.conf ]; then
cat << EOF > /etc/dnsmasq.d/origin-dns.conf
no-resolv
@@ -61,35 +59,40 @@ EOF
NEEDS_RESTART=1
fi
- ######################################################################
- # Write out default nameservers for /etc/dnsmasq.d/origin-upstream-dns.conf
- # and /etc/origin/node/resolv.conf in their respective formats
- for ns in ${IP4_NAMESERVERS}; do
- if [[ ! -z $ns ]]; then
- echo "server=${ns}" >> $UPSTREAM_DNS_TMP
- echo "nameserver ${ns}" >> $NEW_NODE_RESOLV_CONF
+      # If NetworkManager doesn't know about the nameservers then the best
+      # we can do is grab them from /etc/resolv.conf, but only if we've got no
+      # watermark
+ if ! grep -q '99-origin-dns.sh' /etc/resolv.conf; then
+ if [[ -z "${IP4_NAMESERVERS}" || "${IP4_NAMESERVERS}" == "${def_route_ip}" ]]; then
+ IP4_NAMESERVERS=`grep '^nameserver ' /etc/resolv.conf | awk '{ print $2 }'`
+ fi
+ ######################################################################
+ # Write out default nameservers for /etc/dnsmasq.d/origin-upstream-dns.conf
+ # and /etc/origin/node/resolv.conf in their respective formats
+ for ns in ${IP4_NAMESERVERS}; do
+ if [[ ! -z $ns ]]; then
+ echo "server=${ns}" >> $UPSTREAM_DNS_TMP
+ echo "nameserver ${ns}" >> $NEW_NODE_RESOLV_CONF
+ fi
+ done
+ # Sort it in case DNS servers arrived in a different order
+ sort $UPSTREAM_DNS_TMP > $UPSTREAM_DNS_TMP_SORTED
+ sort $UPSTREAM_DNS > $CURRENT_UPSTREAM_DNS_SORTED
+ # Compare to the current config file (sorted)
+ NEW_DNS_SUM=`md5sum ${UPSTREAM_DNS_TMP_SORTED} | awk '{print $1}'`
+ CURRENT_DNS_SUM=`md5sum ${CURRENT_UPSTREAM_DNS_SORTED} | awk '{print $1}'`
+ if [ "${NEW_DNS_SUM}" != "${CURRENT_DNS_SUM}" ]; then
+ # DNS has changed, copy the temp file to the proper location (-Z
+ # sets default selinux context) and set the restart flag
+ cp -Z $UPSTREAM_DNS_TMP $UPSTREAM_DNS
+ NEEDS_RESTART=1
+ fi
+ # compare /etc/origin/node/resolv.conf checksum and replace it if different
+ NEW_NODE_RESOLV_CONF_MD5=`md5sum ${NEW_NODE_RESOLV_CONF}`
+ OLD_NODE_RESOLV_CONF_MD5=`md5sum /etc/origin/node/resolv.conf`
+ if [ "${NEW_NODE_RESOLV_CONF_MD5}" != "${OLD_NODE_RESOLV_CONF_MD5}" ]; then
+ cp -Z $NEW_NODE_RESOLV_CONF /etc/origin/node/resolv.conf
fi
- done
-
- # Sort it in case DNS servers arrived in a different order
- sort $UPSTREAM_DNS_TMP > $UPSTREAM_DNS_TMP_SORTED
- sort $UPSTREAM_DNS > $CURRENT_UPSTREAM_DNS_SORTED
-
- # Compare to the current config file (sorted)
- NEW_DNS_SUM=`md5sum ${UPSTREAM_DNS_TMP_SORTED} | awk '{print $1}'`
- CURRENT_DNS_SUM=`md5sum ${CURRENT_UPSTREAM_DNS_SORTED} | awk '{print $1}'`
- if [ "${NEW_DNS_SUM}" != "${CURRENT_DNS_SUM}" ]; then
- # DNS has changed, copy the temp file to the proper location (-Z
- # sets default selinux context) and set the restart flag
- cp -Z $UPSTREAM_DNS_TMP $UPSTREAM_DNS
- NEEDS_RESTART=1
- fi
-
- # compare /etc/origin/node/resolv.conf checksum and replace it if different
- NEW_NODE_RESOLV_CONF_MD5=`md5sum ${NEW_NODE_RESOLV_CONF}`
- OLD_NODE_RESOLV_CONF_MD5=`md5sum /etc/origin/node/resolv.conf`
- if [ "${NEW_NODE_RESOLV_CONF_MD5}" != "${OLD_NODE_RESOLV_CONF_MD5}" ]; then
- cp -Z $NEW_NODE_RESOLV_CONF /etc/origin/node/resolv.conf
fi
if ! `systemctl -q is-active dnsmasq.service`; then
diff --git a/roles/openshift_node_dnsmasq/tasks/main.yml b/roles/openshift_node_dnsmasq/tasks/main.yml
index d0221a94b..9bbaafc29 100644
--- a/roles/openshift_node_dnsmasq/tasks/main.yml
+++ b/roles/openshift_node_dnsmasq/tasks/main.yml
@@ -14,6 +14,17 @@
package: name=dnsmasq state=installed
when: not openshift.common.is_atomic | bool
+- name: ensure origin/node directory exists
+ file:
+ state: directory
+ path: "{{ item }}"
+ owner: root
+ group: root
+ mode: '0700'
+ with_items:
+ - /etc/origin
+ - /etc/origin/node
+
# this file is copied to /etc/dnsmasq.d/ when the node starts and is removed
# when the node stops. A dbus-message is sent to dnsmasq to add the same entries
# so that dnsmasq doesn't need to be restarted. Once we can use dnsmasq 2.77 or
diff --git a/roles/openshift_prometheus/README.md b/roles/openshift_prometheus/README.md
new file mode 100644
index 000000000..c5a44bffb
--- /dev/null
+++ b/roles/openshift_prometheus/README.md
@@ -0,0 +1,95 @@
+OpenShift Prometheus
+====================
+
+OpenShift Prometheus Installation
+
+Requirements
+------------
+
+
+Role Variables
+--------------
+
+For default values, see [`defaults/main.yaml`](defaults/main.yaml).
+
+- `openshift_prometheus_state`: `present` to install or update; `absent` to uninstall.
+
+- `openshift_prometheus_namespace`: project (i.e. namespace) where the components will be
+ deployed.
+
+- `openshift_prometheus_replicas`: The number of replicas for the Prometheus deployment.
+
+- `openshift_prometheus_node_selector`: Selector for the nodes Prometheus will be deployed on.
+
+- `openshift_prometheus_image_<COMPONENT>`: The image to use for the given component.
+
+## Storage related variables
+Each Prometheus component (prometheus, alertmanager, alert-buffer, oauth-proxy) can have a PV claim configured by setting the corresponding role variables:
+```
+openshift_prometheus_<COMPONENT>_storage_type: <VALUE>
+openshift_prometheus_<COMPONENT>_pvc_(name|size|access_modes|pv_selector): <VALUE>
+```
+e.g.
+```
+openshift_prometheus_storage_type: pvc
+openshift_prometheus_alertmanager_pvc_name: alertmanager
+openshift_prometheus_alertbuffer_pvc_size: 10G
+openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
+```
+
+## Additional Alert Rules file variable
+An external file with alert rules can be added by setting its path in the additional rules variable:
+```
+openshift_prometheus_additional_rules_file: <PATH>
+```
+
+The file content must be in the Prometheus alert rules format.
+The following example defines a rule that fires an alert when one of the cluster nodes is down:
+
+```
+groups:
+- name: example-rules
+ interval: 30s # defaults to global interval
+ rules:
+ - alert: Node Down
+ expr: up{job="kubernetes-nodes"} == 0
+ annotations:
+ miqTarget: "ContainerNode"
+ severity: "HIGH"
+ message: "{{ '{{' }}{{ '$labels.instance' }}{{ '}}' }} is down"
+```
+
+
+## Additional variables to control resource limits
+Each Prometheus component (prometheus, alertmanager, alert-buffer, oauth-proxy) can specify CPU and memory limits and requests by setting
+the corresponding role variables:
+```
+openshift_prometheus_<COMPONENT>_(cpu|memory)_(limit|requests): <VALUE>
+```
+e.g.
+```
+openshift_prometheus_alertmanager_memory_limit: 1Gi
+openshift_prometheus_oauth_proxy_cpu_requests: 100m
+```
+
+Dependencies
+------------
+
+openshift_facts
+
+
+Example Playbook
+----------------
+
+```
+- name: Configure openshift-prometheus
+ hosts: oo_first_master
+ roles:
+ - role: openshift_prometheus
+```
+
+License
+-------
+
+Apache License, Version 2.0
+
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
new file mode 100644
index 000000000..18d6a1645
--- /dev/null
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -0,0 +1,74 @@
+---
+# defaults file for openshift_prometheus
+openshift_prometheus_state: present
+
+openshift_prometheus_namespace: prometheus
+
+openshift_prometheus_replicas: 1
+openshift_prometheus_node_selector: {"region":"infra"}
+
+# images
+openshift_prometheus_image_proxy: "openshift/oauth-proxy:v1.0.0"
+openshift_prometheus_image_prometheus: "openshift/prometheus:v2.0.0-dev"
+openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:dev"
+openshift_prometheus_image_alertbuffer: "ilackarms/message-buffer"
+
+# additional prometheus rules file
+openshift_prometheus_additional_rules_file: null
+
+# All the required exports
+openshift_prometheus_pv_exports:
+ - prometheus
+ - prometheus-alertmanager
+ - prometheus-alertbuffer
+# PV template files and their created object names
+openshift_prometheus_pv_data:
+ - pv_name: prometheus
+ pv_template: prom-pv-server.yml
+ pv_label: Prometheus Server PV
+ - pv_name: prometheus-alertmanager
+ pv_template: prom-pv-alertmanager.yml
+ pv_label: Prometheus Alertmanager PV
+ - pv_name: prometheus-alertbuffer
+ pv_template: prom-pv-alertbuffer.yml
+ pv_label: Prometheus Alert Buffer PV
+
+# Hostname/IP of the NFS server. Currently defaults to first master
+openshift_prometheus_nfs_server: "{{ groups.nfs.0 }}"
+
+# storage
+openshift_prometheus_storage_type: pvc
+openshift_prometheus_pvc_name: prometheus
+openshift_prometheus_pvc_size: 10G
+openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
+openshift_prometheus_pvc_pv_selector: {}
+
+openshift_prometheus_alertmanager_storage_type: pvc
+openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager
+openshift_prometheus_alertmanager_pvc_size: 10G
+openshift_prometheus_alertmanager_pvc_access_modes: [ReadWriteOnce]
+openshift_prometheus_alertmanager_pvc_pv_selector: {}
+
+openshift_prometheus_alertbuffer_storage_type: pvc
+openshift_prometheus_alertbuffer_pvc_name: prometheus-alertbuffer
+openshift_prometheus_alertbuffer_pvc_size: 10G
+openshift_prometheus_alertbuffer_pvc_access_modes: [ReadWriteOnce]
+openshift_prometheus_alertbuffer_pvc_pv_selector: {}
+
+# container resources
+openshift_prometheus_cpu_limit: null
+openshift_prometheus_memory_limit: null
+openshift_prometheus_cpu_requests: null
+openshift_prometheus_memory_requests: null
+openshift_prometheus_alertmanager_cpu_limit: null
+openshift_prometheus_alertmanager_memory_limit: null
+openshift_prometheus_alertmanager_cpu_requests: null
+openshift_prometheus_alertmanager_memory_requests: null
+openshift_prometheus_alertbuffer_cpu_limit: null
+openshift_prometheus_alertbuffer_memory_limit: null
+openshift_prometheus_alertbuffer_cpu_requests: null
+openshift_prometheus_alertbuffer_memory_requests: null
+openshift_prometheus_oauth_proxy_cpu_limit: null
+openshift_prometheus_oauth_proxy_memory_limit: null
+openshift_prometheus_oauth_proxy_cpu_requests: null
+openshift_prometheus_oauth_proxy_memory_requests: null
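Any of the defaults above can be overridden from inventory. A minimal sketch with illustrative values:

```
openshift_prometheus_namespace: prometheus
openshift_prometheus_node_selector: {"region": "infra"}
openshift_prometheus_pvc_size: 20G
openshift_prometheus_alertmanager_memory_limit: 1Gi
```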
diff --git a/roles/openshift_prometheus/files/openshift_prometheus.exports b/roles/openshift_prometheus/files/openshift_prometheus.exports
new file mode 100644
index 000000000..3ccedb1fd
--- /dev/null
+++ b/roles/openshift_prometheus/files/openshift_prometheus.exports
@@ -0,0 +1,3 @@
+/exports/prometheus *(rw,no_root_squash,no_wdelay)
+/exports/prometheus-alertmanager *(rw,no_root_squash,no_wdelay)
+/exports/prometheus-alertbuffer *(rw,no_root_squash,no_wdelay)
diff --git a/roles/openshift_prometheus/meta/main.yaml b/roles/openshift_prometheus/meta/main.yaml
new file mode 100644
index 000000000..33188bb7e
--- /dev/null
+++ b/roles/openshift_prometheus/meta/main.yaml
@@ -0,0 +1,19 @@
+---
+galaxy_info:
+ author: OpenShift Development <dev@lists.openshift.redhat.com>
+ description: Deploy OpenShift prometheus integration for the cluster
+ company: Red Hat, Inc.
+ license: license (Apache)
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ - name: Fedora
+ versions:
+ - all
+ categories:
+ - openshift
+dependencies:
+- { role: lib_openshift }
+- { role: openshift_facts }
diff --git a/roles/openshift_prometheus/tasks/create_pvs.yaml b/roles/openshift_prometheus/tasks/create_pvs.yaml
new file mode 100644
index 000000000..4e79da05f
--- /dev/null
+++ b/roles/openshift_prometheus/tasks/create_pvs.yaml
@@ -0,0 +1,36 @@
+---
+# Check for existence and then conditionally:
+# - evaluate templates
+# - create PVs
+#
+# These tasks idempotently create required Prometheus PV objects. Do not
+# call this file directly. This file is intended to be run as an
+# include that has a 'with_items' attached to it. Hence the use below
+# of variables like "{{ item.pv_label }}"
+
+- name: "Check if the {{ item.pv_label }} template has been created already"
+ oc_obj:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ state: list
+ kind: pv
+ name: "{{ item.pv_name }}"
+ register: prom_pv_check
+
+# Skip all of this if the PV already exists
+- block:
+ - name: "Ensure the {{ item.pv_label }} template is evaluated"
+ template:
+ src: "{{ item.pv_template }}.j2"
+ dest: "{{ tempdir }}/templates/{{ item.pv_template }}"
+
+ - name: "Ensure {{ item.pv_label }} is created"
+ oc_obj:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ kind: pv
+ name: "{{ item.pv_name }}"
+ state: present
+ delete_after: True
+ files:
+ - "{{ tempdir }}/templates/{{ item.pv_template }}"
+ when:
+ - not prom_pv_check.results.results.0
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
new file mode 100644
index 000000000..93bdda3e8
--- /dev/null
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -0,0 +1,241 @@
+---
+
+# namespace
+- name: Add prometheus project
+ oc_project:
+ state: "{{ state }}"
+ name: "{{ openshift_prometheus_namespace }}"
+ node_selector: "{{ openshift_prometheus_node_selector | oo_selector_to_string_list() }}"
+ description: Prometheus
+
+# secrets
+- name: Set alert and prometheus secrets
+ oc_secret:
+ state: "{{ state }}"
+ name: "{{ item }}-proxy"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ contents:
+ - path: session_secret
+ data: "{{ 43 | oo_random_word }}="
+ with_items:
+ - prometheus
+ - alerts
+
+# serviceaccount
+- name: create prometheus serviceaccount
+ oc_serviceaccount:
+ state: "{{ state }}"
+ name: prometheus
+ namespace: "{{ openshift_prometheus_namespace }}"
+    # TODO add annotations when supported
+ # annotations:
+ # serviceaccounts.openshift.io/oauth-redirectreference.prom: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}'
+ # serviceaccounts.openshift.io/oauth-redirectreference.alerts: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}'
+
+ secrets:
+ - prometheus-secrets
+ changed_when: no
+
+# TODO remove this when annotations are supported by oc_serviceaccount
+- name: annotate serviceaccount
+ command: >
+ {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
+ serviceaccount prometheus
+ serviceaccounts.openshift.io/oauth-redirectreference.prom='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}'
+ serviceaccounts.openshift.io/oauth-redirectreference.alerts='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}'
+
+
+# create clusterrolebinding for prometheus serviceaccount
+- name: Set cluster-reader permissions for prometheus
+ oc_adm_policy_user:
+ state: "{{ state }}"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ resource_kind: cluster-role
+ resource_name: cluster-reader
+ user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:prometheus"
+
+
+######################################################################
+# NFS
+# In the case that we are not running on a cloud provider, volumes must be statically provisioned
+
+- include: nfs.yaml
+ when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'))
+
+
+# create prometheus and alerts services
+# TODO join into 1 task with loop
+- name: Create prometheus service
+ oc_service:
+ state: "{{ state }}"
+ name: "{{ item.name }}"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ selector:
+ app: prometheus
+ labels:
+ name: "{{ item.name }}"
+ # TODO add annotations when supported
+ # annotations:
+ # service.alpha.openshift.io/serving-cert-secret-name: "{{item.name}}-tls"
+ ports:
+ - port: 443
+ targetPort: 8443
+ with_items:
+ - name: prometheus
+
+- name: Create alerts service
+ oc_service:
+ state: "{{ state }}"
+ name: "{{ item.name }}"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ selector:
+ app: prometheus
+ labels:
+ name: "{{ item.name }}"
+ # TODO add annotations when supported
+ # annotations:
+ # service.alpha.openshift.io/serving-cert-secret-name: "{{item.name}}-tls"
+ ports:
+ - port: 443
+ targetPort: 9443
+ with_items:
+ - name: alerts
+
+
+# Annotate services with secret name
+# TODO remove this when annotations are supported by oc_service
+- name: annotate prometheus service
+ command: >
+ {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
+ service prometheus 'service.alpha.openshift.io/serving-cert-secret-name=prometheus-tls'
+
+- name: annotate alerts service
+ command: >
+ {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
+ service alerts 'service.alpha.openshift.io/serving-cert-secret-name=prometheus-alerts-tls'
+
+# create prometheus and alerts routes
+- name: create prometheus and alerts routes
+ oc_route:
+ state: "{{ state }}"
+ name: "{{ item.name }}"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ service_name: "{{ item.name }}"
+ tls_termination: reencrypt
+ with_items:
+ - name: prometheus
+ - name: alerts
+
+# Storage
+- name: create prometheus pvc
+ oc_pvc:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ name: "{{ openshift_prometheus_pvc_name }}"
+ access_modes: "{{ openshift_prometheus_pvc_access_modes }}"
+ volume_capacity: "{{ openshift_prometheus_pvc_size }}"
+ selector: "{{ openshift_prometheus_pvc_pv_selector }}"
+
+- name: create alertmanager pvc
+ oc_pvc:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ name: "{{ openshift_prometheus_alertmanager_pvc_name }}"
+ access_modes: "{{ openshift_prometheus_alertmanager_pvc_access_modes }}"
+ volume_capacity: "{{ openshift_prometheus_alertmanager_pvc_size }}"
+ selector: "{{ openshift_prometheus_alertmanager_pvc_pv_selector }}"
+
+- name: create alertbuffer pvc
+ oc_pvc:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ name: "{{ openshift_prometheus_alertbuffer_pvc_name }}"
+ access_modes: "{{ openshift_prometheus_alertbuffer_pvc_access_modes }}"
+ volume_capacity: "{{ openshift_prometheus_alertbuffer_pvc_size }}"
+ selector: "{{ openshift_prometheus_alertbuffer_pvc_pv_selector }}"
+
+# create prometheus deployment
+- name: Set prometheus deployment template
+ template:
+ src: prometheus_deployment.j2
+ dest: "{{ tempdir }}/templates/prometheus.yaml"
+ vars:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ prom_replicas: "{{ openshift_prometheus_replicas }}"
+
+- name: Set prometheus deployment
+ oc_obj:
+ state: "{{ state }}"
+ name: "prometheus"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ kind: deployment
+ files:
+ - "{{ tempdir }}/templates/prometheus.yaml"
+ delete_after: true
+
+# prometheus configmap
+# Copy the additional rules file if it is defined
+- name: Copy additional rules file to host
+ copy:
+ src: "{{ openshift_prometheus_additional_rules_file }}"
+ dest: "{{ tempdir }}/prometheus.additional.rules"
+ when:
+ - openshift_prometheus_additional_rules_file is defined
+ - openshift_prometheus_additional_rules_file is not none
+ - openshift_prometheus_additional_rules_file | trim | length > 0
+
+- stat:
+ path: "{{ tempdir }}/prometheus.additional.rules"
+ register: additional_rules_stat
+
+# The kubernetes version impacts the prometheus scraping endpoint
+# so gather it before constructing the configmap
+- name: get oc version
+ oc_version:
+ register: oc_version
+
+- set_fact:
+ kubernetes_version: "{{ oc_version.results.kubernetes_short | float }}"
+
+- template:
+ src: prometheus.yml.j2
+ dest: "{{ tempdir }}/prometheus.yml"
+ changed_when: no
+
+- template:
+ src: prometheus.rules.j2
+ dest: "{{ tempdir }}/prometheus.rules"
+ changed_when: no
+
+# In prometheus configmap create "additional.rules" section if file exists
+- name: Set prometheus configmap
+ oc_configmap:
+ state: "{{ state }}"
+ name: "prometheus"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ from_file:
+ prometheus.rules: "{{ tempdir }}/prometheus.rules"
+ prometheus.additional.rules: "{{ tempdir }}/prometheus.additional.rules"
+ prometheus.yml: "{{ tempdir }}/prometheus.yml"
+  when: additional_rules_stat.stat.exists
+
+- name: Set prometheus configmap
+ oc_configmap:
+ state: "{{ state }}"
+ name: "prometheus"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ from_file:
+ prometheus.rules: "{{ tempdir }}/prometheus.rules"
+ prometheus.yml: "{{ tempdir }}/prometheus.yml"
+  when: not additional_rules_stat.stat.exists
+
+# alertmanager configmap
+- template:
+ src: alertmanager.yml.j2
+ dest: "{{ tempdir }}/alertmanager.yml"
+ changed_when: no
+
+- name: Set alertmanager configmap
+ oc_configmap:
+ state: "{{ state }}"
+ name: "prometheus-alerts"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ from_file:
+ alertmanager.yml: "{{ tempdir }}/alertmanager.yml"
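The two configmap tasks above differ only in whether `prometheus.additional.rules` is bundled, which is driven by `openshift_prometheus_additional_rules_file`. A sketch of wiring it from inventory, with an assumed path:

```
# the path is illustrative; the file must use the Prometheus alert rules format
openshift_prometheus_additional_rules_file: /path/to/additional.rules
```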
diff --git a/roles/openshift_prometheus/tasks/main.yaml b/roles/openshift_prometheus/tasks/main.yaml
new file mode 100644
index 000000000..523a64334
--- /dev/null
+++ b/roles/openshift_prometheus/tasks/main.yaml
@@ -0,0 +1,26 @@
+---
+
+- name: Create temp directory for doing work in on target
+ command: mktemp -td openshift-prometheus-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- set_fact:
+ tempdir: "{{ mktemp.stdout }}"
+
+- name: Create templates subdirectory
+ file:
+ state: directory
+ path: "{{ tempdir }}/templates"
+ mode: 0755
+ changed_when: False
+
+- include: install_prometheus.yaml
+ vars:
+ state: "{{ openshift_prometheus_state }}"
+
+- name: Delete temp directory
+ file:
+ name: "{{ tempdir }}"
+ state: absent
+ changed_when: False
diff --git a/roles/openshift_prometheus/tasks/nfs.yaml b/roles/openshift_prometheus/tasks/nfs.yaml
new file mode 100644
index 000000000..0b45f2cee
--- /dev/null
+++ b/roles/openshift_prometheus/tasks/nfs.yaml
@@ -0,0 +1,44 @@
+---
+# Tasks to statically provision NFS volumes
+# Include if not using dynamic volume provisioning
+- name: Ensure the /exports/ directory exists
+ file:
+ path: /exports/
+ state: directory
+ mode: 0755
+ owner: root
+ group: root
+
+- name: Ensure the Prometheus PV export directories exist
+ file:
+ path: "/exports/{{ item }}"
+ state: directory
+ mode: 0777
+ owner: nfsnobody
+ group: nfsnobody
+ with_items: "{{ openshift_prometheus_pv_exports }}"
+
+- name: Ensure the NFS exports for Prometheus PVs exist
+ copy:
+ src: openshift_prometheus.exports
+ dest: /etc/exports.d/openshift_prometheus.exports
+ register: nfs_exports_updated
+
+- name: Ensure the NFS export table is refreshed if exports were added
+ command: exportfs -ar
+ when:
+ - nfs_exports_updated.changed
+
+
+######################################################################
+# Create the required Prometheus PVs. Check out these online docs if you
+# need a refresher on includes looping with items:
+# * http://docs.ansible.com/ansible/playbooks_loops.html#loops-and-includes-in-2-0
+# * http://stackoverflow.com/a/35128533
+#
+# TODO: Handle the case where a PV template is updated in
+# openshift-ansible and the change needs to be landed on the managed
+# cluster.
+
+- include: create_pvs.yaml
+ with_items: "{{ openshift_prometheus_pv_data }}"
diff --git a/roles/openshift_prometheus/templates/alertmanager.yml.j2 b/roles/openshift_prometheus/templates/alertmanager.yml.j2
new file mode 100644
index 000000000..6c432a3d0
--- /dev/null
+++ b/roles/openshift_prometheus/templates/alertmanager.yml.j2
@@ -0,0 +1,20 @@
+global:
+
+# The root route on which each incoming alert enters.
+route:
+ # default route if none match
+ receiver: alert-buffer-wh
+
+ # The labels by which incoming alerts are grouped together. For example,
+ # multiple alerts coming in for cluster=A and alertname=LatencyHigh would
+ # be batched into a single group.
+ # TODO:
+ group_by: []
+
+  # All the above attributes are inherited by all child routes and can be
+  # overwritten on each.
+
+receivers:
+- name: alert-buffer-wh
+ webhook_configs:
+ - url: http://localhost:9099/topics/alerts
diff --git a/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2
new file mode 100644
index 000000000..55a5e19c3
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: prometheus-alertbuffer
+ labels:
+ storage: prometheus-alertbuffer
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/prometheus-alertbuffer
+ server: {{ openshift_prometheus_nfs_server }}
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2
new file mode 100644
index 000000000..4ee518735
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: prometheus-alertmanager
+ labels:
+ storage: prometheus-alertmanager
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/prometheus-alertmanager
+ server: {{ openshift_prometheus_nfs_server }}
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_prometheus/templates/prom-pv-server.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-server.yml.j2
new file mode 100644
index 000000000..933bf0f60
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prom-pv-server.yml.j2
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: prometheus
+ labels:
+ storage: prometheus
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/prometheus
+ server: {{ openshift_prometheus_nfs_server }}
+ persistentVolumeReclaimPolicy: Retain
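
All three PV templates are identical apart from the name, label, and export path. A claim binds to one of them through the storage label; a hedged sketch of such a claim (the role's actual PVC objects are not part of this hunk):

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: prometheus
    spec:
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 15Gi
      # bind to the PV labeled storage=prometheus above
      selector:
        matchLabels:
          storage: prometheus
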
diff --git a/roles/openshift_prometheus/templates/prometheus.rules.j2 b/roles/openshift_prometheus/templates/prometheus.rules.j2
new file mode 100644
index 000000000..e861dc127
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prometheus.rules.j2
@@ -0,0 +1,4 @@
+groups:
+- name: example-rules
+ interval: 30s # defaults to global interval
+ rules:
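
The group above ships with an empty rules list. A hedged example of a rule that could live under rules: (threshold and wording illustrative); note that inside this .j2 template any Prometheus {{ }} annotation templating would need {% raw %} guards to survive Jinja2 rendering:

    groups:
    - name: example-rules
      interval: 30s
      rules:
      - alert: NodeDown
        expr: up{job="kubernetes-nodes"} == 0
        for: 5m
        annotations:
          summary: A node has been unreachable for five minutes
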
diff --git a/roles/openshift_prometheus/templates/prometheus.yml.j2 b/roles/openshift_prometheus/templates/prometheus.yml.j2
new file mode 100644
index 000000000..63430f834
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prometheus.yml.j2
@@ -0,0 +1,174 @@
+rule_files:
+ - 'prometheus.rules'
+{% if openshift_prometheus_additional_rules_file is defined and openshift_prometheus_additional_rules_file is not none %}
+ - 'prometheus.additional.rules'
+{% endif %}
+
+
+
+# A scrape configuration for running Prometheus on a Kubernetes cluster.
+# This uses separate scrape configs for cluster components (i.e. API server, node)
+# and services to allow each to use different authentication configs.
+#
+# Kubernetes labels will be added as Prometheus labels on metrics via the
+# `labelmap` relabeling action.
+
+# Scrape config for API servers.
+#
+# Kubernetes exposes API servers as endpoints to the default/kubernetes
+# service so this uses `endpoints` role and uses relabelling to only keep
+# the endpoints associated with the default/kubernetes service using the
+# default named port `https`. This works for single API server deployments as
+# well as HA API server deployments.
+scrape_configs:
+- job_name: 'kubernetes-apiservers'
+
+ kubernetes_sd_configs:
+ - role: endpoints
+
+ scheme: https
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+ # Keep only the default/kubernetes service endpoints for the https port. This
+ # will add a target for each API server that Kubernetes registers as an
+ # endpoint of the default/kubernetes service.
+ relabel_configs:
+ - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+ action: keep
+ regex: default;kubernetes;https
+
+# Scrape config for nodes.
+#
+# Each node exposes a /metrics endpoint that contains operational metrics for
+# the Kubelet and other components.
+- job_name: 'kubernetes-nodes'
+
+ scheme: https
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+ kubernetes_sd_configs:
+ - role: node
+
+ relabel_configs:
+ - action: labelmap
+ regex: __meta_kubernetes_node_label_(.+)
+
+# Scrape config for controllers.
+#
+# Each master node exposes a /metrics endpoint on :8444 that contains operational metrics for
+# the controllers.
+#
+# TODO: move this to a pure endpoints based metrics gatherer when controllers are exposed via
+# endpoints.
+- job_name: 'kubernetes-controllers'
+
+ scheme: https
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+ kubernetes_sd_configs:
+ - role: endpoints
+
+ # Keep only the default/kubernetes service endpoints for the https port, and then
+ # set the port to 8444. This is the default configuration for the controllers on OpenShift
+ # masters.
+ relabel_configs:
+ - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+ action: keep
+ regex: default;kubernetes;https
+ - source_labels: [__address__]
+ action: replace
+ target_label: __address__
+ regex: (.+)(?::\d+)
+ replacement: $1:8444
+
+# Scrape config for cAdvisor.
+#
+# Beginning in Kube 1.7, each node exposes a /metrics/cadvisor endpoint that
+# reports container metrics for each running pod. Scrape those by default.
+- job_name: 'kubernetes-cadvisor'
+
+ scheme: https
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+{% if kubernetes_version | float >= 1.7 %}
+ metrics_path: /metrics/cadvisor
+{% else %}
+ metrics_path: /metrics
+{% endif %}
+
+ kubernetes_sd_configs:
+ - role: node
+
+ relabel_configs:
+ - action: labelmap
+ regex: __meta_kubernetes_node_label_(.+)
+
+# Scrape config for service endpoints.
+#
+# The relabeling allows the actual service scrape endpoint to be configured
+# via the following annotations:
+#
+# * `prometheus.io/scrape`: Only scrape services that have a value of `true`
+# * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
+# to set this to `https` and most likely set the `tls_config` of the scrape config.
+# * `prometheus.io/path`: If the metrics path is not `/metrics`, override this.
+# * `prometheus.io/port`: If the metrics are exposed on a different port than the
+# service's, set this appropriately.
+- job_name: 'kubernetes-service-endpoints'
+
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ # TODO: this should be per target
+ insecure_skip_verify: true
+
+ kubernetes_sd_configs:
+ - role: endpoints
+
+ relabel_configs:
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
+ action: keep
+ regex: true
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
+ action: replace
+ target_label: __scheme__
+ regex: (https?)
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
+ action: replace
+ target_label: __metrics_path__
+ regex: (.+)
+ - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
+ action: replace
+ target_label: __address__
+ regex: (.+)(?::\d+);(\d+)
+ replacement: $1:$2
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_username]
+ action: replace
+ target_label: __basic_auth_username__
+ regex: (.+)
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_password]
+ action: replace
+ target_label: __basic_auth_password__
+ regex: (.+)
+ - action: labelmap
+ regex: __meta_kubernetes_service_label_(.+)
+ - source_labels: [__meta_kubernetes_namespace]
+ action: replace
+ target_label: kubernetes_namespace
+ - source_labels: [__meta_kubernetes_service_name]
+ action: replace
+ target_label: kubernetes_name
+
+alerting:
+ alertmanagers:
+ - scheme: http
+ static_configs:
+ - targets:
+ - "localhost:9093"
diff --git a/roles/openshift_prometheus/templates/prometheus_deployment.j2 b/roles/openshift_prometheus/templates/prometheus_deployment.j2
new file mode 100644
index 000000000..98c117f19
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prometheus_deployment.j2
@@ -0,0 +1,240 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: prometheus
+ namespace: {{ namespace }}
+ labels:
+ app: prometheus
+spec:
+ replicas: {{ prom_replicas|default(1) }}
+ selector:
+ matchLabels:
+ app: prometheus
+ template:
+ metadata:
+ name: prometheus
+ labels:
+ app: prometheus
+ spec:
+ serviceAccountName: prometheus
+{% if openshift_prometheus_node_selector is iterable and openshift_prometheus_node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in openshift_prometheus_node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+ containers:
+ # Deploy Prometheus behind an oauth proxy
+ - name: prom-proxy
+ image: "{{ openshift_prometheus_image_proxy }}"
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+{% if openshift_prometheus_oauth_proxy_memory_requests is defined and openshift_prometheus_oauth_proxy_memory_requests is not none %}
+ memory: "{{openshift_prometheus_oauth_proxy_memory_requests}}"
+{% endif %}
+{% if openshift_prometheus_oauth_proxy_cpu_requests is defined and openshift_prometheus_oauth_proxy_cpu_requests is not none %}
+ cpu: "{{openshift_prometheus_oauth_proxy_cpu_requests}}"
+{% endif %}
+ limits:
+{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %}
+ memory: "{{openshift_prometheus_oauth_proxy_memory_limit}}"
+{% endif %}
+{% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %}
+ cpu: "{{openshift_prometheus_oauth_proxy_cpu_limit}}"
+{% endif %}
+ ports:
+ - containerPort: 8443
+ name: web
+ args:
+ - -provider=openshift
+ - -https-address=:8443
+ - -http-address=
+ - -email-domain=*
+ - -upstream=http://localhost:9090
+ - -client-id=system:serviceaccount:{{ namespace }}:prometheus
+ - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}'
+ - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}'
+ - -tls-cert=/etc/tls/private/tls.crt
+ - -tls-key=/etc/tls/private/tls.key
+ - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
+ - -cookie-secret-file=/etc/proxy/secrets/session_secret
+ - -skip-auth-regex=^/metrics
+ volumeMounts:
+ - mountPath: /etc/tls/private
+ name: prometheus-tls
+ - mountPath: /etc/proxy/secrets
+ name: prometheus-secrets
+ - mountPath: /prometheus
+ name: prometheus-data
+
+ - name: prometheus
+ args:
+ - --storage.tsdb.retention=6h
+ - --config.file=/etc/prometheus/prometheus.yml
+ - --web.listen-address=localhost:9090
+ image: "{{ openshift_prometheus_image_prometheus }}"
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+{% if openshift_prometheus_memory_requests is defined and openshift_prometheus_memory_requests is not none %}
+ memory: "{{openshift_prometheus_memory_requests}}"
+{% endif %}
+{% if openshift_prometheus_cpu_requests is defined and openshift_prometheus_cpu_requests is not none %}
+ cpu: "{{openshift_prometheus_cpu_requests}}"
+{% endif %}
+ limits:
+{% if openshift_prometheus_memory_limit is defined and openshift_prometheus_memory_limit is not none %}
+ memory: "{{ openshift_prometheus_memory_limit }}"
+{% endif %}
+{% if openshift_prometheus_cpu_limit is defined and openshift_prometheus_cpu_limit is not none %}
+ cpu: "{{openshift_prometheus_cpu_limit}}"
+{% endif %}
+
+ volumeMounts:
+ - mountPath: /etc/prometheus
+ name: prometheus-config
+ - mountPath: /prometheus
+ name: prometheus-data
+
+ # Deploy alertmanager behind prometheus-alert-buffer behind an oauth proxy
+ - name: alerts-proxy
+ image: "{{ openshift_prometheus_image_proxy }}"
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+{% if openshift_prometheus_oauth_proxy_memory_requests is defined and openshift_prometheus_oauth_proxy_memory_requests is not none %}
+ memory: "{{openshift_prometheus_oauth_proxy_memory_requests}}"
+{% endif %}
+{% if openshift_prometheus_oauth_proxy_cpu_requests is defined and openshift_prometheus_oauth_proxy_cpu_requests is not none %}
+ cpu: "{{openshift_prometheus_oauth_proxy_cpu_requests}}"
+{% endif %}
+ limits:
+{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %}
+ memory: "{{openshift_prometheus_oauth_proxy_memory_limit}}"
+{% endif %}
+{% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %}
+ cpu: "{{openshift_prometheus_oauth_proxy_cpu_limit}}"
+{% endif %}
+ ports:
+ - containerPort: 9443
+ name: web
+ args:
+ - -provider=openshift
+ - -https-address=:9443
+ - -http-address=
+ - -email-domain=*
+ - -upstream=http://localhost:9099
+ - -client-id=system:serviceaccount:{{ namespace }}:prometheus
+ - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}'
+ - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}'
+ - -tls-cert=/etc/tls/private/tls.crt
+ - -tls-key=/etc/tls/private/tls.key
+ - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
+ - -cookie-secret-file=/etc/proxy/secrets/session_secret
+ volumeMounts:
+ - mountPath: /etc/tls/private
+ name: alerts-tls
+ - mountPath: /etc/proxy/secrets
+ name: alerts-secrets
+
+ - name: alert-buffer
+ args:
+ - --storage-path=/alert-buffer/messages.db
+ image: "{{ openshift_prometheus_image_alertbuffer }}"
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+{% if openshift_prometheus_alertbuffer_memory_requests is defined and openshift_prometheus_alertbuffer_memory_requests is not none %}
+ memory: "{{openshift_prometheus_alertbuffer_memory_requests}}"
+{% endif %}
+{% if openshift_prometheus_alertbuffer_cpu_requests is defined and openshift_prometheus_alertbuffer_cpu_requests is not none %}
+ cpu: "{{openshift_prometheus_alertbuffer_cpu_requests}}"
+{% endif %}
+ limits:
+{% if openshift_prometheus_alertbuffer_memory_limit is defined and openshift_prometheus_alertbuffer_memory_limit is not none %}
+ memory: "{{openshift_prometheus_alertbuffer_memory_limit}}"
+{% endif %}
+{% if openshift_prometheus_alertbuffer_cpu_limit is defined and openshift_prometheus_alertbuffer_cpu_limit is not none %}
+ cpu: "{{openshift_prometheus_alertbuffer_cpu_limit}}"
+{% endif %}
+ volumeMounts:
+ - mountPath: /alert-buffer
+ name: alert-buffer-data
+ ports:
+ - containerPort: 9099
+ name: alert-buf
+
+ - name: alertmanager
+ args:
+ - -config.file=/etc/alertmanager/alertmanager.yml
+ image: "{{ openshift_prometheus_image_alertmanager }}"
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+{% if openshift_prometheus_alertmanager_memory_requests is defined and openshift_prometheus_alertmanager_memory_requests is not none %}
+ memory: "{{openshift_prometheus_alertmanager_memory_requests}}"
+{% endif %}
+{% if openshift_prometheus_alertmanager_cpu_requests is defined and openshift_prometheus_alertmanager_cpu_requests is not none %}
+ cpu: "{{openshift_prometheus_alertmanager_cpu_requests}}"
+{% endif %}
+ limits:
+{% if openshift_prometheus_alertmanager_memory_limit is defined and openshift_prometheus_alertmanager_memory_limit is not none %}
+ memory: "{{openshift_prometheus_alertmanager_memory_limit}}"
+{% endif %}
+{% if openshift_prometheus_alertmanager_cpu_limit is defined and openshift_prometheus_alertmanager_cpu_limit is not none %}
+ cpu: "{{openshift_prometheus_alertmanager_cpu_limit}}"
+{% endif %}
+ ports:
+ - containerPort: 9093
+ name: web
+ volumeMounts:
+ - mountPath: /etc/alertmanager
+ name: alertmanager-config
+ - mountPath: /alertmanager
+ name: alertmanager-data
+
+ restartPolicy: Always
+ volumes:
+ - name: prometheus-config
+ configMap:
+ defaultMode: 420
+ name: prometheus
+ - name: prometheus-secrets
+ secret:
+ secretName: prometheus-proxy
+ - name: prometheus-tls
+ secret:
+ secretName: prometheus-tls
+ - name: prometheus-data
+{% if openshift_prometheus_storage_type == 'pvc' %}
+ persistentVolumeClaim:
+ claimName: {{ openshift_prometheus_pvc_name }}
+{% else %}
+ emptyDir: {}
+{% endif %}
+ - name: alertmanager-config
+ configMap:
+ defaultMode: 420
+ name: prometheus-alerts
+ - name: alerts-secrets
+ secret:
+ secretName: alerts-proxy
+ - name: alerts-tls
+ secret:
+ secretName: prometheus-alerts-tls
+ - name: alertmanager-data
+{% if openshift_prometheus_alertmanager_storage_type == 'pvc' %}
+ persistentVolumeClaim:
+ claimName: {{ openshift_prometheus_alertmanager_pvc_name }}
+{% else %}
+ emptyDir: {}
+{% endif %}
+ - name: alert-buffer-data
+{% if openshift_prometheus_alertbuffer_storage_type == 'pvc' %}
+ persistentVolumeClaim:
+ claimName: {{ openshift_prometheus_alertbuffer_pvc_name }}
+{% else %}
+ emptyDir: {}
+{% endif %}
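
Each of the three data volumes above switches between a PVC and an emptyDir on a *_storage_type variable. A minimal sketch of the inventory settings the template consumes (values illustrative):

    openshift_prometheus_storage_type: pvc
    openshift_prometheus_pvc_name: prometheus
    openshift_prometheus_alertmanager_storage_type: emptydir
    openshift_prometheus_alertbuffer_storage_type: emptydir
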
diff --git a/roles/openshift_prometheus/tests/inventory b/roles/openshift_prometheus/tests/inventory
new file mode 100644
index 000000000..878877b07
--- /dev/null
+++ b/roles/openshift_prometheus/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/roles/openshift_prometheus/tests/test.yaml b/roles/openshift_prometheus/tests/test.yaml
new file mode 100644
index 000000000..37baf573c
--- /dev/null
+++ b/roles/openshift_prometheus/tests/test.yaml
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+ remote_user: root
+ roles:
+ - openshift_prometheus
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 7458db87e..f972c0fd9 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -6,23 +6,24 @@
- when: not ostree_booted.stat.exists
block:
+ # TODO: This needs to be removed and placed into a role
- name: Ensure libselinux-python is installed
package: name=libselinux-python state=present
- name: Create any additional repos that are defined
- template:
- src: yum_repo.j2
- dest: /etc/yum.repos.d/openshift_additional.repo
- when:
- - openshift_additional_repos | length > 0
- notify: refresh cache
-
- - name: Remove the additional repos if no longer defined
- file:
- dest: /etc/yum.repos.d/openshift_additional.repo
- state: absent
- when:
- - openshift_additional_repos | length == 0
+ yum_repository:
+ description: "{{ item.description | default(item.name | default(item.id)) }}"
+ name: "{{ item.name | default(item.id) }}"
+ baseurl: "{{ item.baseurl }}"
+ gpgkey: "{{ item.gpgkey | default(omit)}}"
+ gpgcheck: "{{ item.gpgcheck | default(1) }}"
+ sslverify: "{{ item.sslverify | default(1) }}"
+ sslclientkey: "{{ item.sslclientkey | default(omit) }}"
+ sslclientcert: "{{ item.sslclientcert | default(omit) }}"
+ file: "{{ item.name }}"
+ enabled: "{{ item.enabled | default('no')}}"
+ with_items: "{{ openshift_additional_repos }}"
+ when: openshift_additional_repos | length > 0
notify: refresh cache
# Singleton block
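
Given the parameters the yum_repository task now consumes, an entry in openshift_additional_repos would be shaped roughly like this (URLs illustrative; unspecified keys fall back to the defaults shown in the task):

    openshift_additional_repos:
      - id: example-repo
        name: example-repo
        description: Example additional repository
        baseurl: https://mirror.example.com/repo/
        gpgkey: https://mirror.example.com/RPM-GPG-KEY-example
        gpgcheck: 1
        enabled: yes
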
diff --git a/roles/openshift_repos/templates/yum_repo.j2 b/roles/openshift_repos/templates/yum_repo.j2
deleted file mode 100644
index ef2cd6603..000000000
--- a/roles/openshift_repos/templates/yum_repo.j2
+++ /dev/null
@@ -1,14 +0,0 @@
-{% for repo in openshift_additional_repos %}
-[{{ repo.id }}]
-name={{ repo.name | default(repo.id) }}
-baseurl={{ repo.baseurl }}
-{% set enable_repo = repo.enabled | default(1) %}
-enabled={{ 1 if ( enable_repo == 1 or enable_repo == True ) else 0 }}
-{% set enable_gpg_check = repo.gpgcheck | default(1) %}
-gpgcheck={{ 1 if ( enable_gpg_check == 1 or enable_gpg_check == True ) else 0 }}
-{% for key, value in repo.iteritems() %}
-{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck'] and value is defined %}
-{{ key }}={{ value }}
-{% endif %}
-{% endfor %}
-{% endfor %}
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index 686857d94..64f94347b 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -168,19 +168,19 @@
- "{{ mktemp.stdout }}/service_catalog_api_server.yml"
delete_after: yes
-- template:
- src: api_server_service.j2
- dest: "{{ mktemp.stdout }}/service_catalog_api_service.yml"
-
- name: Set Service Catalog API Server service
- oc_obj:
- state: present
- namespace: "kube-service-catalog"
- kind: service
+ oc_service:
name: apiserver
- files:
- - "{{ mktemp.stdout }}/service_catalog_api_service.yml"
- delete_after: yes
+ namespace: kube-service-catalog
+ state: present
+ ports:
+ - name: secure
+ port: 443
+ protocol: TCP
+ targetPort: 6443
+ selector:
+ app: apiserver
+ session_affinity: None
- template:
src: api_server_route.j2
@@ -216,19 +216,19 @@
- "{{ mktemp.stdout }}/controller_manager.yml"
delete_after: yes
-- template:
- src: controller_manager_service.j2
- dest: "{{ mktemp.stdout }}/controller_manager_service.yml"
-
- name: Set Controller Manager service
- oc_obj:
- state: present
- namespace: "kube-service-catalog"
- kind: service
+ oc_service:
name: controller-manager
- files:
- - "{{ mktemp.stdout }}/controller_manager_service.yml"
- delete_after: yes
+ namespace: kube-service-catalog
+ state: present
+ ports:
+ - port: 6443
+ protocol: TCP
+ targetPort: 6443
+ selector:
+ app: controller-manager
+ session_affinity: None
+ service_type: ClusterIP
- include: start_api_server.yml
diff --git a/roles/openshift_service_catalog/templates/api_server_service.j2 b/roles/openshift_service_catalog/templates/api_server_service.j2
deleted file mode 100644
index bae337201..000000000
--- a/roles/openshift_service_catalog/templates/api_server_service.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: apiserver
-spec:
- ports:
- - name: secure
- port: 443
- protocol: TCP
- targetPort: 6443
- selector:
- app: apiserver
- sessionAffinity: None
diff --git a/roles/openshift_service_catalog/templates/controller_manager_service.j2 b/roles/openshift_service_catalog/templates/controller_manager_service.j2
deleted file mode 100644
index 2bac645fc..000000000
--- a/roles/openshift_service_catalog/templates/controller_manager_service.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: controller-manager
-spec:
- ports:
- - port: 6443
- protocol: TCP
- targetPort: 6443
- selector:
- app: controller-manager
- sessionAffinity: None
- type: ClusterIP
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index d3de2165a..a059745a6 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -55,7 +55,7 @@ defined:
| Name | Default value | Description |
|-------------------|---------------|-----------------------------------------|
-| glusterfs_devices | None | A list of block devices that will be completely managed as part of a GlusterFS cluster. There must be at least one device listed. Each device must be bare, e.g. no partitions or LVM PVs. **Example:** '[ "/dev/sdb" ]'
+| glusterfs_devices | None | A list of block devices that will be completely managed as part of a GlusterFS cluster. There must be at least one device listed. Each device must be bare, e.g. no partitions or LVM PVs. **Example:** '[ "/dev/sdb" ]' **NOTE:** You MUST set this as a host variable on each node host; if it is set as a group variable, Ansible interprets it as a string rather than an array. See https://github.com/openshift/openshift-ansible/issues/5071
In addition, each host may specify the following variables to further control
their configuration as GlusterFS nodes:
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index a5887465e..0b3d3aef1 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -52,8 +52,8 @@ openshift_storage_glusterfs_registry_heketi_ssh_port: "{{ openshift_storage_glus
openshift_storage_glusterfs_registry_heketi_ssh_user: "{{ openshift_storage_glusterfs_heketi_ssh_user }}"
openshift_storage_glusterfs_registry_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_heketi_ssh_sudo }}"
openshift_storage_glusterfs_registry_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_heketi_ssh_keyfile | default(omit) }}"
-r_openshift_master_firewall_enabled: True
-r_openshift_master_use_firewalld: False
+r_openshift_storage_glusterfs_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_openshift_storage_glusterfs_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
r_openshift_storage_glusterfs_os_firewall_deny: []
r_openshift_storage_glusterfs_os_firewall_allow:
- service: glusterfs_sshd
diff --git a/roles/openshift_storage_nfs/defaults/main.yml b/roles/openshift_storage_nfs/defaults/main.yml
index 4a2bc6141..e7e0b331b 100644
--- a/roles/openshift_storage_nfs/defaults/main.yml
+++ b/roles/openshift_storage_nfs/defaults/main.yml
@@ -1,6 +1,6 @@
---
-r_openshift_storage_nfs_firewall_enabled: True
-r_openshift_storage_nfs_use_firewalld: False
+r_openshift_storage_nfs_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_openshift_storage_nfs_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
r_openshift_storage_nfs_os_firewall_deny: []
r_openshift_storage_nfs_os_firewall_allow:
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index a6b8a40c8..c0ea00f34 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -60,13 +60,16 @@
# It also allows for optional trailing data which:
# - must start with a dash
# - may contain numbers
+ # - may contain dots (https://github.com/openshift/openshift-ansible/issues/5192)
+ #
- name: (Enterprise) Verify openshift_image_tag is valid
when: openshift.common.deployment_type == 'openshift-enterprise'
assert:
that:
- - "{{ openshift_image_tag|match('(^v\\d+\\.\\d+[\\.\\d+]*(-\\d+)?$)') }}"
+ - "{{ openshift_image_tag|match('(^v\\d+\\.\\d+(\\.\\d+)*(-\\d+(\\.\\d+)*)?$)') }}"
msg: |-
- openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, v1.2-1, v1.2.3-4
+ openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3,
+ v3.5.1.3.4, v1.2-1, v1.2.3-4, v1.2.3-4.5, v1.2.3-4.5.6
You specified openshift_image_tag={{ openshift_image_tag }}
# Make sure we copy this to a fact if given a var:
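
A quick way to spot-check the relaxed pattern against sample tags, using the same match filter as the assertion above (tags taken from the examples in the message):

    - hosts: localhost
      gather_facts: false
      tasks:
        - name: Spot-check the relaxed openshift_image_tag pattern
          assert:
            that:
              - "{{ item|match('(^v\\d+\\.\\d+(\\.\\d+)*(-\\d+(\\.\\d+)*)?$)') }}"
          with_items:
            - v3.5.1.3
            - v1.2.3-4.5
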
diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/set_version_containerized.yml
index 4d9f72f01..a2a579e9d 100644
--- a/roles/openshift_version/tasks/set_version_containerized.yml
+++ b/roles/openshift_version/tasks/set_version_containerized.yml
@@ -1,6 +1,6 @@
---
- set_fact:
- l_use_crio: "{{ openshift_docker_use_crio | default(false) }}"
+ l_use_crio: "{{ openshift_use_crio | default(false) }}"
- name: Set containerized version to configure if openshift_image_tag specified
set_fact:
diff --git a/roles/os_firewall/defaults/main.yml b/roles/os_firewall/defaults/main.yml
index f96a80f1c..2cae94411 100644
--- a/roles/os_firewall/defaults/main.yml
+++ b/roles/os_firewall/defaults/main.yml
@@ -2,4 +2,4 @@
os_firewall_enabled: True
# firewalld is not supported on Atomic Host
# https://bugzilla.redhat.com/show_bug.cgi?id=1403331
-os_firewall_use_firewalld: "{{ False }}"
+os_firewall_use_firewalld: False
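
With the storage-role defaults above now derived from these settings, a single inventory-level toggle propagates to every consuming role; a minimal sketch:

    # inventory group_vars (sketch)
    os_firewall_enabled: True
    os_firewall_use_firewalld: False   # firewalld is not supported on Atomic Host
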
diff --git a/setup.py b/setup.py
index b9c34a8b8..c0c08b4d2 100644
--- a/setup.py
+++ b/setup.py
@@ -29,6 +29,7 @@ def find_files(base_dir, exclude_dirs, include_dirs, file_regex):
if exclude_dirs is not None:
exclude_regex = r'|'.join([fnmatch.translate(x) for x in exclude_dirs]) or r'$.'
+ # Don't use include_dirs; it is broken
if include_dirs is not None:
include_regex = r'|'.join([fnmatch.translate(x) for x in include_dirs]) or r'$.'
@@ -47,6 +48,36 @@ def find_files(base_dir, exclude_dirs, include_dirs, file_regex):
return found
+def find_entrypoint_playbooks():
+ '''find entry point playbooks as defined by openshift-ansible'''
+ playbooks = set()
+ included_playbooks = set()
+
+ exclude_dirs = ['adhoc', 'tasks']
+ for yaml_file in find_files(
+ os.path.join(os.getcwd(), 'playbooks'),
+ exclude_dirs, None, r'\.ya?ml$'):
+ with open(yaml_file, 'r') as contents:
+ for task in yaml.safe_load(contents) or []:
+ if not isinstance(task, dict):
+ # Skip entries that are not task dictionaries
+ continue
+ if 'include' in task:
+ # Add the playbook and capture included playbooks
+ playbooks.add(yaml_file)
+ included_file_name = task['include'].split()[0]
+ included_file = os.path.normpath(
+ os.path.join(os.path.dirname(yaml_file),
+ included_file_name))
+ included_playbooks.add(included_file)
+ elif 'hosts' in task:
+ playbooks.add(yaml_file)
+ # Evaluate the difference between all playbooks and included playbooks
+ entrypoint_playbooks = sorted(playbooks.difference(included_playbooks))
+ print('Entry point playbook count: {}'.format(len(entrypoint_playbooks)))
+ return entrypoint_playbooks
+
+
class OpenShiftAnsibleYamlLint(Command):
''' Command to run yamllint '''
description = "Run yamllint tests"
@@ -206,7 +237,7 @@ class OpenShiftAnsibleSyntaxCheck(Command):
user_options = []
# Colors
- FAIL = '\033[91m' # Red
+ FAIL = '\033[31m' # Red
ENDC = '\033[0m' # Reset
def initialize_options(self):
@@ -221,43 +252,56 @@ class OpenShiftAnsibleSyntaxCheck(Command):
''' run command '''
has_errors = False
- playbooks = set()
- included_playbooks = set()
+ print('Ansible Deprecation Checks')
+ exclude_dirs = ['adhoc', 'files', 'meta', 'test', 'tests', 'vars', '.tox']
for yaml_file in find_files(
- os.path.join(os.getcwd(), 'playbooks', 'byo'),
- None, None, r'\.ya?ml$'):
+ os.getcwd(), exclude_dirs, None, r'\.ya?ml$'):
with open(yaml_file, 'r') as contents:
- for task in yaml.safe_load(contents):
+ for task in yaml.safe_load(contents) or []:
if not isinstance(task, dict):
- # Skip yaml files which do not contain plays or includes
+ # Skip entries that are not task dictionaries
continue
- if 'include' in task:
- # Add the playbook and capture included playbooks
- playbooks.add(yaml_file)
- included_file_name = task['include'].split()[0]
- included_file = os.path.normpath(
- os.path.join(os.path.dirname(yaml_file),
- included_file_name))
- included_playbooks.add(included_file)
- elif 'hosts' in task:
- playbooks.add(yaml_file)
- # Evaluate the difference between all playbooks and included playbooks
- entrypoint_playbooks = sorted(playbooks.difference(included_playbooks))
- print('Entry point playbook count: {}'.format(len(entrypoint_playbooks)))
- # Syntax each entry point playbook
- for playbook in entrypoint_playbooks:
+ if 'when' in task:
+ if '{{' in task['when'] or '{%' in task['when']:
+ print('{}Error: Usage of Jinja2 templating delimiters '
+ 'in when conditions is deprecated in Ansible 2.3.\n'
+ ' File: {}\n'
+ ' Found: "{}"{}'.format(
+ self.FAIL, yaml_file,
+ task['when'], self.ENDC))
+ has_errors = True
+ # TODO (rteague): This test will be enabled once we move to Ansible 2.4
+ # if 'include' in task:
+ # print('{}Error: The `include` directive is deprecated in Ansible 2.4.\n'
+ # 'https://github.com/ansible/ansible/blob/devel/CHANGELOG.md\n'
+ # ' File: {}\n'
+ # ' Found: "include: {}"{}'.format(
+ # self.FAIL, yaml_file, task['include'], self.ENDC))
+ # has_errors = True
+
+ print('Ansible Playbook Entry Point Syntax Checks')
+ for playbook in find_entrypoint_playbooks():
print('-' * 60)
print('Syntax checking playbook: {}'.format(playbook))
- try:
- subprocess.check_output(
- ['ansible-playbook', '-i localhost,',
- '--syntax-check', playbook]
- )
- except subprocess.CalledProcessError as cpe:
- print('{}Execution failed: {}{}'.format(
- self.FAIL, cpe, self.ENDC))
+
+ # Error on any entry points in 'common'
+ if 'common' in playbook:
+ print('{}Invalid entry point playbook. All playbooks must'
+ ' start in playbooks/byo{}'.format(self.FAIL, self.ENDC))
has_errors = True
+ # --syntax-check each entry point playbook
+ else:
+ try:
+ subprocess.check_output(
+ ['ansible-playbook', '-i localhost,',
+ '--syntax-check', playbook]
+ )
+ except subprocess.CalledProcessError as cpe:
+ print('{}Execution failed: {}{}'.format(
+ self.FAIL, cpe, self.ENDC))
+ has_errors = True
+
if has_errors:
raise SystemExit(1)
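
For reference, a hedged illustration of what the new deprecation check flags. The first task (illustrative) would be reported because its when condition uses Jinja2 delimiters; the second is the accepted bare-expression form:

    # flagged: Jinja2 templating delimiters in a when condition
    - name: Example task
      debug: msg=hello
      when: "{{ openshift_use_crio | default(false) }}"

    # accepted: bare expression
    - name: Example task
      debug: msg=hello
      when: openshift_use_crio | default(false)
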
diff --git a/tox.ini b/tox.ini
index 53a9222d8..899767833 100644
--- a/tox.ini
+++ b/tox.ini
@@ -22,6 +22,5 @@ commands =
pylint: python setup.py lint
yamllint: python setup.py yamllint
generate_validation: python setup.py generate_validation
- # TODO(rhcarvalho): check syntax of other important entrypoint playbooks
ansible_syntax: python setup.py ansible_syntax
integration: python -c 'print("run test/integration/run-tests.sh")'