236 files changed, 3897 insertions, 2239 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 56c99bf4c..bde176e44 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.4.7-1 ./
+3.4.17-1 ./
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 000000000..001bfdc39
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,15 @@
+sudo: false
+
+language: python
+python:
+ - "2.7"
+
+install:
+ - pip install -r requirements.txt
+
+script:
+ # TODO(rhcarvalho): check syntax of other important entrypoint playbooks
+ - ansible-playbook --syntax-check playbooks/byo/config.yml
+ # TODO(rhcarvalho): update make ci to pick up these tests
+ - nosetests --tests=test
+ - cd utils && make ci
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 000000000..1145da495
--- /dev/null
+++ b/CONTRIBUTING.md
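Review bot: The `install` step in `.travis.yml` installs only `requirements.txt`, while the CONTRIBUTING.md added in this same commit tells contributors to run `pip install -r utils/test-requirements.txt` before `nosetests`, so CI and a local run may resolve different dependency sets. If the test dependencies are not pulled in by `make ci`, the step could read `- pip install -r requirements.txt -r utils/test-requirements.txt`. Whether either file pins versions is not visible here; unpinned installs make a CI result depend on whatever the package index serves that day.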
@@ -0,0 +1,111 @@
+# Contributing
+
+Thank you for contributing to OpenShift Ansible. This document explains how the
+repository is organized, and how to submit contributions.
+
+## Introduction
+
+Before submitting code changes, get familiarized with these documents:
+
+- [Core Concepts](https://github.com/openshift/openshift-ansible/blob/master/docs/core_concepts_guide.adoc)
+- [Best Practices Guide](https://github.com/openshift/openshift-ansible/blob/master/docs/best_practices_guide.adoc)
+- [Style Guide](https://github.com/openshift/openshift-ansible/blob/master/docs/style_guide.adoc)
+
+## Repository structure
+
+### Ansible
+
+```
+.
+├── inventory Contains dynamic inventory scripts, and examples of
+│ Ansible inventories.
+├── library Contains Python modules used by the playbooks.
+├── playbooks Contains Ansible playbooks targeting multiple use cases.
+└── roles Contains Ansible roles, units of shared behavior among
+ playbooks.
+```
+
+#### Ansible plugins
+
+These are plugins used in playbooks and roles:
+
+```
+.
+├── ansible-profile
+├── callback_plugins
+├── filter_plugins
+└── lookup_plugins
+```
+
+### Scripts
+
+```
+.
+├── bin [DEPRECATED] Contains the `bin/cluster` script, a
+│ wrapper around the Ansible playbooks that ensures proper
+│ configuration, and facilitates installing, updating,
+│ destroying and configuring OpenShift clusters.
+│ Note: this tool is kept in the repository for legacy
+│ reasons and will be removed at some point.
+└── utils Contains the `atomic-openshift-installer` command, an
+ interactive CLI utility to install OpenShift across a
+ set of hosts.
+```
+
+### Documentation
+
+```
+.
+└── docs Contains documentation for this repository.
+```
+
+### Tests
+
+```
+.
+└── test Contains tests.
+```
+
+### Others
+
+```
+.
+└── git Contains some helper scripts for repository maintenance.
+```
+
+## Building RPMs
+
+See the [RPM build instructions](BUILD.md).
+ +## Running tests + +We use [Nose](http://readthedocs.org/docs/nose/) as a test runner. Make sure it +is installed along with other test dependencies: + +``` +pip install -r utils/test-requirements.txt +``` + +Run the tests with: + +``` +nosetests +``` + +## Submitting contributions + +1. Go through the guides from the [introduction](#Introduction). +2. Fork this repository, and create a work branch in your fork. +3. Make changes and commit. You may want to review your changes and run tests + before pushing your branch. +4. Open a Pull Request. + +One of the repository maintainers will then review the PR and submit it for +testing. + +The `default` test job is publicly accessible at +https://ci.openshift.redhat.com/jenkins/job/openshift-ansible/. The other jobs +are run on a different Jenkins host that is not publicly accessible, however the +test results are posted to S3 buckets when complete. + +The test output of each job is also posted to the Pull Request as comments. diff --git a/Dockerfile b/Dockerfile index 70f6f8a18..f3d45837a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -8,6 +8,9 @@ LABEL Version="v3.1.1.901" LABEL Release="6" LABEL BZComponent="aos3-installation-docker" LABEL Architecture="x86_64" +LABEL io.k8s.description="Ansible code and playbooks for installing Openshift Container Platform." \ + io.k8s.display-name="Openshift Installer" \ + io.openshift.tags="openshift,installer" RUN INSTALL_PKGS="atomic-openshift-utils" && \ yum install -y --enablerepo=rhel-7-server-ose-3.2-rpms $INSTALL_PKGS && \ 
@@ -1,58 +1,59 @@ [![Join the chat at https://gitter.im/openshift/openshift-ansible](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/openshift/openshift-ansible) +[![Build Status](https://travis-ci.org/openshift/openshift-ansible.svg?branch=master)](https://travis-ci.org/openshift/openshift-ansible) -#OpenShift Ansible +# OpenShift Ansible -This repo contains Ansible code for OpenShift. This repo and the origin RPMs -that it installs currently require a package that provides `docker`. Currently -the RPMs provided from dockerproject.org do not provide this requirement, though -they may in the future. +This repository contains [Ansible](https://www.ansible.com/) code to install, +upgrade and manage [OpenShift](https://www.openshift.com/) clusters. -##Branches and tags +**Note**: the Ansible playbooks in this repository require an RPM package that +provides `docker`. Currently, the RPMs from +[dockerproject.org](https://dockerproject.org/) do not provide this requirement, +though they may in the future. This limitation is being tracked by +[#2720](https://github.com/openshift/openshift-ansible/issues/2720). -The master branch tracks our current work and should be compatible with both -Origin master branch and the most recent Origin stable release. Currently that's -v1.4 and v1.3.x. In addition to the master branch we maintain stable branches -corresponding to upstream Origin releases, ie: release-1.2. The most recent of -branch will often receive minor feature backports and fixes. Older branches will -receive only critical fixes. +## Branches and tags + +The [master branch](https://github.com/openshift/openshift-ansible/tree/master) +tracks our current work and should be compatible with both [Origin master +branch](https://github.com/openshift/origin/tree/master) and the [most recent +Origin stable release](https://github.com/openshift/origin/releases). Currently +that's v1.4 and v1.3.x. 
In addition to the master branch, we maintain stable +branches corresponding to upstream Origin releases, e.g.: +[release-1.2](https://github.com/openshift/openshift-ansible/tree/release-1.2). +The most recent branch will often receive minor feature backports and fixes. +Older branches will receive only critical fixes. Releases are tagged periodically from active branches and are versioned 3.x corresponding to Origin releases 1.x. We unfortunately started with 3.0 and it's not practical to start over at 1.0. -##Setup -- Install base dependencies: - - Fedora: - ``` - dnf install -y ansible-2.1.0.0 pyOpenSSL python-cryptography - ``` - - OSX: - ``` - # Install ansible 2.1.0.0 and python 2 - brew install ansible python - ``` -- Setup for a specific cloud: +## Setup + +1. Install base dependencies: + + *** + + Requirements: + - Ansible >= 2.1.0 (>= 2.2 is preferred for performance reasons) + - Jinja >= 2.7 + + *** + + Fedora: + ``` + dnf install -y ansible pyOpenSSL python-cryptography + ``` + +2. 
Setup for a specific cloud: + - [AWS](http://github.com/openshift/openshift-ansible/blob/master/README_AWS.md) - [GCE](http://github.com/openshift/openshift-ansible/blob/master/README_GCE.md) - [local VMs](http://github.com/openshift/openshift-ansible/blob/master/README_libvirt.md) + - Bring your own host deployments: + - [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html) + - [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html) + +## Contributing -- Bring your own host deployments: - - [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html) - - [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html) - - [Atomic Enterprise](http://github.com/openshift/openshift-ansible/blob/master/README_AEP.md) - -- Build - - [How to build the openshift-ansible rpms](BUILD.md) - -- Directory Structure: - - [bin/cluster](https://github.com/openshift/openshift-ansible/tree/master/bin/cluster) - python script to easily create clusters - - [docs](https://github.com/openshift/openshift-ansible/tree/master/docs) - Documentation for the project - - [filter_plugins/](https://github.com/openshift/openshift-ansible/tree/master/filter_plugins) - custom filters used to manipulate data in Ansible - - [inventory/](https://github.com/openshift/openshift-ansible/tree/master/inventory) - houses Ansible dynamic inventory scripts - - [playbooks/](https://github.com/openshift/openshift-ansible/tree/master/playbooks) - houses host-type Ansible playbooks (launch, config, destroy, vars) - - [roles/](https://github.com/openshift/openshift-ansible/tree/master/roles) - shareable Ansible tasks - -##Contributing -- [Best Practices Guide](https://github.com/openshift/openshift-ansible/blob/master/docs/best_practices_guide.adoc) -- [Core 
Concepts](https://github.com/openshift/openshift-ansible/blob/master/docs/core_concepts_guide.adoc) -- [Style Guide](https://github.com/openshift/openshift-ansible/blob/master/docs/style_guide.adoc) +See the [contribution guide](CONTRIBUTING.md). diff --git a/README_AEP.md b/README_AEP.md deleted file mode 100644 index c588ebbd3..000000000 --- a/README_AEP.md +++ /dev/null 
@@ -1,233 +0,0 @@ -# Installing AEP from dev puddles using ansible - -* [Requirements](#requirements) -* [Caveats](#caveats) -* [Known Issues](#known-issues) -* [Configuring the host inventory](#configuring-the-host-inventory) -* [Creating the default variables for the hosts and host groups](#creating-the-default-variables-for-the-hosts-and-host-groups) -* [Running the ansible playbooks](#running-the-ansible-playbooks) -* [Post-ansible steps](#post-ansible-steps) -* [Overriding detected ip addresses and hostnames](#overriding-detected-ip-addresses-and-hostnames) - -## Requirements -* ansible 2.1.0.0 - * Available in Fedora channels - * Available for EL with EPEL and Optional channel -* One or more RHEL 7.1 VMs -* Either ssh key based auth for the root user or ssh key based auth for a user - with sudo access (no password) -* A checkout of openshift-ansible from https://github.com/openshift/openshift-ansible/ - - ```sh - git clone https://github.com/openshift/openshift-ansible.git - cd openshift-ansible - ``` - -## Caveats -This ansible repo is currently under heavy revision for providing OSE support; -the following items are highly likely to change before the OSE support is -merged into the upstream repo: - * the current git branch for testing - * how the inventory file should be configured - * variables that need to be set - * bootstrapping steps - * other configuration steps - -## Known Issues -* Host subscriptions are not configurable yet, the hosts need to be - pre-registered with subscription-manager or have the RHEL base repo - pre-configured. 
If using subscription-manager the following commands will - disable all but the rhel-7-server rhel-7-server-extras and - rhel-server7-ose-beta repos: -```sh -subscription-manager repos --disable="*" -subscription-manager repos \ ---enable="rhel-7-server-rpms" \ ---enable="rhel-7-server-extras-rpms" \ ---enable="rhel-7-server-ose-3.0-rpms" -``` -* Configuration of router is not automated yet -* Configuration of docker-registry is not automated yet - -## Configuring the host inventory -[Ansible docs](http://docs.ansible.com/intro_inventory.html) - -Example inventory file for configuring one master and two nodes for the test -environment. This can be configured in the default inventory file -(/etc/ansible/hosts), or using a custom file and passing the --inventory -option to ansible-playbook. - -/etc/ansible/hosts: -```ini -# This is an example of a bring your own (byo) host inventory - -# Create an OSEv3 group that contains the masters and nodes groups -[OSEv3:children] -masters -nodes - -# Set variables common for all OSEv3 hosts -[OSEv3:vars] -# SSH user, this user should allow ssh based auth without requiring a password -ansible_ssh_user=root - -# If ansible_ssh_user is not root, ansible_become must be set to true -#ansible_become=yes - -# See DEPLOYMENT_TYPES.md -deployment_type=atomic-enterprise - -# Pre-release registry URL; note that in the future these images -# may have an atomicenterprise/aep- prefix or so. -oreg_url=rcm-img-docker:5001/openshift3/ose-${component}:${version} - -# Pre-release additional repo -openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm/puddle/build/AtomicOpenShift/3.1/2015-10-27.1', 'enabled': 1, 'gpgcheck': 0}] - -# host group for masters -[masters] -aep3-master.example.com - -# host group for nodes -[nodes] -aep3-node[1:2].example.com -``` - -The hostnames above should resolve both from the hosts themselves and -the host where ansible is running (if different). 
- -A more complete example inventory file ([hosts.aep.example](https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.aep.example)) is available under the [`/inventory/byo`](https://github.com/openshift/openshift-ansible/tree/master/inventory/byo) directory. - -## Running the ansible playbooks -From the openshift-ansible checkout run: -```sh -ansible-playbook playbooks/byo/config.yml -``` -**Note:** this assumes that the host inventory is /etc/ansible/hosts, if using a different -inventory file use the -i option for ansible-playbook. - -## Post-ansible steps -#### Create the default router -On the master host: -```sh -oadm router --create=true \ - --service-account=router \ - --credentials=/etc/origin/master/openshift-router.kubeconfig \ - --images='rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version}' -``` - -#### Create the default docker-registry -On the master host: -```sh -oadm registry --create=true \ - --service-account=registry \ - --credentials=/etc/origin/master/openshift-registry.kubeconfig \ - --images='rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version}' \ - --mount-host=/var/lib/openshift/docker-registry -``` - -## Overriding detected ip addresses and hostnames -Some deployments will require that the user override the detected hostnames -and ip addresses for the hosts. To see what the default values will be you can -run the openshift_facts playbook: -```sh -ansible-playbook playbooks/byo/openshift_facts.yml -``` -The output will be similar to: -``` -ok: [10.3.9.45] => { - "result": { - "ansible_facts": { - "openshift": { - "common": { - "hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com", - "ip": "172.16.4.79", - "public_hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com", - "public_ip": "10.3.9.45", - "use_openshift_sdn": true - }, - "provider": { - ... <snip> ... 
- } - } - }, - "changed": false, - "invocation": { - "module_args": "", - "module_name": "openshift_facts" - } - } -} -ok: [10.3.9.42] => { - "result": { - "ansible_facts": { - "openshift": { - "common": { - "hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com", - "ip": "172.16.4.75", - "public_hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com", - "public_ip": "10.3.9.42", - "use_openshift_sdn": true - }, - "provider": { - ...<snip>... - } - } - }, - "changed": false, - "invocation": { - "module_args": "", - "module_name": "openshift_facts" - } - } -} -ok: [10.3.9.36] => { - "result": { - "ansible_facts": { - "openshift": { - "common": { - "hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com", - "ip": "172.16.4.73", - "public_hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com", - "public_ip": "10.3.9.36", - "use_openshift_sdn": true - }, - "provider": { - ...<snip>... - } - } - }, - "changed": false, - "invocation": { - "module_args": "", - "module_name": "openshift_facts" - } - } -} -``` -Now, we want to verify the detected common settings to verify that they are -what we expect them to be (if not, we can override them). - -* hostname - * Should resolve to the internal ip from the instances themselves. - * openshift_hostname will override. -* ip - * Should be the internal ip of the instance. - * openshift_ip will override. -* public hostname - * Should resolve to the external ip from hosts outside of the cloud - * provider openshift_public_hostname will override. -* public_ip - * Should be the externally accessible ip associated with the instance - * openshift_public_ip will override -* use_openshift_sdn - * Should be true unless the cloud is GCE. - * openshift_use_openshift_sdn overrides - -To override the the defaults, you can set the variables in your inventory: -``` -...snip... 
-[masters] -ose3-master.example.com openshift_ip=1.1.1.1 openshift_hostname=ose3-master.example.com openshift_public_ip=2.2.2.2 openshift_public_hostname=ose3-master.public.example.com -...snip... -``` diff --git a/README_CONTAINERIZED_INSTALLATION.md b/README_CONTAINERIZED_INSTALLATION.md index c615154ef..0a0ebb836 100644 --- a/README_CONTAINERIZED_INSTALLATION.md +++ b/README_CONTAINERIZED_INSTALLATION.md @@ -31,7 +31,7 @@ native clients. The wrapper scripts mount a limited subset of paths, _~/.kube_, _/etc/origin/_, and _/tmp_. Be mindful of this when passing in files to be processed by `oc` or `oadm`. You may find it easier to redirect input like this : - + `oc create -f - < my_file.json` ## Technical Notes @@ -48,18 +48,18 @@ before attempting to pull any of the following images. openshift/origin openshift/node (node + openshift-sdn + openvswitch rpm for client tools) openshift/openvswitch (centos7 + openvswitch rpm, runs ovsdb ovsctl processes) - registry.access.redhat.com/rhel7/etcd + registry.access.redhat.com/rhel7/etcd3 OpenShift Enterprise openshift3/ose openshift3/node openshift3/openvswitch - registry.access.redhat.com/rhel7/etcd + registry.access.redhat.com/rhel7/etcd3 Atomic Enterprise Platform aep3/aep aep3/node aep3/openvswitch - registry.access.redhat.com/rhel7/etcd - + registry.access.redhat.com/rhel7/etcd3 + * note openshift3/* and aep3/* images come from registry.access.redhat.com and rely on the --additional-repository flag being set appropriately. diff --git a/README_openstack.md b/README_openstack.md index 1998a5878..d3d1f9052 100644 --- a/README_openstack.md +++ b/README_openstack.md @@ -25,6 +25,7 @@ On Fedora: On RHEL / CentOS: ``` yum install -y ansible python-novaclient python-neutronclient python-heatclient + sudo pip install shade ``` Configuration diff --git a/README_vagrant.md b/README_vagrant.md index bda474f14..cb62e31d8 100644 --- a/README_vagrant.md +++ b/README_vagrant.md 
@@ -1,52 +1 @@ -:warning: **WARNING** :warning: This feature is community supported and has not been tested by Red Hat. Visit [docs.openshift.com](https://docs.openshift.com) for [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/index.html) or [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/index.html) supported installation docs. - -Requirements ------------- -- ansible (the latest 1.9 release is preferred, but any version greater than 1.9.1 should be sufficient). -- vagrant (tested against version 1.7.2) -- vagrant-hostmanager plugin (tested against version 1.5.0) -- vagrant-libvirt (tested against version 0.0.26) - - Only required if using libvirt instead of virtualbox - -For ``enterprise`` deployment types the base RHEL box has to be added to Vagrant: - -1. Download the RHEL7 vagrant image (libvirt or virtualbox) available from the [Red Hat Container Development Kit downloads in the customer portal](https://access.redhat.com/downloads/content/293/ver=1/rhel---7/1.0.1/x86_64/product-downloads) - -2. Install it into vagrant - - ``$ vagrant box add --name rhel-7 /path/to/rhel-server-libvirt-7.1-3.x86_64.box`` - -3. (optional, recommended) Increase the disk size of the image to 20GB - This is a two step process. (these instructions are specific to libvirt) - - Resize the actual qcow2 image: - - ``$ qemu-img resize ~/.vagrant.d/boxes/rhel-7/0/libvirt/box.img 20GB`` - - Edit `~/.vagrant.d/boxes/rhel-7/0/libvirt/metadata.json` to reflect the new size. 
A corrected metadata.json looks like this: - - ``{"provider": "libvirt", "format": "qcow2", "virtual_size": 20}`` - -Usage ------ -``` -vagrant up --no-provision -vagrant provision -``` - -Using libvirt: -``` -vagrant up --provider=libvirt --no-provision -vagrant provision -``` - -Environment Variables ---------------------- -The following environment variables can be overriden: -- ``OPENSHIFT_DEPLOYMENT_TYPE`` (defaults to origin, choices: origin, openshift-enterprise) -- ``OPENSHIFT_NUM_NODES`` (the number of nodes to create, defaults to 2) - -Note that if ``OPENSHIFT_DEPLOYMENT_TYPE`` is ``enterprise`` you should also specify environment variables related to ``subscription-manager`` which are used by the ``rhel_subscribe`` role: - -- ``rhel_subscription_user``: rhsm user -- ``rhel_subscription_pass``: rhsm password -- (optional) ``rhel_subscription_pool``: poolID to attach a specific subscription besides what auto-attach detects +The Vagrant-based installation has been moved to: https://github.com/openshift/openshift-ansible-contrib/tree/master/vagrant diff --git a/Vagrantfile b/Vagrantfile deleted file mode 100644 index a38378289..000000000 --- a/Vagrantfile +++ /dev/null 
@@ -1,71 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : -VAGRANTFILE_API_VERSION = "2" - -unless Vagrant.has_plugin?("vagrant-hostmanager") - raise 'vagrant-hostmanager plugin is required' -end - -Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - - deployment_type = ENV['OPENSHIFT_DEPLOYMENT_TYPE'] || 'origin' - num_nodes = (ENV['OPENSHIFT_NUM_NODES'] || 2).to_i - - config.hostmanager.enabled = true - config.hostmanager.manage_host = true - config.hostmanager.include_offline = true - config.ssh.insert_key = false - - config.vm.provider "virtualbox" do |vbox, override| - override.vm.box = "centos/7" - vbox.memory = 1024 - vbox.cpus = 2 - - # Enable multiple guest CPUs if available - vbox.customize ["modifyvm", :id, "--ioapic", "on"] - end - - config.vm.provider "libvirt" do |libvirt, override| - libvirt.cpus = 2 - libvirt.memory = 1024 - libvirt.driver = 'kvm' - case deployment_type - when "openshift-enterprise" - override.vm.box = "rhel-7" - when "atomic-enterprise" - override.vm.box = "rhel-7" - when "origin" - override.vm.box = "centos/7" - override.vm.box_download_checksum = "b2a9f7421e04e73a5acad6fbaf4e9aba78b5aeabf4230eebacc9942e577c1e05" - override.vm.box_download_checksum_type = "sha256" - end - end - - num_nodes.times do |n| - node_index = n+1 - config.vm.define "node#{node_index}" do |node| - node.vm.hostname = "ose3-node#{node_index}.example.com" - node.vm.network :private_network, ip: "192.168.100.#{200 + n}" - config.vm.provision "shell", inline: "nmcli connection reload; systemctl restart NetworkManager.service" - end - end - - config.vm.define "master" do |master| - master.vm.hostname = "ose3-master.example.com" - master.vm.network :private_network, ip: "192.168.100.100" - master.vm.network :forwarded_port, guest: 8443, host: 8443 - config.vm.provision "shell", inline: "nmcli connection reload; systemctl restart NetworkManager.service" - master.vm.provision "ansible" do |ansible| - ansible.limit = 'all' - ansible.sudo = true - 
ansible.groups = { - "masters" => ["master"], - "nodes" => ["master", "node1", "node2"], - } - ansible.extra_vars = { - deployment_type: deployment_type, - } - ansible.playbook = "playbooks/byo/vagrant.yml" - end - end -end diff --git a/bin/cluster b/bin/cluster index 68d2a7cd4..b9b2ab15f 100755 --- a/bin/cluster +++ b/bin/cluster @@ -314,7 +314,7 @@ This wrapper is overriding the following ansible variables: * ANSIBLE_SSH_PIPELINING: If not set in the environment, this wrapper will set it to `True`. - If you experience issue with Ansible ssh pipelining, you can disable it by explicitely set this environment variable to `False`. + If you experience issues with Ansible SSH pipelining, you can disable it by explicitly setting this environment variable to `False`. ''' ) parser.add_argument('-v', '--verbose', action='count', diff --git a/callback_plugins/default.py b/callback_plugins/default.py index bc0b207bb..c64145b5c 100644 --- a/callback_plugins/default.py +++ b/callback_plugins/default.py @@ -45,6 +45,9 @@ class CallbackModule(DEFAULT_MODULE.CallbackModule): # pylint: disable=too-few- CALLBACK_TYPE = 'stdout' CALLBACK_NAME = 'default' + def __init__(self, *args, **kwargs): + BASECLASS.__init__(self, *args, **kwargs) + def _dump_results(self, result): '''Return the text to output for a result.''' result['_ansible_verbose_always'] = True diff --git a/docs/best_practices_guide.adoc b/docs/best_practices_guide.adoc index 267aa850d..e9d904965 100644 --- a/docs/best_practices_guide.adoc +++ b/docs/best_practices_guide.adoc 
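Review bot: The new `__init__` in `callback_plugins/default.py` calls `BASECLASS.__init__`, but the class statement shown in the hunk header derives from `DEFAULT_MODULE.CallbackModule`, and `BASECLASS` is defined outside the hunk, so a reader cannot confirm that the two are the same class. A constructor that only forwards its arguments is normally redundant, so it presumably works around something in how the plugin is loaded; a comment above it should state that reason. If `BASECLASS` is only an alias, `DEFAULT_MODULE.CallbackModule.__init__(self, *args, **kwargs)` would make the link to the declared parent explicit. The class body continues outside the hunk.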
@@ -52,11 +52,11 @@ If mode lines for other editors are needed, please open a GitHub issue. === Method Signatures ''' -[[When-adding-a-new-paramemter-to-an-existing-method-a-default-value-SHOULD-be-used]] +[[When-adding-a-new-parameter-to-an-existing-method-a-default-value-SHOULD-be-used]] [cols="2v,v"] |=== -| <<When-adding-a-new-paramemter-to-an-existing-method-a-default-value-SHOULD-be-used, Rule>> -| When adding a new paramemter to an existing method, a default value SHOULD be used +| <<When-adding-a-new-parameter-to-an-existing-method-a-default-value-SHOULD-be-used, Rule>> +| When adding a new parameter to an existing method, a default value SHOULD be used |=== The purpose of this rule is to make it so that method signatures are backwards compatible. @@ -76,7 +76,7 @@ def add_person(first_name, last_name, age=None): === PyLint -http://www.pylint.org/[PyLint] is used in an attempt to keep the python code as clean and as managable as possible. The build bot runs each pull request through PyLint and any warnings or errors cause the build bot to fail the pull request. +http://www.pylint.org/[PyLint] is used in an attempt to keep the python code as clean and as manageable as possible. The build bot runs each pull request through PyLint and any warnings or errors cause the build bot to fail the pull request. ''' [[PyLint-rules-MUST-NOT-be-disabled-on-a-whole-file]] 
@@ -194,7 +194,7 @@ The purpose of this rule is to make it easy to include custom modules in our pla | Parameters to Ansible modules SHOULD use the Yaml dictionary format when 3 or more parameters are being passed |=== -When a module has several parameters that are being passed in, it's hard to see exactly what value each parameter is getting. It is preferred to use the Ansible Yaml syntax to pass in parameters so that it's more clear what values are being passed for each paramemter. +When a module has several parameters that are being passed in, it's hard to see exactly what value each parameter is getting. It is preferred to use the Ansible Yaml syntax to pass in parameters so that it's more clear what values are being passed for each parameter. .Bad: [source,yaml] @@ -222,7 +222,7 @@ When a module has several parameters that are being passed in, it's hard to see | Parameters to Ansible modules SHOULD use the Yaml dictionary format when the line length exceeds 120 characters |=== -Lines that are long quickly become a wall of text that isn't easily parsable. It is preferred to use the Ansible Yaml syntax to pass in parameters so that it's more clear what values are being passed for each paramemter. +Lines that are long quickly become a wall of text that isn't easily parsable. It is preferred to use the Ansible Yaml syntax to pass in parameters so that it's more clear what values are being passed for each parameter. .Bad: [source,yaml] @@ -432,7 +432,7 @@ This is very useful when developing and debugging new tasks. It can also signifi [[Ansible-Roles-SHOULD-be-named-like-technology_component_subcomponent]] [cols="2v,v"] |=== -| [[Ansible-Roles-SHOULD-be-named-like-technology_component_subcomponent, Rule]] +| <<Ansible-Roles-SHOULD-be-named-like-technology_component_subcomponent, Rule>> | Ansible Roles SHOULD be named like technology_component[_subcomponent]. |=== diff --git a/docs/style_guide.adoc b/docs/style_guide.adoc index 72eaedcf9..2c2cb8610 100644 
--- a/docs/style_guide.adoc +++ b/docs/style_guide.adoc @@ -103,7 +103,7 @@ Ansible role variables are defined as variables contained in (or passed into) a [cols="2v,v"] |=== | <<Role-variables-MUST-have-a-prefix-of-atleast-3-characters-See.below.for.specific.naming.rules, Rule>> -| Role variables MUST have a prefix of atleast 3 characters. See below for specific naming rules. +| Role variables MUST have a prefix of at least 3 characters. See below for specific naming rules. |=== ==== Role with 3 (or more) words in the name diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 5358a244e..38bc3ad6b 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -234,9 +234,9 @@ class FilterModule(object): arrange them as a string 'key=value key=value' """ if not isinstance(data, dict): - raise errors.AnsibleFilterError("|failed expects first param is a dict") + raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_combine_dict]. Got %s. Type: %s" % (str(data), str(type(data)))) - return out_joiner.join([in_joiner.join([k, v]) for k, v in data.items()]) + return out_joiner.join([in_joiner.join([k, str(v)]) for k, v in data.items()]) @staticmethod def oo_ami_selector(data, image_name): @@ -286,7 +286,7 @@ class FilterModule(object): } """ if not isinstance(data, dict): - raise errors.AnsibleFilterError("|failed expects first param is a dict") + raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_ec2_volume_def]. Got %s. Type: %s" % (str(data), str(type(data)))) if host_type not in ['master', 'node', 'etcd']: raise errors.AnsibleFilterError("|failed expects etcd, master or node" " as the host type") 
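Review bot: Both new error messages interpolate `str(data)`, so whatever value reached the filter is written verbatim into the Ansible failure output and into any log that captures it; this applies to the `[oo_combine_dict]` message in the first `oo_filters.py` hunk and to the `[oo_ec2_volume_def]` message in the second. Inventory and hostvars values can contain credentials, so reporting only the type is safer: `"|failed expects first param is a dict [oo_combine_dict]. Got type: %s" % type(data).__name__`. Separately, the join now stringifies `v` but not `k`, so a non-string key still raises `TypeError`; `in_joiner.join([str(k), str(v)])` would cover both. Both functions start outside their hunks.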
@@ -608,8 +608,8 @@ class FilterModule(object): host_type=_get_tag_value(host['group_names'], 'host-type'), sub_host_type=_get_tag_value(host['group_names'], 'sub-host-type'), host={'name': host['inventory_hostname'], - 'public IP': host['ansible_ssh_host'], - 'private IP': host['ansible_default_ipv4']['address']}) + 'public IP': host['oo_public_ipv4'], + 'private IP': host['oo_private_ipv4']}) except KeyError: pass return clusters @@ -889,11 +889,32 @@ class FilterModule(object): 'servers': FilterModule.oo_haproxy_backend_masters(servers_hostvars, nuage_rest_port)}) return loadbalancer_backends + @staticmethod + def oo_chomp_commit_offset(version): + """Chomp any "+git.foo" commit offset string from the given `version` + and return the modified version string. + + Ex: + - chomp_commit_offset(None) => None + - chomp_commit_offset(1337) => "1337" + - chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15" + - chomp_commit_offset("v3.4.0.15") => "v3.4.0.15" + - chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0" + """ + if version is None: + return version + else: + # Stringify, just in case it's a Number type. Split by '+' and + # return the first split. No concerns about strings without a + # '+', .split() returns an array of the original string. + return str(version).split('+')[0] + def filters(self): """ returns a mapping of filters to methods """ return { "oo_select_keys": self.oo_select_keys, "oo_select_keys_from_list": self.oo_select_keys_from_list, + "oo_chomp_commit_offset": self.oo_chomp_commit_offset, "oo_collect": self.oo_collect, "oo_flatten": self.oo_flatten, "oo_pdb": self.oo_pdb, diff --git a/filter_plugins/openshift_master.py b/filter_plugins/openshift_master.py index ee6a62ba5..8d3f31169 100644 --- a/filter_plugins/openshift_master.py +++ b/filter_plugins/openshift_master.py 
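Review bot: The new lookups `host['oo_public_ipv4']` and `host['oo_private_ipv4']` sit inside the existing `try` whose `except KeyError: pass` drops the host, so any host that lacks these keys disappears from the returned `clusters` with no message. Whether every inventory sets them is not visible in this hunk. At minimum a comment such as `# hosts without oo_public_ipv4 / oo_private_ipv4 are skipped by the KeyError handler below` should record that; raising `errors.AnsibleFilterError` naming the host would make a misconfigured inventory visible instead of producing a shorter list. The enclosing function starts outside the hunk.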
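Review bot: The docstring examples for the new filter call `chomp_commit_offset(...)`, but the method and the key registered in `filters()` are both `oo_chomp_commit_offset`; the examples should use the real name so they can be searched for and pasted, e.g. `- oo_chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"`. The `else:` after `return version` is not needed and the final `return` can be dedented. Under Python 2, which the `.travis.yml` in this commit targets, `str(version)` would presumably raise `UnicodeEncodeError` for a unicode value with non-ASCII characters; that is unlikely for version strings but worth a line in the comment.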
@@ -75,7 +75,7 @@ class IdentityProviderBase(object): valid_mapping_methods = ['add', 'claim', 'generate', 'lookup'] if self.mapping_method not in valid_mapping_methods: - raise errors.AnsibleFilterError("|failed unkown mapping method " + raise errors.AnsibleFilterError("|failed unknown mapping method " "for provider {0}".format(self.__class__.__name__)) self._required = [] self._optional = [] diff --git a/inventory/README.md b/inventory/README.md new file mode 100644 index 000000000..b8edfcbb0 --- /dev/null +++ b/inventory/README.md @@ -0,0 +1,9 @@ +# OpenShift Ansible inventory config files + +You can install OpenShift on: + +* [Amazon Web Services](aws/hosts/) +* [BYO](byo/) (Bring your own), use this inventory config file to install OpenShift on your bare metal servers +* [GCE](gce/) (Google Compute Engine) +* [libvirt](libviert/hosts/) +* [OpenStack](openstack/hosts/) diff --git a/inventory/aws/hosts/ec2.ini b/inventory/aws/hosts/ec2.ini index aa0f9090f..5ee51c84f 100644 --- a/inventory/aws/hosts/ec2.ini +++ b/inventory/aws/hosts/ec2.ini @@ -60,7 +60,7 @@ all_instances = False # By default, only EC2 instances in the 'running' state are returned. Specify # EC2 instance states to return as a comma-separated list. This -# option is overriden when 'all_instances' is True. +# option is overridden when 'all_instances' is True. # instance_states = pending, running, shutting-down, terminated, stopping, stopped # By default, only RDS instances in the 'available' state are returned. Set diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example index 0f0e223ce..e769537f9 100644 --- a/inventory/byo/hosts.origin.example +++ b/inventory/byo/hosts.origin.example 
@@ -30,7 +30,7 @@ deployment_type=origin # use this to lookup the latest exact version of the container images, which is the tag actually used to configure # the cluster. For RPM installations we just verify the version detected in your configured repos matches this # release. -openshift_release=v1.2 +openshift_release=v1.4 # Specify an exact container image tag to install or configure. # WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed. @@ -351,6 +351,22 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_hosted_registry_pullthrough=true #openshift_hosted_registry_acceptschema2=true #openshift_hosted_registry_enforcequota=true +# +# Any S3 service (Minio, ExoScale, ...): Basically the same as above +# but with regionendpoint configured +# S3 bucket must already exist. +#openshift_hosted_registry_storage_kind=object +#openshift_hosted_registry_storage_provider=s3 +#openshift_hosted_registry_storage_s3_accesskey=access_key_id +#openshift_hosted_registry_storage_s3_secretkey=secret_access_key +#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/ +#openshift_hosted_registry_storage_s3_bucket=bucket_name +#openshift_hosted_registry_storage_s3_region=bucket_region +#openshift_hosted_registry_storage_s3_chunksize=26214400 +#openshift_hosted_registry_storage_s3_rootdirectory=/registry +#openshift_hosted_registry_pullthrough=true +#openshift_hosted_registry_acceptschema2=true +#openshift_hosted_registry_enforcequota=true # Metrics deployment # See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html 
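Review bot: The S3 example added in the `@@ -351,6` hunk puts `openshift_hosted_registry_storage_s3_accesskey` and `openshift_hosted_registry_storage_s3_secretkey` straight into the inventory file without any hint on how to protect them; the same block is added to `hosts.ose.example`. Example inventories tend to be copied and then committed or shared, so a comment line above the two keys would help, for instance `# Keep the S3 keys out of a plain-text inventory, e.g. in an Ansible Vault encrypted vars file.` The comment could also say that the account behind the keys only needs access to the one registry bucket.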
@@ -456,7 +472,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # network blocks should be private and should not conflict with network blocks # in your infrastructure that pods may require access to. Can not be changed # after deployment. -#osm_cluster_network_cidr=10.1.0.0/16 +#osm_cluster_network_cidr=10.128.0.0/14 #openshift_portal_net=172.30.0.0/16 @@ -476,9 +492,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # the CIDRs reserved for external IPs, nodes, pods, or services. #openshift_master_ingress_ip_network_cidr=172.46.0.0/16 -# Configure number of bits to allocate to each host’s subnet e.g. 8 -# would mean a /24 network on the host. -#osm_host_subnet_length=8 +# Configure number of bits to allocate to each host’s subnet e.g. 9 +# would mean a /23 network on the host. +#osm_host_subnet_length=9 # Configure master API and console ports. #openshift_master_api_port=8443 diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example index 2e5b7564d..be919c105 100644 --- a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.ose.example @@ -30,7 +30,7 @@ deployment_type=openshift-enterprise # use this to lookup the latest exact version of the container images, which is the tag actually used to configure # the cluster. For RPM installations we just verify the version detected in your configured repos matches this # release. -openshift_release=v3.2 +openshift_release=v3.4 # Specify an exact container image tag to install or configure. # WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed. 
@@ -351,6 +351,22 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_hosted_registry_pullthrough=true #openshift_hosted_registry_acceptschema2=true #openshift_hosted_registry_enforcequota=true +# +# Any S3 service (Minio, ExoScale, ...): Basically the same as above +# but with regionendpoint configured +# S3 bucket must already exist. +#openshift_hosted_registry_storage_kind=object +#openshift_hosted_registry_storage_provider=s3 +#openshift_hosted_registry_storage_s3_accesskey=access_key_id +#openshift_hosted_registry_storage_s3_secretkey=secret_access_key +#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/ +#openshift_hosted_registry_storage_s3_bucket=bucket_name +#openshift_hosted_registry_storage_s3_region=bucket_region +#openshift_hosted_registry_storage_s3_chunksize=26214400 +#openshift_hosted_registry_storage_s3_rootdirectory=/registry +#openshift_hosted_registry_pullthrough=true +#openshift_hosted_registry_acceptschema2=true +#openshift_hosted_registry_enforcequota=true # Metrics deployment # See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html @@ -456,7 +472,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # network blocks should be private and should not conflict with network blocks # in your infrastructure that pods may require access to. Can not be changed # after deployment. -#osm_cluster_network_cidr=10.1.0.0/16 +#osm_cluster_network_cidr=10.128.0.0/14 #openshift_portal_net=172.30.0.0/16 
@@ -476,9 +492,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # the CIDRs reserved for external IPs, nodes, pods, or services. #openshift_master_ingress_ip_network_cidr=172.46.0.0/16 -# Configure number of bits to allocate to each host’s subnet e.g. 8 -# would mean a /24 network on the host. -#osm_host_subnet_length=8 +# Configure number of bits to allocate to each host’s subnet e.g. 9 +# would mean a /23 network on the host. +#osm_host_subnet_length=9 # Configure master API and console ports. #openshift_master_api_port=8443 diff --git a/inventory/hosts b/inventory/hosts deleted file mode 100644 index 72b7ae646..000000000 --- a/inventory/hosts +++ /dev/null @@ -1,2 +0,0 @@ -# Eventually we'll add the GCE, AWS, etc dynamic inventories, but for now... -localhost diff --git a/openshift-ansible.spec b/openshift-ansible.spec index bf0f1f3e2..4961d23ef 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -5,7 +5,7 @@ } Name: openshift-ansible -Version: 3.4.7 +Version: 3.4.17 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -13,7 +13,7 @@ URL: https://github.com/openshift/openshift-ansible Source0: https://github.com/openshift/openshift-ansible/archive/%{commit}/%{name}-%{version}.tar.gz BuildArch: noarch -Requires: ansible >= 2.1.0.0 +Requires: ansible >= 2.2.0.0-1 Requires: python2 Requires: openshift-ansible-docs = %{version}-%{release} 
@@ -249,6 +249,110 @@ Atomic OpenShift Utilities includes %changelog +* Fri Nov 04 2016 Scott Dodson <sdodson@redhat.com> 3.4.17-1 +- Fix indentation for flannel etcd vars (smunilla@redhat.com) +- Update hosted_templates (sdodson@redhat.com) +- remove console exclusions (sdodson@redhat.com) +- Restart API service always as well. (dgoodwin@redhat.com) +- Update v1.4 content (sdodson@redhat.com) +- Update quick installer upgrade mappings for 3.4 (smunilla@redhat.com) +- Update flannel etcd vars for 0.5.5 (smunilla@redhat.com) +- Where we use curl force it to use tlsv1.2 (sdodson@redhat.com) +- Bump etcd_ca_default_days to 5 years. (abutcher@redhat.com) +- Update master_lb vs cluster_hostname workflow (smunilla@redhat.com) + +* Wed Nov 02 2016 Scott Dodson <sdodson@redhat.com> 3.4.16-1 +- Fix HA environments incorrectly detecting mixed installed environments + (tbielawa@redhat.com) +- Deploy an OOM systemd override for openvswitch. (dgoodwin@redhat.com) +- Only restart dnsmasq if the DNS servers have changed (tbielawa@redhat.com) +- Update installation summary for etcd members (smunilla@redhat.com) +- Fix changed_when (sdodson@redhat.com) +- add io labels (tdawson@redhat.com) +- Touch all ini_file files before using them (sdodson@redhat.com) +- Remove commit offset strings from parsed versions (tbielawa@redhat.com) +- Update variant_version (smunilla@redhat.com) + +* Mon Oct 31 2016 Troy Dawson <tdawson@redhat.com> 3.4.15-1 +- Bump documented openshift_release for 1.4/3.4. (dgoodwin@redhat.com) +- Add requirements, fix a small formatting issue. + (erinn.looneytriggs@gmail.com) + +* Fri Oct 28 2016 Troy Dawson <tdawson@redhat.com> 3.4.14-1 +- Change HA master controller service to restart always. (dgoodwin@redhat.com) +- Default hosted_registry_insecure true when insecure registry present in + existing /etc/sysconfig/docker. 
(abutcher@redhat.com) +- Fix race condtion in openshift_facts (smunilla@redhat.com) + +* Wed Oct 26 2016 Troy Dawson <tdawson@redhat.com> 3.4.13-1 +- [upgrades] Fix containerized node (sdodson@redhat.com) +- Add support for 3.4 upgrade. (dgoodwin@redhat.com) +- Update link to latest versions upgrade README (ebballon@gmail.com) +- Bump logging and metrics deployers to 3.3.1 and 3.4.0 (sdodson@redhat.com) +- Remove Vagrantfile (jdetiber@redhat.com) +- Enable dnsmasq service (sdodson@redhat.com) +- Default infra template modification based on + openshift_examples_modify_imagestreams (abutcher@redhat.com) +- Added a parameter for cert validity (vishal.patil@nuagenetworks.net) +- Fix and reorder control plane service restart. (dgoodwin@redhat.com) +- Add node-labels to kubeletArguments (tbielawa@redhat.com) + +* Mon Oct 24 2016 Troy Dawson <tdawson@redhat.com> 3.4.12-1 +- Move infrastructure templates into openshift_hosted_templates role. + (abutcher@redhat.com) +- Unit tests for the debug_env logger thing (tbielawa@redhat.com) +- a-o-i: Separate install and scaleup workflows (smunilla@redhat.com) +- Reference full vars for registry object storage. (abutcher@redhat.com) + +* Fri Oct 21 2016 Troy Dawson <tdawson@redhat.com> 3.4.11-1 +- trouble creating service signer while running upgrade dockerized + (henning.fjellheim@nb.no) +- Don't freak out if the oc command doesn't exist. (tbielawa@redhat.com) +- Make the json template filter-driven. (tbielawa@redhat.com) +- Add JSON result CLI parsing notes to the README (tbielawa@redhat.com) +- The JSON result saving template now includes a summary of expired/warned + certs for easier parsing. (tbielawa@redhat.com) +- Clean up lint and other little things (polish++) (tbielawa@redhat.com) +- Fix playbooks, update readme, update default vars (tbielawa@redhat.com) +- Refactor into a role (tbielawa@redhat.com) +- Get router/registry certs. Collect common names and subjectAltNames + (tbielawa@redhat.com) +- Support etcd certs now. 
Fix lint. Generate HTML report. (tbielawa@redhat.com) +- Try to make boiler plate for cert expiry checking (tbielawa@redhat.com) +- Override __init__ in default callback to avoid infinite loop. + (abutcher@redhat.com) +- Drop pacemaker restart logic. (dgoodwin@redhat.com) +- Fix typos (rhcarvalho@gmail.com) +- Switch from "oadm" to "oc adm" and fix bug in binary sync. + (dgoodwin@redhat.com) +- Remove uneeded import of ansible.module_utils.splitter (misc@redhat.com) + +* Wed Oct 19 2016 Troy Dawson <tdawson@redhat.com> 3.4.10-1 +- Get rid of openshift_node_config_file entirely (sdodson@redhat.com) +- [logging] Fix NFS volume binding (sdodson@redhat.com) +- Build full node config path in systemd_units tasks. (abutcher@redhat.com) +- Default [] (abutcher@afrolegs.com) +- Template with_items for upstream ansible-2.2 compat. (abutcher@redhat.com) + +* Mon Oct 17 2016 Troy Dawson <tdawson@redhat.com> 3.4.9-1 +- formatting updates in template (tobias@tobru.ch) +- Do not error on node labels set too non-string values. (manuel@hutter.io) +- Use inventory variables rather than facts (sdodson@redhat.com) +- Resume restarting node after upgrading node rpms. (dgoodwin@redhat.com) +- upgrade: Don't check avail docker version if not already installed. + (dgoodwin@redhat.com) +- revise docs (tobias@tobru.ch) +- adjustments in docs and j2 template (tobias@tobru.ch) +- add regionendpoint parameter for registry s3 (tobias.brunner@vshn.ch) + +* Fri Oct 14 2016 Troy Dawson <tdawson@redhat.com> 3.4.8-1 +- update handling of use_dnsmasq (jdetiber@redhat.com) +- Fix standalone docker upgrade playbook skipping nodes. (dgoodwin@redhat.com) +- Fix missing play assignment in a-o-i callback plugin (tbielawa@redhat.com) +- Stop restarting node after upgrading master rpms. 
(dgoodwin@redhat.com) +- Fix upgrade mappings in quick installer (smunilla@redhat.com) +- nfs: Handle seboolean aliases not just in Fedora (walters@verbum.org) + * Wed Oct 12 2016 Troy Dawson <tdawson@redhat.com> 3.4.7-1 - set defaults for debug_level in template and task (jhcook@gmail.com) - Set HTTPS_PROXY in example builddefaults_json (sdodson@redhat.com) diff --git a/playbooks/adhoc/metrics_setup/README.md b/playbooks/adhoc/metrics_setup/README.md deleted file mode 100644 index 71aa1e109..000000000 --- a/playbooks/adhoc/metrics_setup/README.md +++ /dev/null @@ -1,25 +0,0 @@ -## Playbook for adding [Metrics](https://github.com/openshift/origin-metrics) to Openshift - -See OSE Ansible [readme](https://github.com/openshift/openshift-ansible/blob/master/README_OSE.md) for general install instructions. Playbook has been tested on OSE 3.1/RHEL7.2 cluster - - -Add the following vars to `[OSEv3:vars]` section of your inventory file -``` -[OSEv3:vars] -# Enable cluster metrics -use_cluster_metrics=true -metrics_external_service=< external service name for metrics > -metrics_image_prefix=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ -metrics_image_version=3.1.0 -``` - -Run playbook -``` -ansible-playbook -i $INVENTORY_FILE playbooks/install.yml -``` - -## Contact -Email: hawkular-dev@lists.jboss.org - -## Credits -Playbook adapted from install shell scripts by Matt Mahoney diff --git a/playbooks/adhoc/metrics_setup/files/metrics-deployer-setup.yaml b/playbooks/adhoc/metrics_setup/files/metrics-deployer-setup.yaml deleted file mode 100644 index f70e0b18b..000000000 --- a/playbooks/adhoc/metrics_setup/files/metrics-deployer-setup.yaml +++ /dev/null 
@@ -1,37 +0,0 @@ -#!/bin/bash -# -# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: "v1" -kind: "List" -metadata: - name: metrics-deployer-setup - annotations: - description: "Required dependencies for the metrics deployer pod." - tags: "infrastructure" -labels: - metrics-infra: deployer - provider: openshift - component: deployer -items: -- - apiVersion: v1 - kind: ServiceAccount - metadata: - name: metrics-deployer - secrets: - - name: metrics-deployer diff --git a/playbooks/adhoc/metrics_setup/files/metrics.yaml b/playbooks/adhoc/metrics_setup/files/metrics.yaml deleted file mode 100644 index d823b2587..000000000 --- a/playbooks/adhoc/metrics_setup/files/metrics.yaml +++ /dev/null 
@@ -1,116 +0,0 @@ -#!/bin/bash -# -# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: "v1" -kind: "Template" -metadata: - name: metrics-deployer-template - annotations: - description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret." - tags: "infrastructure" -labels: - metrics-infra: deployer - provider: openshift - component: deployer -objects: -- - apiVersion: v1 - kind: Pod - metadata: - generateName: metrics-deployer- - spec: - containers: - - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION} - name: deployer - volumeMounts: - - name: secret - mountPath: /secret - readOnly: true - - name: empty - mountPath: /etc/deploy - env: - - name: PROJECT - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: IMAGE_PREFIX - value: ${IMAGE_PREFIX} - - name: IMAGE_VERSION - value: ${IMAGE_VERSION} - - name: PUBLIC_MASTER_URL - value: ${PUBLIC_MASTER_URL} - - name: MASTER_URL - value: ${MASTER_URL} - - name: REDEPLOY - value: ${REDEPLOY} - - name: USE_PERSISTENT_STORAGE - value: ${USE_PERSISTENT_STORAGE} - - name: HAWKULAR_METRICS_HOSTNAME - value: ${HAWKULAR_METRICS_HOSTNAME} - - name: CASSANDRA_NODES - value: ${CASSANDRA_NODES} - - name: CASSANDRA_PV_SIZE - value: ${CASSANDRA_PV_SIZE} - - name: METRIC_DURATION - value: 
${METRIC_DURATION} - dnsPolicy: ClusterFirst - restartPolicy: Never - serviceAccount: metrics-deployer - volumes: - - name: empty - emptyDir: {} - - name: secret - secret: - secretName: metrics-deployer -parameters: -- - description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-"' - name: IMAGE_PREFIX - value: "hawkular/" -- - description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"' - name: IMAGE_VERSION - value: "0.7.0-SNAPSHOT" -- - description: "Internal URL for the master, for authentication retrieval" - name: MASTER_URL - value: "https://kubernetes.default.svc:443" -- - description: "External hostname where clients will reach Hawkular Metrics" - name: HAWKULAR_METRICS_HOSTNAME - required: true -- - description: "If set to true the deployer will try and delete all the existing components before trying to redeploy." - name: REDEPLOY - value: "false" -- - description: "Set to true for persistent storage, set to false to use non persistent storage" - name: USE_PERSISTENT_STORAGE - value: "true" -- - description: "The number of Cassandra Nodes to deploy for the initial cluster" - name: CASSANDRA_NODES - value: "1" -- - description: "The persistent volume size for each of the Cassandra nodes" - name: CASSANDRA_PV_SIZE - value: "1Gi" -- - description: "How many days metrics should be stored for." - name: METRIC_DURATION - value: "7" diff --git a/playbooks/adhoc/metrics_setup/playbooks/install.yml b/playbooks/adhoc/metrics_setup/playbooks/install.yml deleted file mode 100644 index a9ec3c1ef..000000000 --- a/playbooks/adhoc/metrics_setup/playbooks/install.yml +++ /dev/null 
@@ -1,45 +0,0 @@ ---- -- include: master_config_facts.yml -- name: "Install metrics" - hosts: masters - vars: - metrics_public_url: "https://{{ metrics_external_service }}/hawkular/metrics" - tasks: - - name: "Add metrics url to master config" - lineinfile: "state=present dest=/etc/origin/master/master-config.yaml regexp='^\ \ metricsPublicURL' insertbefore='^\ \ publicURL' line='\ \ metricsPublicURL: {{ metrics_public_url }}'" - - - name: "Restart master service" - service: name=atomic-openshift-master state=restarted - - - name: "Copy metrics-deployer yaml to remote" - copy: "src=../files/metrics-deployer-setup.yaml dest=/tmp/metrics-deployer-setup.yaml force=yes" - - - name: "Add metrics-deployer" - command: "{{item}}" - run_once: true - register: output - failed_when: ('already exists' not in output.stderr) and (output.rc != 0) - with_items: - - oc project openshift-infra - - oc create -f /tmp/metrics-deployer-setup.yaml - - - name: "Give metrics-deployer SA permissions" - command: "oadm policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer" - run_once: true - - - name: "Give heapster SA permissions" - command: "oadm policy add-cluster-role-to-user cluster-reader system:serviceaccount:openshift-infra:heapster" - run_once: true - - - name: "Create metrics-deployer secret" - command: "oc secrets new metrics-deployer nothing=/dev/null" - register: output - failed_when: ('already exists' not in output.stderr) and (output.rc != 0) - run_once: true - - - name: "Copy metrics.yaml to remote" - copy: "src=../files/metrics.yaml dest=/tmp/metrics.yaml force=yes" - - - name: "Process yml template" - shell: "oc process -f /tmp/metrics.yaml -v MASTER_URL={{ masterPublicURL }},REDEPLOY=true,HAWKULAR_METRICS_HOSTNAME={{ metrics_external_service }},IMAGE_PREFIX={{ metrics_image_prefix }},IMAGE_VERSION={{ metrics_image_version }},USE_PERSISTENT_STORAGE=false | oc create -f -" - run_once: true
\ No newline at end of file diff --git a/playbooks/adhoc/metrics_setup/playbooks/master_config_facts.yml b/playbooks/adhoc/metrics_setup/playbooks/master_config_facts.yml deleted file mode 100644 index 65de11bc4..000000000 --- a/playbooks/adhoc/metrics_setup/playbooks/master_config_facts.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: "Load master config" - hosts: masters - vars: - master_config_file: "/tmp/ansible-metrics-{{ ansible_hostname }}" - tasks: - - name: "Fetch master config from remote" - fetch: "src=/etc/origin/master/master-config.yaml dest={{ master_config_file }} flat=yes" - - name: "Load config" - include_vars: "{{ master_config_file }}" diff --git a/playbooks/adhoc/metrics_setup/playbooks/uninstall.yml b/playbooks/adhoc/metrics_setup/playbooks/uninstall.yml deleted file mode 100644 index 06c4586ee..000000000 --- a/playbooks/adhoc/metrics_setup/playbooks/uninstall.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- name: "Uninstall metrics" - hosts: masters - tasks: - - name: "Remove metrics url from master config" - lineinfile: "state=absent dest=/etc/origin/master/master-config.yaml regexp='^\ \ metricsPublicURL'" - - - name: "Delete metrics objects" - command: "{{item}}" - with_items: - - oc delete all --selector=metrics-infra - # - oc delete secrets --selector=metrics-infra - # - oc delete sa --selector=metrics-infra - - oc delete templates --selector=metrics-infra - - oc delete sa metrics-deployer - - oc delete secret metrics-deployer diff --git a/playbooks/adhoc/noc/get_zabbix_problems.yml b/playbooks/adhoc/noc/get_zabbix_problems.yml index 79cae24ab..32fc7ce68 100644 --- a/playbooks/adhoc/noc/get_zabbix_problems.yml +++ b/playbooks/adhoc/noc/get_zabbix_problems.yml @@ -33,7 +33,7 @@ - add_host: name: "{{ item }}" groups: problem_hosts_group - with_items: problem_hosts + with_items: "{{ problem_hosts }}" - name: "Run on problem hosts" hosts: problem_hosts_group 
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 789f66b14..4ea639cbe 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -338,6 +338,7 @@ failed_when: False with_items: - etcd + - etcd3 - firewalld - name: Stop additional atomic services @@ -352,6 +353,7 @@ when: not is_atomic | bool with_items: - etcd + - etcd3 - shell: systemctl reset-failed changed_when: False @@ -365,6 +367,7 @@ - /etc/ansible/facts.d/openshift.fact - /etc/etcd - /etc/systemd/system/etcd_container.service + - /etc/profile.d/etcdctl.sh # Intenationally using rm command over file module because if someone had mounted a filesystem # at /var/lib/etcd then the contents was not removed correctly diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml index a542b4ca3..ed8aac398 100644 --- a/playbooks/aws/openshift-cluster/list.yml +++ b/playbooks/aws/openshift-cluster/list.yml @@ -16,11 +16,8 @@ groups: oo_list_hosts ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: groups[scratch_group] | default([]) | difference(['localhost']) - -- name: List Hosts - hosts: oo_list_hosts - gather_facts: no - tasks: + oo_public_ipv4: "{{ hostvars[item].ec2_ip_address }}" + oo_private_ipv4: "{{ hostvars[item].ec2_private_ip_address }}" + with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}" - debug: - msg: "public ip:{{ hostvars[inventory_hostname].ec2_ip_address }} private ip:{{ hostvars[inventory_hostname].ec2_private_ip_address }}" + msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}" diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml index d22c86cda..4d76d3bfe 100644 --- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml 
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml @@ -99,8 +99,8 @@ - name: Add Name tag to instances ec2_tag: resource={{ item.1.id }} region={{ deployment_vars[deployment_type].region }} state=present with_together: - - instances - - ec2.instances + - "{{ instances }}" + - "{{ ec2.instances }}" args: tags: Name: "{{ item.0 }}" @@ -154,8 +154,8 @@ openshift_node_labels: "{{ node_label }}" logrotate_scripts: "{{ logrotate }}" with_together: - - instances - - ec2.instances + - "{{ instances }}" + - "{{ ec2.instances }}" - name: Add new instances to nodes_to_add group if needed add_host: @@ -169,13 +169,13 @@ openshift_node_labels: "{{ node_label }}" logrotate_scripts: "{{ logrotate }}" with_together: - - instances - - ec2.instances + - "{{ instances }}" + - "{{ ec2.instances }}" when: oo_extend_env is defined and oo_extend_env | bool - name: Wait for ssh wait_for: "port=22 host={{ item.dns_name }}" - with_items: ec2.instances + with_items: "{{ ec2.instances }}" - name: Wait for user setup command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.0].ansible_ssh_user }}@{{ item.1.dns_name }} echo {{ hostvars[item.0].ansible_ssh_user }} user is setup" @@ -184,5 +184,5 @@ retries: 20 delay: 10 with_together: - - instances - - ec2.instances + - "{{ instances }}" + - "{{ ec2.instances }}" diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml index fb13e1839..7a8375d0e 100644 --- a/playbooks/aws/openshift-cluster/terminate.yml +++ b/playbooks/aws/openshift-cluster/terminate.yml 
@@ -12,7 +12,7 @@ groups: oo_hosts_to_terminate ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: (groups['tag_clusterid_' ~ cluster_id] | default([])) | difference(['localhost']) + with_items: "{{ (groups['tag_clusterid_' ~ cluster_id] | default([])) | difference(['localhost']) }}" - name: Unsubscribe VMs hosts: oo_hosts_to_terminate @@ -39,7 +39,7 @@ clusterid: "{{ hostvars[item]['ec2_tag_clusterid'] }}" host-type: "{{ hostvars[item]['ec2_tag_host-type'] }}" sub_host_type: "{{ hostvars[item]['ec2_tag_sub-host-type'] }}" - with_items: groups.oo_hosts_to_terminate + with_items: "{{ groups.oo_hosts_to_terminate }}" when: "'oo_hosts_to_terminate' in groups" - name: Terminate instances @@ -49,7 +49,7 @@ region: "{{ hostvars[item].ec2_region }}" ignore_errors: yes register: ec2_term - with_items: groups.oo_hosts_to_terminate + with_items: "{{ groups.oo_hosts_to_terminate }}" when: "'oo_hosts_to_terminate' in groups" # Fail if any of the instances failed to terminate with an error other @@ -57,7 +57,7 @@ - fail: msg: "Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}" when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed" - with_items: ec2_term.results + with_items: "{{ ec2_term.results }}" - name: Stop instance if termination failed ec2: @@ -66,12 +66,12 @@ region: "{{ item.item.ec2_region }}" register: ec2_stop when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed" - with_items: ec2_term.results + with_items: "{{ ec2_term.results }}" - name: Rename stopped instances ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present args: tags: Name: "{{ item.item.item.ec2_tag_Name }}-terminate" - with_items: ec2_stop.results + with_items: "{{ ec2_stop.results }}" when: ec2_stop | changed 
diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml index d762203b2..ed05d61ed 100644 --- a/playbooks/aws/openshift-cluster/update.yml +++ b/playbooks/aws/openshift-cluster/update.yml @@ -7,7 +7,7 @@ - add_host: name: "{{ item }}" groups: l_oo_all_hosts - with_items: g_all_hosts + with_items: "{{ g_all_hosts }}" - hosts: l_oo_all_hosts gather_facts: no @@ -27,7 +27,7 @@ groups: oo_hosts_to_update ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: g_all_hosts | default([]) + with_items: "{{ g_all_hosts | default([]) }}" - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/README.md index ca01dbc9d..de4e34e2d 100644 --- a/playbooks/byo/openshift-cluster/upgrades/README.md +++ b/playbooks/byo/openshift-cluster/upgrades/README.md @@ -4,5 +4,5 @@ cluster. Additional notes for the associated upgrade playbooks are provided in their respective directories. # Upgrades available -- [OpenShift Enterprise 3.0 to latest minor release](v3_0_minor/README.md) -- [OpenShift Enterprise 3.0 to 3.1](v3_0_to_v3_1/README.md) +- [OpenShift Enterprise 3.2 to 3.3](v3_3/README.md) +- [OpenShift Enterprise 3.1 to 3.2](v3_2/README.md) diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml index 9be6becc1..834461e14 100644 --- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml 
@@ -25,13 +25,13 @@ tasks: - name: Prepare for Node evacuation command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=false + {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=false delegate_to: "{{ groups.oo_first_master.0 }}" when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade - name: Evacuate Node for Kubelet upgrade command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --evacuate --force + {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --evacuate --force delegate_to: "{{ groups.oo_first_master.0 }}" when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade @@ -40,7 +40,7 @@ - name: Set node schedulability command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=true + {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=true delegate_to: "{{ groups.oo_first_master.0 }}" when: openshift.node.schedulable | bool when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade and openshift.node.schedulable | bool diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml index 1755203a4..a3ab78ccf 100644 --- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml @@ -10,7 +10,7 @@ - add_host: name: "{{ item }}" groups: l_oo_all_hosts - with_items: g_all_hosts | default([]) + with_items: "{{ g_all_hosts | default([]) }}" changed_when: false - hosts: l_oo_all_hosts 
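Review bot: The `Set node schedulability` task in the second hunk (`@@ -40,7 +40,7 @@`) appears to carry two `when:` keys: `when: openshift.node.schedulable | bool`, then the longer `when: l_docker_upgrade is defined and ...`. With duplicate mapping keys the YAML loader keeps only the last one, so the first line is dead and obscures which condition gates making the node schedulable again. The second condition already ends with `and openshift.node.schedulable | bool`, so I would delete the first `when:` line. Indentation is lost in this view and the task continues outside the hunk, so confirm both keys sit on the same task.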
diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml new file mode 100644 index 000000000..c25f96212 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml @@ -0,0 +1,26 @@ +--- +- include: ../../../common/openshift-cluster/verify_ansible_version.yml + +- name: Create initial host groups for localhost + hosts: localhost + connection: local + become: no + gather_facts: no + tags: + - always + tasks: + - include_vars: ../cluster_hosts.yml + - add_host: + name: "{{ item }}" + groups: l_oo_all_hosts + with_items: "{{ g_all_hosts | default([]) }}" + +- name: Create initial host groups for all hosts + hosts: l_oo_all_hosts + gather_facts: no + tags: + - always + tasks: + - include_vars: ../cluster_hosts.yml + +- include: ../../../common/openshift-cluster/upgrades/etcd/main.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml index 5d549eee7..d92761e48 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml @@ -10,7 +10,7 @@ - add_host: name: "{{ item }}" groups: l_oo_all_hosts - with_items: g_all_hosts | default([]) + with_items: "{{ g_all_hosts | default([]) }}" - hosts: l_oo_all_hosts gather_facts: no diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml index 7a3829283..9a5d84751 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml 
@@ -92,10 +92,9 @@ vars: master_config_hook: "v3_3/master_config_upgrade.yml" +- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml + - include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml vars: node_config_hook: "v3_3/node_config_upgrade.yml" -- include: ../../../openshift-master/restart.yml - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml index d6af71827..c9338a960 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml @@ -98,3 +98,4 @@ master_config_hook: "v3_3/master_config_upgrade.yml" - include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml + diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md new file mode 100644 index 000000000..85b807dc6 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md @@ -0,0 +1,18 @@ +# v3.4 Major and Minor Upgrade Playbook + +## Overview +This playbook currently performs the +following steps. + + * Upgrade and restart master services + * Unschedule node. + * Upgrade and restart docker + * Upgrade and restart node services + * Modifies the subset of the configuration necessary + * Applies the latest cluster policies + * Updates the default router if one exists + * Updates the default registry if one exists + * Updates image streams and quickstarts + +## Usage +ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/roles b/playbooks/byo/openshift-cluster/upgrades/v3_4/roles new file mode 120000 index 000000000..6bc1a7aef --- /dev/null 
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/roles @@ -0,0 +1 @@ +../../../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml new file mode 100644 index 000000000..4f8a80ee8 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml 
@@ -0,0 +1,96 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../../../../common/openshift-cluster/upgrades/init.yml + tags: + - pre_upgrade + +# Configure the upgrade target for the common upgrade tasks: +- hosts: l_oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade + +- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this 
point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml + +- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml + +- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml + +- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml + diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml new file mode 100644 index 000000000..8cde2ac88 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml 
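Review bot: The `when:` on the `Set openshift_no_proxy_internal_hostnames` task wraps its condition in `{{ }}`, but `when` is already evaluated as a Jinja2 expression. The delimiters cause the rendered result to be evaluated a second time. Write it bare: `when: (openshift_http_proxy is defined or openshift_https_proxy is defined) and openshift_generate_no_proxy_hosts | default(True) | bool`. The same construct is repeated in the new `v3_4/upgrade_control_plane.yml` and `v3_4/upgrade_nodes.yml` files in this commit.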
@@ -0,0 +1,98 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../../../../common/openshift-cluster/upgrades/init.yml + tags: + - pre_upgrade + +# Configure the upgrade target for the common upgrade tasks: +- hosts: l_oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of 
protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml + +- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml + +- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml new file mode 100644 index 000000000..f385d4f22 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml 
@@ -0,0 +1,100 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. +# +- include: ../../../../common/openshift-cluster/upgrades/init.yml + tags: + - pre_upgrade + +# Configure the upgrade target for the common upgrade tasks: +- hosts: l_oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade +- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + 
openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml + +- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml index 0cf669ae3..0a163526a 100644 --- a/playbooks/byo/openshift-master/restart.yml +++ b/playbooks/byo/openshift-master/restart.yml @@ -8,7 +8,7 @@ - add_host: name: "{{ item }}" groups: l_oo_all_hosts - with_items: g_all_hosts + with_items: "{{ g_all_hosts }}" - hosts: l_oo_all_hosts gather_facts: no 
diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml index fced79262..279eeab21 100644 --- a/playbooks/byo/openshift-master/scaleup.yml +++ b/playbooks/byo/openshift-master/scaleup.yml @@ -8,7 +8,7 @@ - add_host: name: "{{ item }}" groups: l_oo_all_hosts - with_items: g_all_hosts + with_items: "{{ g_all_hosts }}" - hosts: l_oo_all_hosts gather_facts: no diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml index 5737bb0e0..902221931 100644 --- a/playbooks/byo/openshift-node/scaleup.yml +++ b/playbooks/byo/openshift-node/scaleup.yml @@ -8,7 +8,7 @@ - add_host: name: "{{ item }}" groups: l_oo_all_hosts - with_items: g_all_hosts + with_items: "{{ g_all_hosts }}" - hosts: l_oo_all_hosts gather_facts: no diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml index f093411ef..f36caeb36 100644 --- a/playbooks/byo/rhel_subscribe.yml +++ b/playbooks/byo/rhel_subscribe.yml @@ -8,7 +8,7 @@ - add_host: name: "{{ item }}" groups: l_oo_all_hosts - with_items: g_all_hosts + with_items: "{{ g_all_hosts }}" - hosts: l_oo_all_hosts gather_facts: no diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml index 26b31d313..825f46415 100644 --- a/playbooks/common/openshift-cluster/additional_config.yml +++ b/playbooks/common/openshift-cluster/additional_config.yml @@ -11,6 +11,8 @@ - role: openshift_examples registry_url: "{{ openshift.master.registry_url }}" when: openshift.common.install_examples | bool + - role: openshift_hosted_templates + registry_url: "{{ openshift.master.registry_url }}" - role: openshift_manageiq when: openshift.common.use_manageiq | bool - role: cockpit diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml index 7112a6084..2f384ddea 100644 
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml +++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml @@ -1,5 +1,21 @@ --- # NOTE: requires openshift_facts be run +- hosts: l_oo_all_hosts + gather_facts: no + tasks: + # See: + # https://bugzilla.redhat.com/show_bug.cgi?id=1395047 + # https://bugzilla.redhat.com/show_bug.cgi?id=1282961 + # https://github.com/openshift/openshift-ansible/issues/1138 + - name: Check for bad combinations of yum and subscription-manager + command: > + {{ repoquery_cmd }} --installed --qf '%{version}' "yum" + register: yum_ver_test + changed_when: false + - fail: + msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils. + when: "'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout" + - name: Determine openshift_version to configure on first master hosts: oo_first_master roles: diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index 2ba7fded5..ccbba54b4 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml 
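Review bot: The new `Check for bad combinations of yum and subscription-manager` play runs `{{ repoquery_cmd }}` on every host in `l_oo_all_hosts` with no `when:` guard, whereas package tasks added elsewhere in this commit are gated on `not openshift.common.is_atomic | bool`. On a host without yum or repoquery the command task would presumably fail outright, before the explanatory `fail` is reached. I would add `when: not openshift.common.is_atomic | bool` to both tasks, assuming that fact is populated at this point (the file's header comment says openshift_facts must already have run). Also confirm that the plugin API message is written to stdout rather than stderr, since the `fail` condition inspects only `yum_ver_test.stdout`.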
@@ -56,13 +56,13 @@ openshift_hosted_logging_ops_hostname: "{{ logging_ops_hostname }}" openshift_hosted_logging_master_public_url: "{{ logging_master_public_url }}" openshift_hosted_logging_elasticsearch_cluster_size: "{{ logging_elasticsearch_cluster_size }}" - openshift_hosted_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else 'false' }}" - openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else '' }}" - openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift.hosted.logging.storage_kind | default(none) is not none else '' }}" + openshift_hosted_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}" + openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}" + openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}" openshift_hosted_logging_elasticsearch_ops_cluster_size: "{{ logging_elasticsearch_ops_cluster_size }}" - openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else 'false' }}" - openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else '' }}" - openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es' if openshift.hosted.logging.storage_kind | default(none) is not none else '' }}" + openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}" + 
openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs' ] else '' }}" + openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) =='dynamic' else '' }}" - role: cockpit-ui - when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) + when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool) diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml index 4996c56a7..5f008a045 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml @@ -224,7 +224,7 @@ - name: Prepare for node evacuation command: > - {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig + {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig manage-node {{ openshift.node.nodename }} --schedulable=false delegate_to: "{{ groups.oo_first_master.0 }}" @@ -232,7 +232,7 @@ - name: Evacuate node command: > - {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig + {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig manage-node {{ openshift.node.nodename }} --evacuate --force delegate_to: "{{ groups.oo_first_master.0 }}" 
@@ -240,7 +240,7 @@ - name: Set node schedulability command: > - {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig + {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig manage-node {{ openshift.node.nodename }} --schedulable=true delegate_to: "{{ groups.oo_first_master.0 }}" when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool diff --git a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml index 32a3636aa..439df5ffd 100644 --- a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml @@ -1,5 +1,3 @@ -- include_vars: ../../../../roles/openshift_node/vars/main.yml - - name: Update systemd units include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }} diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml index 78f6c46f3..23cf8cf76 100644 --- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml +++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml 
@@ -22,11 +22,11 @@ - name: Create service signer certificate command: > - {{ openshift.common.admin_binary }} ca create-signer-cert - --cert=service-signer.crt - --key=service-signer.key - --name=openshift-service-serving-signer - --serial=service-signer.serial.txt + {{ openshift.common.client_binary }} adm ca create-signer-cert + --cert="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.crt + --key="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.key + --name="{{ remote_cert_create_tmpdir.stdout }}/"openshift-service-serving-signer + --serial="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.serial.txt args: chdir: "{{ remote_cert_create_tmpdir.stdout }}/" when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool) diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml index fc26d029e..ee75aa853 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml 
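Review bot: `--name` now gets the temp-directory prefix too, `--name="{{ remote_cert_create_tmpdir.stdout }}/"openshift-service-serving-signer`, but unlike `--cert`, `--key` and `--serial` it is not a file path; it is the name given to the signer certificate. As written, the service-serving CA would be created with a throwaway temp path in its name, which would presumably show up as the issuer of every certificate it signs and break anything that matches on the signer name. Keep `--name=openshift-service-serving-signer` as before. The path prefix on the other three flags also looks redundant with the existing `chdir`, and would read more clearly with the quotes around the whole value, e.g. `--cert="{{ remote_cert_create_tmpdir.stdout }}/service-signer.crt"`.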
@@ -22,22 +22,24 @@ command: > {{ repoquery_cmd }} --qf '%{version}' "docker" register: avail_docker_version + # Don't expect docker rpm to be available on hosts that don't already have it installed: + when: pkg_check.rc == 0 failed_when: false changed_when: false - fail: msg: This playbook requires access to Docker 1.10 or later # Disable the 1.10 requirement if the user set a specific Docker version - when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.10','<')) + when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.10','<'))) # Default l_docker_upgrade to False, we'll set to True if an upgrade is required: - set_fact: l_docker_upgrade: False -# Make sure a docker_verison is set if none was requested: +# Make sure a docker_version is set if none was requested: - set_fact: docker_version: "{{ avail_docker_version.stdout }}" - when: docker_version is not defined + when: pkg_check.rc == 0 and docker_version is not defined - name: Flag for Docker upgrade if necessary set_fact: diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml new file mode 100644 index 000000000..57b156b1c --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml 
@@ -0,0 +1,73 @@ +- name: Backup etcd + hosts: etcd_hosts_to_backup + vars: + embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" + timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" + roles: + - openshift_facts + tasks: + # Ensure we persist the etcd role for this host in openshift_facts + - openshift_facts: + role: etcd + local_facts: {} + when: "'etcd' not in openshift" + + - stat: path=/var/lib/openshift + register: var_lib_openshift + + - stat: path=/var/lib/origin + register: var_lib_origin + + - name: Create origin symlink if necessary + file: src=/var/lib/openshift/ dest=/var/lib/origin state=link + when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False + + # TODO: replace shell module with command and update later checks + # We assume to be using the data dir for all backups. + - name: Check available disk space for etcd backup + shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1 + register: avail_disk + + # TODO: replace shell module with command and update later checks + - name: Check current embedded etcd disk usage + shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1 + register: etcd_disk_usage + when: embedded_etcd | bool + + - name: Abort if insufficient disk space for etcd backup + fail: + msg: > + {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup, + {{ avail_disk.stdout }} Kb available. 
+ when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int) + + - name: Install etcd (for etcdctl) + action: "{{ ansible_pkg_mgr }} name=etcd state=present" + when: not openshift.common.is_atomic | bool + + - name: Generate etcd backup + command: > + etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }} + --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }} + + - set_fact: + etcd_backup_complete: True + + - name: Display location of etcd backup + debug: + msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}" + +- name: Gate on etcd backup + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + etcd_backup_completed: "{{ hostvars + | oo_select_keys(groups.etcd_hosts_to_backup) + | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}" + - set_fact: + etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}" + - fail: + msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" + when: etcd_backup_failed | length > 0 diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml new file mode 100644 index 000000000..35f391f8c --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml 
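Review bot: `timestamp` is defined in the play's `vars` as `lookup('pipe', 'date +%Y%m%d%H%M%S')`, and play vars are templated each time they are referenced. The `Generate etcd backup` command and the `Display location of etcd backup` message can therefore resolve to different seconds. An operator restoring from the printed path would then look for a directory that does not exist. Compute the backup directory once in a `set_fact` task (for example a fact named `etcd_backup_dir` built from the same expression) and reference that fact in both places. Separately, the free-space comparison is gated on `embedded_etcd | bool`, so hosts running external etcd get no space check before the backup.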
@@ -0,0 +1,47 @@
+---
+- name: Verify cluster is healthy pre-upgrade
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+
+- name: Get current image
+ shell: grep 'ExecStart=' /etc/systemd/system/etcd_container.service | awk '{print $NF}'
+ register: current_image
+
+- name: Set new_etcd_image
+ set_fact:
+ new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd3:' ~ upgrade_version ) if upgrade_version | version_compare('3.0','>=')
+ else current_image.stdout.split(':')[0] ~ ':' ~ upgrade_version }}"
+
+- name: Pull new etcd image
+ command: "docker pull {{ new_etcd_image }}"
+
+- name: Update to latest etcd image
+ replace:
+ dest: /etc/systemd/system/etcd_container.service
+ regexp: "{{ current_image.stdout }}$"
+ replace: "{{ new_etcd_image }}"
+
+- name: Restart etcd_container
+ systemd:
+ name: etcd_container
+ daemon_reload: yes
+ state: restarted
+
+## TODO: probably should just move this into the backup playbooks, also this
+## will fail on atomic host. We need to revisit how to do etcd backups there as
+## the container may be newer than etcdctl on the host. Assumes etcd3 obsoletes etcd (7.3.1)
+- name: Upgrade etcd for etcdctl when not atomic
+ action: "{{ ansible_pkg_mgr }} name=etcd ensure=latest"
+ when: not openshift.common.is_atomic | bool
+
+- name: Verify cluster is healthy
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+ register: etcdctl
+ until: etcdctl.rc == 0
+ retries: 3
+ delay: 10
+
+- name: Store new etcd_image
+ openshift_facts:
+ role: etcd
+ local_facts:
+ etcd_image: "{{ new_etcd_image }}"
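Review bot: The `Upgrade etcd for etcdctl when not atomic` task passes `ensure=latest`; the yum and dnf modules take `state`, so this looks like a typo for `action: "{{ ansible_pkg_mgr }} name=etcd state=latest"` and the task would likely fail on argument validation. In `Update to latest etcd image`, `regexp: "{{ current_image.stdout }}$"` treats the image reference as a regular expression, so each `.` matches any character; `regexp: "{{ current_image.stdout | regex_escape }}$"` matches it literally. `docker pull {{ new_etcd_image }}` addresses the image by tag only, so what runs after the restart is whatever the registry serves for that tag at pull time; pinning a digest would make the upgrade reproducible if the registry publishes one.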
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml
new file mode 100644
index 000000000..30232110e
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml
@@ -0,0 +1,23 @@
+---
+# F23 GA'd with etcd 2.0, currently has 2.2 in updates
+# F24 GA'd with etcd-2.2, currently has 2.2 in updates
+# F25 Beta currently has etcd 3.0
+- name: Verify cluster is healthy pre-upgrade
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+
+- name: Update etcd
+ package:
+ name: "etcd"
+ state: "latest"
+
+- name: Restart etcd
+ service:
+ name: etcd
+ state: restarted
+
+- name: Verify cluster is healthy
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+ register: etcdctl
+ until: etcdctl.rc == 0
+ retries: 3
+ delay: 10
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh
new file mode 120000
index 000000000..641e04e44
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh
@@ -0,0 +1 @@
+../roles/etcd/files/etcdctl.sh
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins new file mode 120000 index 000000000..27ddaa18b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins @@ -0,0 +1 @@ +../../../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins new file mode 120000 index 000000000..cf407f69b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins @@ -0,0 +1 @@ +../../../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml new file mode 100644 index 000000000..cce844403 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml 
@@ -0,0 +1,122 @@ +--- +# For 1.4/3.4 we want to upgrade everyone to etcd-3.0. etcd docs say to +# upgrade from 2.0.x to 2.1.x to 2.2.x to 2.3.x to 3.0.x. While this is a tedius +# task for RHEL and CENTOS it's simply not possible in Fedora unless you've +# mirrored packages on your own because only the GA and latest versions are +# available in the repos. So for Fedora we'll simply skip this, sorry. + +- include: ../../evaluate_groups.yml + tags: + - always + +- name: Evaluate additional groups for upgrade + hosts: localhost + connection: local + become: no + tasks: + - name: Evaluate etcd_hosts_to_upgrade + add_host: + name: "{{ item }}" + groups: etcd_hosts_to_upgrade, etcd_hosts_to_backup + with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" + +- name: Backup etcd before upgrading anything + include: backup.yml + vars: + backup_tag: "pre-upgrade-" + +- name: Drop etcdctl profiles + hosts: etcd_hosts_to_upgrade + tasks: + - include: roles/etcd/tasks/etcdctl.yml + +- name: Determine etcd version + hosts: etcd_hosts_to_upgrade + tasks: + - name: Record RPM based etcd version + command: rpm -qa --qf '%{version}' etcd\* + register: etcd_installed_version + failed_when: false + when: not openshift.common.is_containerized | bool + - name: Record containerized etcd version + command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\* + register: etcd_installed_version + failed_when: false + when: openshift.common.is_containerized | bool + +# I really dislike this copy/pasta but I wasn't able to find a way to get it to loop +# through hosts, then loop through tasks only when appropriate +- name: Upgrade to 2.1 + hosts: etcd_hosts_to_upgrade + serial: 1 + vars: + upgrade_version: '2.1' + tasks: + - include: rhel_tasks.yml + when: etcd_installed_version.stdout | default('99') | version_compare('2.1','<') and ansible_distribution == 'RedHat' and not 
openshift.common.is_containerized | bool + +- name: Upgrade RPM hosts to 2.2 + hosts: etcd_hosts_to_upgrade + serial: 1 + vars: + upgrade_version: '2.2' + tasks: + - include: rhel_tasks.yml + when: etcd_installed_version.stdout | default('99') | version_compare('2.2','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool + +- name: Upgrade containerized hosts to 2.2.5 + hosts: etcd_hosts_to_upgrade + serial: 1 + vars: + upgrade_version: 2.2.5 + tasks: + - include: containerized_tasks.yml + when: etcd_installed_version.stdout | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool + +- name: Upgrade RPM hosts to 2.3 + hosts: etcd_hosts_to_upgrade + serial: 1 + vars: + upgrade_version: '2.3' + tasks: + - include: rhel_tasks.yml + when: etcd_installed_version.stdout | default('99') | version_compare('2.3','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool + +- name: Upgrade containerized hosts to 2.3.7 + hosts: etcd_hosts_to_upgrade + serial: 1 + vars: + upgrade_version: 2.3.7 + tasks: + - include: containerized_tasks.yml + when: etcd_installed_version.stdout | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool + +- name: Upgrade RPM hosts to 3.0 + hosts: etcd_hosts_to_upgrade + serial: 1 + vars: + upgrade_version: '3.0' + tasks: + - include: rhel_tasks.yml + when: etcd_installed_version.stdout | default('99') | version_compare('3.0','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool + +- name: Upgrade containerized hosts to etcd3 image + hosts: etcd_hosts_to_upgrade + serial: 1 + vars: + upgrade_version: 3.0.3 + tasks: + - include: containerized_tasks.yml + when: etcd_installed_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool + +- name: Upgrade fedora to latest + hosts: etcd_hosts_to_upgrade + serial: 1 + tasks: + - include: 
fedora_tasks.yml + when: ansible_distribution == 'Fedora' and not openshift.common.is_containerized | bool + +- name: Backup etcd + include: backup.yml + vars: + backup_tag: "post-3.0-" diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml new file mode 100644 index 000000000..8e7dc9d9b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml @@ -0,0 +1,23 @@ +--- +- name: Verify cluster is healthy pre-upgrade + command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" + +- name: Update etcd package but exclude etcd3 + command: "{{ ansible_pkg_mgr }} install -y etcd-{{ upgrade_version }}\\* --exclude etcd3" + when: upgrade_version | version_compare('3.0','<') + +- name: Update etcd package not excluding etcd3 + command: "{{ ansible_pkg_mgr }} install -y etcd3-{{ upgrade_version }}\\*" + when: not upgrade_version | version_compare('3.0','<') + +- name: Restart etcd + service: + name: etcd + state: restarted + +- name: Verify cluster is healthy + command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" + register: etcdctl + until: etcdctl.rc == 0 + retries: 3 + delay: 10 diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/roles b/playbooks/common/openshift-cluster/upgrades/etcd/roles new file mode 120000 index 000000000..6bc1a7aef --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/roles @@ -0,0 +1 @@ +../../../../../roles
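Review bot: In the `Determine etcd version` play of main.yml, both tasks register `etcd_installed_version` under opposite `when:` conditions, and Ansible also stores the result of a skipped task, so on RPM hosts the skipped containerized task overwrites the RPM result with one that has no `stdout`. Every later `etcd_installed_version.stdout | default('99')` then evaluates to `'99'` and the RPM upgrade plays are skipped without any error. Registering under distinct names, for example `register: etcd_rpm_version` and `register: etcd_container_version`, and comparing the matching one in each play avoids this. Because of `failed_when: false`, a failed query leaves `stdout` empty rather than undefined, which plain `default('99')` does not cover; `default('99', true)` would.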
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index f3b3abe0d..fbdb7900a 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -10,7 +10,7 @@ - add_host: name: "{{ item }}" groups: l_oo_all_hosts - with_items: g_all_hosts | default([]) + with_items: "{{ g_all_hosts | default([]) }}" - hosts: l_oo_all_hosts gather_facts: no diff --git a/playbooks/common/openshift-cluster/upgrades/openvswitch-avoid-oom.conf b/playbooks/common/openshift-cluster/upgrades/openvswitch-avoid-oom.conf new file mode 120000 index 000000000..514526fe2 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/openvswitch-avoid-oom.conf @@ -0,0 +1 @@ +../../../../roles/openshift_node/templates/openvswitch-avoid-oom.conf
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml index e43954453..2bbcbe1f8 100644 --- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -17,10 +17,14 @@ # not already exist. We could have potentially done a replace --force to # create and update in one step. - openshift_examples + - openshift_hosted_templates # Update the existing templates - role: openshift_examples registry_url: "{{ openshift.master.registry_url }}" openshift_examples_import_command: replace + - role: openshift_hosted_templates + registry_url: "{{ openshift.master.registry_url }}" + openshift_hosted_templates_import_command: replace pre_tasks: - name: Collect all routers command: > @@ -41,7 +45,7 @@ {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}' --api-version=v1 - with_items: haproxy_routers + with_items: "{{ haproxy_routers }}" - name: Check for default registry command: > diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml index af77f140f..cd1139b29 100644 --- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml @@ -5,3 +5,7 @@ - name: Ensure python-yaml present for config upgrade action: "{{ ansible_pkg_mgr }} name=PyYAML state=present" when: not openshift.common.is_atomic | bool + +- name: Restart node service + service: name="{{ openshift.common.service_type }}-node" state=restarted + when: component == "node" 
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index 2c641e21e..57c25aa41 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml 
@@ -11,82 +11,24 @@ add_host: name: "{{ item }}" groups: etcd_hosts_to_backup - with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master + with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" -- name: Backup etcd - hosts: etcd_hosts_to_backup - vars: - embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" - timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" +# If facts cache were for some reason deleted, this fact may not be set, and if not set +# it will always default to true. This causes problems for the etcd data dir fact detection +# so we must first make sure this is set correctly before attempting the backup. +- name: Set master embedded_etcd fact + hosts: oo_masters_to_config roles: - openshift_facts tasks: - # Ensure we persist the etcd role for this host in openshift_facts - openshift_facts: - role: etcd - local_facts: {} - when: "'etcd' not in openshift" - - - stat: path=/var/lib/openshift - register: var_lib_openshift - - - stat: path=/var/lib/origin - register: var_lib_origin - - - name: Create origin symlink if necessary - file: src=/var/lib/openshift/ dest=/var/lib/origin state=link - when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False - - # TODO: replace shell module with command and update later checks - # We assume to be using the data dir for all backups. 
- - name: Check available disk space for etcd backup - shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1 - register: avail_disk - - # TODO: replace shell module with command and update later checks - - name: Check current embedded etcd disk usage - shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1 - register: etcd_disk_usage - when: embedded_etcd | bool - - - name: Abort if insufficient disk space for etcd backup - fail: - msg: > - {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup, - {{ avail_disk.stdout }} Kb available. - when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int) + role: master + local_facts: + embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level | default(2)) }}" - - name: Install etcd (for etcdctl) - action: "{{ ansible_pkg_mgr }} name=etcd state=latest" - when: not openshift.common.is_atomic | bool - - - name: Generate etcd backup - command: > - etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }} - --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }} - - - set_fact: - etcd_backup_complete: True - - - name: Display location of etcd backup - debug: - msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}" - - -- name: Gate on etcd backup - hosts: localhost - connection: local - become: no - tasks: - - set_fact: - etcd_backup_completed: "{{ hostvars - | oo_select_keys(groups.etcd_hosts_to_backup) - | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}" - - set_fact: - etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}" - - fail: - msg: "Upgrade cannot continue. 
The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" - when: etcd_backup_failed | length > 0 +- name: Backup etcd + include: ./etcd/backup.yml - name: Upgrade master packages hosts: oo_masters_to_config @@ -99,6 +41,8 @@ - include: rpm_upgrade.yml component=master when: not openshift.common.is_containerized | bool +# Create service signer cert when missing. Service signer certificate +# is added to master config in the master config hook for v3_3. - name: Determine if service signer cert must be created hosts: oo_first_master tasks: @@ -108,8 +52,6 @@ register: service_signer_cert_stat changed_when: false -# Create service signer cert when missing. Service signer certificate -# is added to master config in the master config hook for v3_3. - include: create_service_signer_cert.yml - name: Upgrade master config and systemd units @@ -128,13 +70,6 @@ - name: Update systemd units include: ../../../../roles/openshift_master/tasks/systemd_units.yml -# - name: Upgrade master configuration -# openshift_upgrade_config: -# from_version: '3.1' -# to_version: '3.2' -# role: master -# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}" - - name: Check for ca-bundle.crt stat: path: "{{ openshift.common.config_base }}/master/ca-bundle.crt" @@ -184,6 +119,10 @@ msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}" when: master_update_failed | length > 0 +# We are now ready to restart master services (or entire system +# depending on openshift_rolling_restart_mode): +- include: ../../openshift-master/restart.yml + ############################################################################### # Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints ############################################################################### 
@@ -200,19 +139,15 @@ # restart. skip_docker_role: True tasks: - - name: Verifying the correct commandline tools are available - shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}} - when: openshift.common.is_containerized | bool and verify_upgrade_version is defined - - name: Reconcile Cluster Roles command: > - {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-roles --additive-only=true --confirm run_once: true - name: Reconcile Cluster Role Bindings command: > - {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings --exclude-groups=system:authenticated --exclude-groups=system:authenticated:oauth @@ -222,9 +157,15 @@ when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool run_once: true + - name: Reconcile Jenkins Pipeline Role Bindings + command: > + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm + run_once: true + when: openshift.common.version_gte_3_4_or_1_4 | bool + - name: Reconcile Security Context Constraints command: > - {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm --additive-only=true + {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true run_once: true - set_fact: diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index 9b572dcdf..1f314c854 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml 
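Review bot: The rewritten `Reconcile Security Context Constraints` command, `{{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true`, is the only reconcile task in this play without `--config={{ openshift.common.config_base }}/master/admin.kubeconfig`, so it runs with whatever kubeconfig the remote user happens to have rather than the admin credentials the sibling tasks name. Adding the same `--config=` argument would make the four reconcile tasks consistent and explicit about which identity they use. The enclosing play starts outside this hunk, so a setting above that already covers this may exist.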
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -29,7 +29,7 @@ - name: Mark unschedulable if host is a node command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=false + {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false delegate_to: "{{ groups.oo_first_master.0 }}" when: inventory_hostname in groups.oo_nodes_to_upgrade # NOTE: There is a transient "object has been modified" error here, allow a couple @@ -41,7 +41,7 @@ - name: Evacuate Node for Kubelet upgrade command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --evacuate --force + {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force delegate_to: "{{ groups.oo_first_master.0 }}" when: inventory_hostname in groups.oo_nodes_to_upgrade tasks: @@ -64,7 +64,7 @@ - name: Set node schedulability command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=true + {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true delegate_to: "{{ groups.oo_first_master.0 }}" when: inventory_hostname in groups.oo_nodes_to_upgrade and was_schedulable | bool register: node_sched diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml index fd2bc24ae..f460612ba 100644 --- a/playbooks/common/openshift-etcd/service.yml +++ b/playbooks/common/openshift-etcd/service.yml @@ -10,7 +10,7 @@ - name: Evaluate g_service_etcd add_host: name={{ item }} groups=g_service_etcd - with_items: oo_host_group_exp | default([]) + with_items: "{{ oo_host_group_exp | default([]) }}" - name: Change etcd state on etcd instance(s) hosts: g_service_etcd 
diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml index e06a14c89..efc80edf9 100644 --- a/playbooks/common/openshift-loadbalancer/service.yml +++ b/playbooks/common/openshift-loadbalancer/service.yml @@ -10,7 +10,7 @@ - name: Evaluate g_service_lb add_host: name={{ item }} groups=g_service_lb - with_items: oo_host_group_exp | default([]) + with_items: "{{ oo_host_group_exp | default([]) }}" - name: Change state on lb instance(s) hosts: g_service_lb diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index a53c55c14..5fcb850a2 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -53,7 +53,7 @@ when: openshift_hosted_metrics_deployer_prefix is not defined - set_fact: openshift_hosted_metrics_deployer_version: "{{ lookup('oo_option', 'openshift_hosted_metrics_deployer_version') | default('latest') }}" - when: openshift_hosted_metrics_deployer_prefix is not defined + when: openshift_hosted_metrics_deployer_version is not defined roles: - openshift_facts post_tasks: diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml index 57a63cfee..5769ef5cd 100644 --- a/playbooks/common/openshift-master/restart.yml +++ b/playbooks/common/openshift-master/restart.yml 
@@ -66,63 +66,8 @@ current_host: "{{ exists.stat.exists }}" when: openshift.common.rolling_restart_mode == 'system' -- name: Determine which masters are currently active - hosts: oo_masters_to_config - any_errors_fatal: true - tasks: - - name: Check master service status - command: > - systemctl is-active {{ openshift.common.service_type }}-master - register: active_check_output - when: openshift.master.cluster_method | default(None) == 'pacemaker' - failed_when: false - changed_when: false - - set_fact: - is_active: "{{ active_check_output.stdout == 'active' }}" - when: openshift.master.cluster_method | default(None) == 'pacemaker' - -- name: Evaluate master groups - hosts: localhost - become: no - tasks: - - fail: - msg: > - Did not receive active status from any masters. Please verify pacemaker cluster. - when: "{{ hostvars[groups.oo_first_master.0].openshift.master.cluster_method | default(None) == 'pacemaker' and 'True' not in (hostvars - | oo_select_keys(groups['oo_masters_to_config']) - | oo_collect('is_active') - | list) }}" - - name: Evaluate oo_active_masters - add_host: - name: "{{ item }}" - groups: oo_active_masters - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ groups.oo_masters_to_config | default([]) }}" - when: (hostvars[item]['is_active'] | default(false)) | bool - - name: Evaluate oo_current_masters - add_host: - name: "{{ item }}" - groups: oo_current_masters - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ groups.oo_masters_to_config | default([]) }}" - when: (hostvars[item]['current_host'] | default(false)) | bool - -- name: Validate pacemaker cluster - hosts: oo_active_masters - tasks: - - name: Retrieve pcs status - command: pcs status - register: pcs_status_output - changed_when: false - - fail: - msg: > - Pacemaker cluster validation failed. One or more nodes are not online. 
- when: not (pcs_status_output.stdout | validate_pcs_cluster(groups.oo_masters_to_config)) | bool - - name: Restart masters - hosts: oo_masters_to_config:!oo_active_masters:!oo_current_masters + hosts: oo_masters_to_config vars: openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" serial: 1 @@ -132,20 +77,3 @@ - include: restart_services.yml when: openshift.common.rolling_restart_mode == 'services' -- name: Restart active masters - hosts: oo_active_masters - serial: 1 - tasks: - - include: restart_hosts_pacemaker.yml - when: openshift.common.rolling_restart_mode == 'system' - - include: restart_services_pacemaker.yml - when: openshift.common.rolling_restart_mode == 'services' - -- name: Restart current masters - hosts: oo_current_masters - serial: 1 - tasks: - - include: restart_hosts.yml - when: openshift.common.rolling_restart_mode == 'system' - - include: restart_services.yml - when: openshift.common.rolling_restart_mode == 'services' diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml index ff206f5a2..b1c36718c 100644 --- a/playbooks/common/openshift-master/restart_hosts.yml +++ b/playbooks/common/openshift-master/restart_hosts.yml @@ -5,8 +5,8 @@ poll: 0 ignore_errors: true become: yes -# When cluster_method != pacemaker we can ensure the api_port is -# available. + +# Ensure the api_port is available. - name: Wait for master API to come back online become: no local_action: 
@@ -15,25 +15,3 @@ state=started delay=10 port="{{ openshift.master.api_port }}" - when: openshift.master.cluster_method != 'pacemaker' -- name: Wait for master to start - become: no - local_action: - module: wait_for - host="{{ inventory_hostname }}" - state=started - delay=10 - port=22 - when: openshift.master.cluster_method == 'pacemaker' -- name: Wait for master to become available - command: pcs status - register: pcs_status_output - until: pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname]) | bool - retries: 15 - delay: 2 - changed_when: false - when: openshift.master.cluster_method == 'pacemaker' -- fail: - msg: > - Pacemaker cluster validation failed {{ inventory hostname }} is not online. - when: openshift.master.cluster_method == 'pacemaker' and not (pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname])) | bool diff --git a/playbooks/common/openshift-master/restart_hosts_pacemaker.yml b/playbooks/common/openshift-master/restart_hosts_pacemaker.yml deleted file mode 100644 index c9219e8de..000000000 --- a/playbooks/common/openshift-master/restart_hosts_pacemaker.yml +++ /dev/null @@ -1,25 +0,0 @@ -- name: Fail over master resource - command: > - pcs resource move master {{ hostvars | oo_select_keys(groups['oo_masters_to_config']) | oo_collect('openshift.common.hostname', {'is_active': 'False'}) | list | first }} -- name: Wait for master API to come back online - become: no - local_action: - module: wait_for - host="{{ openshift.master.cluster_hostname }}" - state=started - delay=10 - port="{{ openshift.master.api_port }}" -- name: Restart master system - # https://github.com/ansible/ansible/issues/10616 - shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart" - async: 1 - poll: 0 - ignore_errors: true - become: yes -- name: Wait for master to start - become: no - local_action: - module: wait_for - host="{{ inventory_hostname }}" - state=started - delay=10 
diff --git a/playbooks/common/openshift-master/restart_services_pacemaker.yml b/playbooks/common/openshift-master/restart_services_pacemaker.yml deleted file mode 100644 index e738f3fb6..000000000 --- a/playbooks/common/openshift-master/restart_services_pacemaker.yml +++ /dev/null @@ -1,10 +0,0 @@ -- name: Restart master services - command: pcs resource restart master -- name: Wait for master API to come back online - become: no - local_action: - module: wait_for - host="{{ openshift.master.cluster_hostname }}" - state=started - delay=10 - port="{{ openshift.master.api_port }}" diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml index 56ed09e1b..18e5c665f 100644 --- a/playbooks/common/openshift-master/scaleup.yml +++ b/playbooks/common/openshift-master/scaleup.yml @@ -33,7 +33,7 @@ service: name={{ openshift.common.service_type }}-master-controllers state=restarted - name: verify api server command: > - curl --silent + curl --silent --tlsv1.2 {% if openshift.common.version_gte_3_2_or_1_2 | bool %} --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt {% else %} diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml index f60c5a2b5..5e5198335 100644 --- a/playbooks/common/openshift-master/service.yml +++ b/playbooks/common/openshift-master/service.yml @@ -10,7 +10,7 @@ - name: Evaluate g_service_masters add_host: name={{ item }} groups=g_service_masters - with_items: oo_host_group_exp | default([]) + with_items: "{{ oo_host_group_exp | default([]) }}" - name: Change state on master instance(s) hosts: g_service_masters diff --git a/playbooks/common/openshift-nfs/service.yml b/playbooks/common/openshift-nfs/service.yml index 20c8ca248..8468014da 100644 --- a/playbooks/common/openshift-nfs/service.yml +++ b/playbooks/common/openshift-nfs/service.yml 
@@ -8,7 +8,7 @@ - name: Evaluate g_service_nfs add_host: name={{ item }} groups=g_service_nfs - with_items: oo_host_group_exp | default([]) + with_items: "{{ oo_host_group_exp | default([]) }}" - name: Change state on nfs instance(s) hosts: g_service_nfs diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 5191662f7..4824eeef3 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -165,7 +165,7 @@ # Using curl here since the uri module requires python-httplib2 and # wait_for port doesn't provide health information. command: > - curl --silent + curl --silent --tlsv1.2 {% if openshift.common.version_gte_3_2_or_1_2 | bool %} --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt {% else %} diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml index 0f07add2a..33095c9fb 100644 --- a/playbooks/common/openshift-node/service.yml +++ b/playbooks/common/openshift-node/service.yml @@ -10,7 +10,7 @@ - name: Evaluate g_service_nodes add_host: name={{ item }} groups=g_service_nodes - with_items: oo_host_group_exp | default([]) + with_items: "{{ oo_host_group_exp | default([]) }}" - name: Change state on node instance(s) hosts: g_service_nodes diff --git a/playbooks/gce/openshift-cluster/library/gce.py b/playbooks/gce/openshift-cluster/library/gce.py deleted file mode 100644 index fcaa3b850..000000000 --- a/playbooks/gce/openshift-cluster/library/gce.py +++ /dev/null 
@@ -1,543 +0,0 @@ -#!/usr/bin/python -# Copyright 2013 Google Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -DOCUMENTATION = ''' ---- -module: gce -version_added: "1.4" -short_description: create or terminate GCE instances -description: - - Creates or terminates Google Compute Engine (GCE) instances. See - U(https://cloud.google.com/products/compute-engine) for an overview. - Full install/configuration instructions for the gce* modules can - be found in the comments of ansible/test/gce_tests.py. 
-options: - image: - description: - - image string to use for the instance - required: false - default: "debian-7" - instance_names: - description: - - a comma-separated list of instance names to create or destroy - required: false - default: null - machine_type: - description: - - machine type to use for the instance, use 'n1-standard-1' by default - required: false - default: "n1-standard-1" - metadata: - description: - - a hash/dictionary of custom data for the instance; - '{"key":"value", ...}' - required: false - default: null - service_account_email: - version_added: "1.5.1" - description: - - service account email - required: false - default: null - service_account_permissions: - version_added: "2.0" - description: - - service account permissions (see - U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create), - --scopes section for detailed information) - required: false - default: null - choices: [ - "bigquery", "cloud-platform", "compute-ro", "compute-rw", - "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write", - "monitoring", "sql", "sql-admin", "storage-full", "storage-ro", - "storage-rw", "taskqueue", "userinfo-email" - ] - pem_file: - version_added: "1.5.1" - description: - - path to the pem file associated with the service account email - required: false - default: null - project_id: - version_added: "1.5.1" - description: - - your GCE project ID - required: false - default: null - name: - description: - - identifier when working with a single instance - required: false - network: - description: - - name of the network, 'default' will be used if not specified - required: false - default: "default" - persistent_boot_disk: - description: - - if set, create the instance with a persistent boot disk - required: false - default: "false" - disks: - description: - - a list of persistent disks to attach to the instance; a string value - gives the name of the disk; alternatively, a dictionary value can - define 'name' and 
'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry - will be the boot disk (which must be READ_WRITE). - required: false - default: null - version_added: "1.7" - state: - description: - - desired state of the resource - required: false - default: "present" - choices: ["active", "present", "absent", "deleted"] - tags: - description: - - a comma-separated list of tags to associate with the instance - required: false - default: null - zone: - description: - - the GCE zone to use - required: true - default: "us-central1-a" - ip_forward: - version_added: "1.9" - description: - - set to true if the instance can forward ip packets (useful for - gateways) - required: false - default: "false" - external_ip: - version_added: "1.9" - description: - - type of external ip, ephemeral by default - required: false - default: "ephemeral" - disk_auto_delete: - version_added: "1.9" - description: - - if set boot disk will be removed after instance destruction - required: false - default: "true" - -requirements: - - "python >= 2.6" - - "apache-libcloud >= 0.13.3" -notes: - - Either I(name) or I(instance_names) is required. -author: "Eric Johnson (@erjohnso) <erjohnso@google.com>" -''' - -EXAMPLES = ''' -# Basic provisioning example. Create a single Debian 7 instance in the -# us-central1-a Zone of n1-standard-1 machine type. 
-- local_action: - module: gce - name: test-instance - zone: us-central1-a - machine_type: n1-standard-1 - image: debian-7 - -# Example using defaults and with metadata to create a single 'foo' instance -- local_action: - module: gce - name: foo - metadata: '{"db":"postgres", "group":"qa", "id":500}' - - -# Launch instances from a control node, runs some tasks on the new instances, -# and then terminate them -- name: Create a sandbox instance - hosts: localhost - vars: - names: foo,bar - machine_type: n1-standard-1 - image: debian-6 - zone: us-central1-a - service_account_email: unique-email@developer.gserviceaccount.com - pem_file: /path/to/pem_file - project_id: project-id - tasks: - - name: Launch instances - local_action: gce instance_names={{names}} machine_type={{machine_type}} - image={{image}} zone={{zone}} - service_account_email={{ service_account_email }} - pem_file={{ pem_file }} project_id={{ project_id }} - register: gce - - name: Wait for SSH to come up - local_action: wait_for host={{item.public_ip}} port=22 delay=10 - timeout=60 state=started - with_items: {{gce.instance_data}} - -- name: Configure instance(s) - hosts: launched - sudo: True - roles: - - my_awesome_role - - my_awesome_tasks - -- name: Terminate instances - hosts: localhost - connection: local - tasks: - - name: Terminate instances that were previously launched - local_action: - module: gce - state: 'absent' - instance_names: {{gce.instance_names}} - -''' - -try: - import libcloud - from libcloud.compute.types import Provider - from libcloud.compute.providers import get_driver - from libcloud.common.google import GoogleBaseError, QuotaExceededError, \ - ResourceExistsError, ResourceInUseError, ResourceNotFoundError - _ = Provider.GCE - HAS_LIBCLOUD = True -except ImportError: - HAS_LIBCLOUD = False - -try: - from ast import literal_eval - HAS_PYTHON26 = True -except ImportError: - HAS_PYTHON26 = False - - -def get_instance_info(inst): - """Retrieves instance information from an 
instance object and returns it - as a dictionary. - - """ - metadata = {} - if 'metadata' in inst.extra and 'items' in inst.extra['metadata']: - for md in inst.extra['metadata']['items']: - metadata[md['key']] = md['value'] - - try: - netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1] - except: - netname = None - if 'disks' in inst.extra: - disk_names = [disk_info['source'].split('/')[-1] - for disk_info - in sorted(inst.extra['disks'], - key=lambda disk_info: disk_info['index'])] - else: - disk_names = [] - - if len(inst.public_ips) == 0: - public_ip = None - else: - public_ip = inst.public_ips[0] - - return({ - 'image': inst.image is not None and inst.image.split('/')[-1] or None, - 'disks': disk_names, - 'machine_type': inst.size, - 'metadata': metadata, - 'name': inst.name, - 'network': netname, - 'private_ip': inst.private_ips[0], - 'public_ip': public_ip, - 'status': ('status' in inst.extra) and inst.extra['status'] or None, - 'tags': ('tags' in inst.extra) and inst.extra['tags'] or [], - 'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None, - }) - - -def create_instances(module, gce, instance_names): - """Creates new instances. Attributes other than instance_names are picked - up from 'module' - - module : AnsibleModule object - gce: authenticated GCE libcloud driver - instance_names: python list of instance names to create - - Returns: - A list of dictionaries with instance information - about the instances that were launched. 
- - """ - image = module.params.get('image') - machine_type = module.params.get('machine_type') - metadata = module.params.get('metadata') - network = module.params.get('network') - persistent_boot_disk = module.params.get('persistent_boot_disk') - disks = module.params.get('disks') - state = module.params.get('state') - tags = module.params.get('tags') - zone = module.params.get('zone') - ip_forward = module.params.get('ip_forward') - external_ip = module.params.get('external_ip') - disk_auto_delete = module.params.get('disk_auto_delete') - service_account_permissions = module.params.get('service_account_permissions') - service_account_email = module.params.get('service_account_email') - - if external_ip == "none": - external_ip = None - - new_instances = [] - changed = False - - lc_image = gce.ex_get_image(image) - lc_disks = [] - disk_modes = [] - for i, disk in enumerate(disks or []): - if isinstance(disk, dict): - lc_disks.append(gce.ex_get_volume(disk['name'])) - disk_modes.append(disk['mode']) - else: - lc_disks.append(gce.ex_get_volume(disk)) - # boot disk is implicitly READ_WRITE - disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE') - lc_network = gce.ex_get_network(network) - lc_machine_type = gce.ex_get_size(machine_type) - lc_zone = gce.ex_get_zone(zone) - - # Try to convert the user's metadata value into the format expected - # by GCE. First try to ensure user has proper quoting of a - # dictionary-like syntax using 'literal_eval', then convert the python - # dict into a python list of 'key' / 'value' dicts. Should end up - # with: - # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...] 
- if metadata: - if isinstance(metadata, dict): - md = metadata - else: - try: - md = literal_eval(str(metadata)) - if not isinstance(md, dict): - raise ValueError('metadata must be a dict') - except ValueError as e: - module.fail_json(msg='bad metadata: %s' % str(e)) - except SyntaxError as e: - module.fail_json(msg='bad metadata syntax') - - if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15': - items = [] - for k, v in md.items(): - items.append({"key": k, "value": v}) - metadata = {'items': items} - else: - metadata = md - - ex_sa_perms = [] - bad_perms = [] - if service_account_permissions: - for perm in service_account_permissions: - if perm not in gce.SA_SCOPES_MAP.keys(): - bad_perms.append(perm) - if len(bad_perms) > 0: - module.fail_json(msg='bad permissions: %s' % str(bad_perms)) - if service_account_email: - ex_sa_perms.append({'email': service_account_email}) - else: - ex_sa_perms.append({'email': "default"}) - ex_sa_perms[0]['scopes'] = service_account_permissions - - # These variables all have default values but check just in case - if not lc_image or not lc_network or not lc_machine_type or not lc_zone: - module.fail_json(msg='Missing required create instance variable', - changed=False) - - for name in instance_names: - pd = None - if lc_disks: - pd = lc_disks[0] - elif persistent_boot_disk: - try: - pd = gce.create_volume(None, "%s" % name, image=lc_image) - except ResourceExistsError: - pd = gce.ex_get_volume("%s" % name, lc_zone) - inst = None - try: - inst = gce.create_node( - name, lc_machine_type, lc_image, location=lc_zone, - ex_network=network, ex_tags=tags, ex_metadata=metadata, - ex_boot_disk=pd, ex_can_ip_forward=ip_forward, - external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete, - ex_service_accounts=ex_sa_perms - ) - changed = True - except ResourceExistsError: - inst = gce.ex_get_node(name, lc_zone) - except GoogleBaseError as e: - module.fail_json(msg='Unexpected error attempting to create ' + - 'instance 
%s, error: %s' % (name, e.value)) - - for i, lc_disk in enumerate(lc_disks): - # Check whether the disk is already attached - if (len(inst.extra['disks']) > i): - attached_disk = inst.extra['disks'][i] - if attached_disk['source'] != lc_disk.extra['selfLink']: - module.fail_json( - msg=("Disk at index %d does not match: requested=%s found=%s" % ( - i, lc_disk.extra['selfLink'], attached_disk['source']))) - elif attached_disk['mode'] != disk_modes[i]: - module.fail_json( - msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % ( - i, disk_modes[i], attached_disk['mode']))) - else: - continue - gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i]) - # Work around libcloud bug: attached volumes don't get added - # to the instance metadata. get_instance_info() only cares about - # source and index. - if len(inst.extra['disks']) != i+1: - inst.extra['disks'].append( - {'source': lc_disk.extra['selfLink'], 'index': i}) - - if inst: - new_instances.append(inst) - - instance_names = [] - instance_json_data = [] - for inst in new_instances: - d = get_instance_info(inst) - instance_names.append(d['name']) - instance_json_data.append(d) - - return (changed, instance_json_data, instance_names) - - -def terminate_instances(module, gce, instance_names, zone_name): - """Terminates a list of instances. - - module: Ansible module object - gce: authenticated GCE connection object - instance_names: a list of instance names to terminate - zone_name: the zone where the instances reside prior to termination - - Returns a dictionary of instance names that were terminated. 
- - """ - changed = False - terminated_instance_names = [] - for name in instance_names: - inst = None - try: - inst = gce.ex_get_node(name, zone_name) - except ResourceNotFoundError: - pass - except Exception as e: - module.fail_json(msg=unexpected_error_msg(e), changed=False) - if inst: - gce.destroy_node(inst) - terminated_instance_names.append(inst.name) - changed = True - - return (changed, terminated_instance_names) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - image=dict(default='debian-7'), - instance_names=dict(), - machine_type=dict(default='n1-standard-1'), - metadata=dict(), - name=dict(), - network=dict(default='default'), - persistent_boot_disk=dict(type='bool', default=False), - disks=dict(type='list'), - state=dict(choices=['active', 'present', 'absent', 'deleted'], - default='present'), - tags=dict(type='list'), - zone=dict(default='us-central1-a'), - service_account_email=dict(), - service_account_permissions=dict(type='list'), - pem_file=dict(), - project_id=dict(), - ip_forward=dict(type='bool', default=False), - external_ip=dict(choices=['ephemeral', 'none'], - default='ephemeral'), - disk_auto_delete=dict(type='bool', default=True), - ) - ) - - if not HAS_PYTHON26: - module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+") - if not HAS_LIBCLOUD: - module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module') - - gce = gce_connect(module) - - image = module.params.get('image') - instance_names = module.params.get('instance_names') - machine_type = module.params.get('machine_type') - metadata = module.params.get('metadata') - name = module.params.get('name') - network = module.params.get('network') - persistent_boot_disk = module.params.get('persistent_boot_disk') - state = module.params.get('state') - tags = module.params.get('tags') - zone = module.params.get('zone') - ip_forward = module.params.get('ip_forward') - changed = False - - inames = [] - if isinstance(instance_names, 
list): - inames = instance_names - elif isinstance(instance_names, str): - inames = instance_names.split(',') - if name: - inames.append(name) - if not inames: - module.fail_json(msg='Must specify a "name" or "instance_names"', - changed=False) - if not zone: - module.fail_json(msg='Must specify a "zone"', changed=False) - - json_output = {'zone': zone} - if state in ['absent', 'deleted']: - json_output['state'] = 'absent' - (changed, terminated_instance_names) = terminate_instances( - module, gce, inames, zone) - - # based on what user specified, return the same variable, although - # value could be different if an instance could not be destroyed - if instance_names: - json_output['instance_names'] = terminated_instance_names - elif name: - json_output['name'] = name - - elif state in ['active', 'present']: - json_output['state'] = 'present' - (changed, instance_data, instance_name_list) = create_instances( - module, gce, inames) - json_output['instance_data'] = instance_data - if instance_names: - json_output['instance_names'] = instance_name_list - elif name: - json_output['name'] = name - - json_output['changed'] = changed - module.exit_json(**json_output) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.gce import * -if __name__ == '__main__': - main() diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml index c29cac272..34ab09533 100644 --- a/playbooks/gce/openshift-cluster/list.yml +++ b/playbooks/gce/openshift-cluster/list.yml 
@@ -16,18 +16,8 @@ groups: oo_list_hosts ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true)) - -- name: List Hosts - hosts: oo_list_hosts - -- name: List Hosts - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: + oo_public_ipv4: "{{ hostvars[item].gce_public_ip }}" + oo_private_ipv4: "{{ hostvars[item].gce_private_ip }}" + with_items: "{{ groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true)) }}" - debug: msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}" diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml index 60cf21a5b..b7604580c 100644 --- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml @@ -9,6 +9,7 @@ project_id: "{{ lookup('env', 'gce_project_id') }}" zone: "{{ lookup('env', 'zone') }}" network: "{{ lookup('env', 'network') }}" + subnetwork: "{{ lookup('env', 'subnetwork') | default(omit, True) }}" # unsupported in 1.9.+ #service_account_permissions: "datastore,logging-write" tags: 
@@ -49,11 +50,11 @@ gce_public_ip: "{{ item.public_ip }}" gce_private_ip: "{{ item.private_ip }}" openshift_node_labels: "{{ node_label }}" - with_items: gce.instance_data | default([], true) + with_items: "{{ gce.instance_data | default([], true) }}" - name: Wait for ssh wait_for: port=22 host={{ item.public_ip }} - with_items: gce.instance_data | default([], true) + with_items: "{{ gce.instance_data | default([], true) }}" - name: Wait for user setup command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup" @@ -61,4 +62,4 @@ until: result.rc == 0 retries: 30 delay: 5 - with_items: gce.instance_data | default([], true) + with_items: "{{ gce.instance_data | default([], true) }}" diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml index 6a0ac088a..68e60f9d4 100644 --- a/playbooks/gce/openshift-cluster/terminate.yml +++ b/playbooks/gce/openshift-cluster/terminate.yml @@ -12,7 +12,7 @@ groups: oo_hosts_to_terminate ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: (groups['tag_clusterid-' ~ cluster_id] | default([])) | difference(['localhost']) + with_items: "{{ (groups['tag_clusterid-' ~ cluster_id] | default([])) | difference(['localhost']) }}" - name: Unsubscribe VMs hosts: oo_hosts_to_terminate @@ -43,7 +43,7 @@ pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" project_id: "{{ lookup('env', 'gce_project_id') }}" zone: "{{ lookup('env', 'zone') }}" - with_items: groups['oo_hosts_to_terminate'] | default([], true) + with_items: "{{ groups['oo_hosts_to_terminate'] | default([], true) }}" when: item is defined #- include: ../openshift-node/terminate.yml 
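Review bot: The "Wait for user setup" task kept as context in the second hunk connects with `-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null`, so the first SSH contact with each new instance accepts any host key and records nothing for later comparison. If that is a deliberate trade-off for freshly created instances, a comment above the task should say so, for example `# Host key is not verified here: the instance was just created and its key is not yet known`. Whether later Ansible connections to these hosts verify the key is not visible in this hunk.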
diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml index 332f27da7..6d2af3d26 100644 --- a/playbooks/gce/openshift-cluster/update.yml +++ b/playbooks/gce/openshift-cluster/update.yml @@ -7,7 +7,7 @@ - add_host: name: "{{ item }}" groups: l_oo_all_hosts - with_items: g_all_hosts + with_items: "{{ g_all_hosts }}" - hosts: l_oo_all_hosts gather_facts: no @@ -27,7 +27,7 @@ groups: oo_hosts_to_update ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: g_all_hosts | default([]) + with_items: "{{ g_all_hosts | default([]) }}" - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml index eb64544db..579cd7ac6 100644 --- a/playbooks/libvirt/openshift-cluster/list.yml +++ b/playbooks/libvirt/openshift-cluster/list.yml @@ -16,18 +16,8 @@ groups: oo_list_hosts ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: groups[scratch_group] | default([]) | difference(['localhost']) - -- name: List Hosts - hosts: oo_list_hosts - -- name: List Hosts - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: + oo_public_ipv4: "" + oo_private_ipv4: "{{ hostvars[item].libvirt_ip_address }}" + with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}" - debug: msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}" diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml index df5c52f2d..81e6d8f05 100644 --- a/playbooks/libvirt/openshift-cluster/terminate.yml +++ b/playbooks/libvirt/openshift-cluster/terminate.yml 
@@ -1,5 +1,5 @@ --- -# TODO: does not handle a non-existant cluster gracefully +# TODO: does not handle a non-existent cluster gracefully - name: Terminate instance(s) hosts: localhost diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml index 755090f94..20ce47c07 100644 --- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml +++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml @@ -45,7 +45,7 @@ parameters: node_port_incoming: type: string label: Source of node port connections - description: Authorized sources targetting node ports + description: Authorized sources targeting node ports default: 0.0.0.0/0 num_etcd: diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml index de68f5207..6c6f671be 100644 --- a/playbooks/openstack/openshift-cluster/list.yml +++ b/playbooks/openstack/openshift-cluster/list.yml @@ -17,18 +17,8 @@ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_ssh_host: "{{ hostvars[item].ansible_ssh_host | default(item) }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" + oo_public_ipv4: "{{ hostvars[item].openstack.public_v4 }}" + oo_private_ipv4: "{{ hostvars[item].openstack.private_v4 }}" with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}" - -- name: List Hosts - hosts: oo_list_hosts - -- name: List Hosts - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - debug: msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster('meta-') }}" diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml index 332f27da7..6d2af3d26 100644 --- a/playbooks/openstack/openshift-cluster/update.yml +++ b/playbooks/openstack/openshift-cluster/update.yml 
@@ -7,7 +7,7 @@ - add_host: name: "{{ item }}" groups: l_oo_all_hosts - with_items: g_all_hosts + with_items: "{{ g_all_hosts }}" - hosts: l_oo_all_hosts gather_facts: no @@ -27,7 +27,7 @@ groups: oo_hosts_to_update ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: g_all_hosts | default([]) + with_items: "{{ g_all_hosts | default([]) }}" - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 000000000..e55ef5f0b --- /dev/null +++ b/requirements.txt @@ -0,0 +1,2 @@ +ansible>=2.1 +pyOpenSSL diff --git a/roles/docker/README.md b/roles/docker/README.md index 6b5ee4421..1f0d94da0 100644 --- a/roles/docker/README.md +++ b/roles/docker/README.md @@ -17,7 +17,7 @@ docker_udev_workaround: raises udevd timeout to 5 minutes (https://bugzilla.redh Dependencies ------------ -None +Depends on the os_firewall role. Example Playbook ---------------- diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml index 6e2c98601..3d362158d 100644 --- a/roles/docker/meta/main.yml +++ b/roles/docker/meta/main.yml @@ -9,4 +9,6 @@ galaxy_info: - name: EL versions: - 7 -dependencies: [] +dependencies: + - role: os_firewall + os_firewall_use_firewalld: False diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 7147aa2d4..9b7ef0830 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -1,7 +1,4 @@ --- -- stat: path=/etc/sysconfig/docker-storage - register: docker_storage_check - - name: Get current installed Docker version command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker" when: not openshift.common.is_atomic | bool 
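Review bot: The lower bound `ansible>=2.1` in the new requirements.txt looks too low for this commit, because roles/docker/tasks/main.yml now calls the `systemd` module, which as far as I know first shipped in Ansible 2.2; `ansible>=2.2` would match what the playbooks use. `pyOpenSSL` has no version constraint at all, so the `pip install -r requirements.txt` step in .travis.yml installs whatever release is current on each run. Pinning both to tested versions, or adding a constraints file, would make builds reproducible and keep an unexpected upstream release out of the install path.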
@@ -46,15 +43,16 @@ action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present" when: not openshift.common.is_atomic | bool -- name: Start the Docker service - service: - name: docker - enabled: yes - state: started - register: start_result +- name: Ensure docker.service.d directory exists + file: + path: "{{ docker_systemd_dir }}" + state: directory -- set_fact: - docker_service_status_changed: start_result | changed +# Extend the default Docker service unit file +- name: Configure Docker service unit file + template: + dest: "{{ docker_systemd_dir }}/custom.conf" + src: custom.conf.j2 - include: udev_workaround.yml when: docker_udev_workaround | default(False) | bool @@ -113,4 +111,15 @@ notify: - restart docker +- name: Start the Docker service + systemd: + name: docker + enabled: yes + state: started + daemon_reload: yes + register: start_result + +- set_fact: + docker_service_status_changed: start_result | changed + - meta: flush_handlers diff --git a/roles/docker/tasks/udev_workaround.yml b/roles/docker/tasks/udev_workaround.yml index 3c236f698..aa7af0cb3 100644 --- a/roles/docker/tasks/udev_workaround.yml +++ b/roles/docker/tasks/udev_workaround.yml @@ -14,7 +14,7 @@ copy: content: | [Service] - #Need blank ExecStart to "clear" pre-exising one + #Need blank ExecStart to "clear" pre-existing one ExecStart= {{ udevw_udev_start_cmd.stdout }} --event-timeout=300 dest: "{{ udevw_udevd_dir }}/override.conf" diff --git a/roles/docker/templates/custom.conf.j2 b/roles/docker/templates/custom.conf.j2 new file mode 100644 index 000000000..53ed56abc --- /dev/null +++ b/roles/docker/templates/custom.conf.j2 @@ -0,0 +1,5 @@ +# {{ ansible_managed }} + +[Unit] +Requires=iptables.service +After=iptables.service diff --git a/roles/docker/vars/main.yml b/roles/docker/vars/main.yml index f81f99e2b..5237ed8f2 100644 --- a/roles/docker/vars/main.yml +++ b/roles/docker/vars/main.yml 
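Review bot: The re-added `set_fact` near the end of the last roles/docker/tasks/main.yml hunk assigns `docker_service_status_changed: start_result | changed` without Jinja delimiters, so the fact is presumably the literal string rather than the boolean result of the filter. The etcd role elsewhere in this diff writes the equivalent line as `etcd_service_status_changed: "{{ start_result | changed }}"`, and this line should follow it: `docker_service_status_changed: "{{ start_result | changed }}"`. How the fact is consumed is outside this hunk, so the effect on the restart handling should be checked.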
@@ -1,2 +1,3 @@ --- udevw_udevd_dir: /etc/systemd/system/systemd-udevd.service.d +docker_systemd_dir: /etc/systemd/system/docker.service.d diff --git a/roles/etcd/etcdctl.sh b/roles/etcd/etcdctl.sh new file mode 100644 index 000000000..0e324a8a9 --- /dev/null +++ b/roles/etcd/etcdctl.sh @@ -0,0 +1,11 @@ +#!/bin/bash +# Sets up handy aliases for etcd, need etcdctl2 and etcdctl3 because +# command flags are different between the two. Should work on stand +# alone etcd hosts and master + etcd hosts too because we use the peer keys. +etcdctl2() { + /usr/bin/etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://`hostname`:2379 ${@} +} + +etcdctl3() { + ETCDCTL_API=3 /usr/bin/etcdctl --cert /etc/etcd/peer.crt --key /etc/etcd/peer.key --cacert /etc/etcd/ca.crt --endpoints https://`hostname`:2379 ${@} +} diff --git a/roles/etcd/files/etcdctl.sh b/roles/etcd/files/etcdctl.sh new file mode 100644 index 000000000..0e324a8a9 --- /dev/null +++ b/roles/etcd/files/etcdctl.sh @@ -0,0 +1,11 @@ +#!/bin/bash +# Sets up handy aliases for etcd, need etcdctl2 and etcdctl3 because +# command flags are different between the two. Should work on stand +# alone etcd hosts and master + etcd hosts too because we use the peer keys. +etcdctl2() { + /usr/bin/etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://`hostname`:2379 ${@} +} + +etcdctl3() { + ETCDCTL_API=3 /usr/bin/etcdctl --cert /etc/etcd/peer.crt --key /etc/etcd/peer.key --cacert /etc/etcd/ca.crt --endpoints https://`hostname`:2379 ${@} +} diff --git a/roles/etcd/tasks/etcdctl.yml b/roles/etcd/tasks/etcdctl.yml new file mode 100644 index 000000000..32c176449 --- /dev/null +++ b/roles/etcd/tasks/etcdctl.yml 
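Review bot: Both functions in etcdctl.sh expand their arguments as unquoted `${@}`, so an argument containing spaces or glob characters is split or expanded before it reaches etcdctl; use `"$@"`, and `$(hostname)` in place of the backticks. The same file is added twice, as roles/etcd/etcdctl.sh and roles/etcd/files/etcdctl.sh, while roles/etcd/tasks/etcdctl.yml copies `src: etcdctl.sh`, which should resolve to the copy under files/, so the top-level one looks unintended. The header comment could also state that the functions read /etc/etcd/peer.key and will presumably only work for users who can read that key.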
@@ -0,0 +1,11 @@ +- name: Install etcd for etcdctl + action: "{{ ansible_pkg_mgr }} name=etcd state=present" + when: not openshift.common.is_atomic | bool + +- name: Configure etcd profile.d alises + copy: + src: etcdctl.sh + dest: /etc/profile.d/etcdctl.sh + mode: 0755 + owner: root + group: root diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index 2bc6a8678..790eb3c5a 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -74,5 +74,8 @@ enabled: yes register: start_result +- include: etcdctl.yml + when: openshift_etcd_etcdctl_profile | default(true) | bool + - set_fact: etcd_service_status_changed: "{{ start_result | changed }}" diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml index 1ff1d6ef8..93633e3e6 100644 --- a/roles/etcd_common/defaults/main.yml +++ b/roles/etcd_common/defaults/main.yml @@ -25,7 +25,7 @@ etcd_ca_new_certs_dir: "{{ etcd_ca_dir }}/certs" etcd_ca_db: "{{ etcd_ca_dir }}/index.txt" etcd_ca_serial: "{{ etcd_ca_dir }}/serial" etcd_ca_crl_number: "{{ etcd_ca_dir }}/crlnumber" -etcd_ca_default_days: 365 +etcd_ca_default_days: 1825 # etcd server & certificate vars etcd_hostname: "{{ inventory_hostname }}" diff --git a/roles/etcd_common/library/delegated_serial_command.py b/roles/etcd_common/library/delegated_serial_command.py index 3969edfdd..84d4f97c2 100755 --- a/roles/etcd_common/library/delegated_serial_command.py +++ b/roles/etcd_common/library/delegated_serial_command.py @@ -270,6 +270,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -from ansible.module_utils.splitter import * main() diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml index 6b6dfb423..bf400cfe8 100644 --- a/roles/flannel/tasks/main.yml +++ b/roles/flannel/tasks/main.yml 
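Review bot: `mode: 0755` on /etc/profile.d/etcdctl.sh grants an execute bit the file does not need, since files in /etc/profile.d are sourced by login shells rather than executed; `mode: "0644"` is enough, and quoting the value avoids YAML reading it as a number. The task name has a typo, `alises` should be `aliases`. Because the file is sourced for every login shell on the host, a short comment on the task saying so would help reviewers of later changes to etcdctl.sh.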
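Review bot: Raising `etcd_ca_default_days` from 365 to 1825 in roles/etcd_common/defaults/main.yml is undocumented, and judging by the name it sets the default validity of certificates issued by the etcd CA, which would lengthen the time a leaked key stays usable from one year to five. A comment on the line should record the reason and the rotation expectation, for example `# 5 years; certificates must be rotated before expiry`. How the value is consumed is not visible in this hunk, so confirm which certificates it applies to.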
@@ -2,23 +2,20 @@ - name: Install flannel become: yes action: "{{ ansible_pkg_mgr }} name=flannel state=present" - when: not openshift.common.is_containerized | bool + when: not openshift.common.is_atomic | bool -- name: Set flannel etcd url +- name: Set flannel etcd options become: yes lineinfile: dest: /etc/sysconfig/flanneld backrefs: yes - regexp: "^(FLANNEL_ETCD=)" - line: '\1{{ etcd_hosts|join(",") }}' - -- name: Set flannel etcd key - become: yes - lineinfile: - dest: /etc/sysconfig/flanneld - backrefs: yes - regexp: "^(FLANNEL_ETCD_KEY=)" - line: '\1{{ flannel_etcd_key }}' + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: "^(FLANNEL_ETCD=)", line: '\1{{ etcd_hosts|join(",") }}' } + - { regexp: "^(FLANNEL_ETCD_ENDPOINTS=)", line: '\1{{ etcd_hosts|join(",") }}' } + - { regexp: "^(FLANNEL_ETCD_KEY=)", line: '\1{{ flannel_etcd_key }}' } + - { regexp: "^(FLANNEL_ETCD_PREFIX=)", line: '\1{{ flannel_etcd_key }}' } - name: Set flannel options become: yes diff --git a/roles/kube_nfs_volumes/library/partitionpool.py b/roles/kube_nfs_volumes/library/partitionpool.py index 1ac8eed4d..9bd3228c1 100644 --- a/roles/kube_nfs_volumes/library/partitionpool.py +++ b/roles/kube_nfs_volumes/library/partitionpool.py @@ -60,7 +60,7 @@ options: - Example 3: size=200G:1,100G:2 says that the ratio of space occupied by 200 GiB partitions and 100GiB partition is 1:2. Therefore, on 1 TiB disk, 1/3 (300 GiB) should be occupied by 200 GiB partitions. Only one fits there, - so only one is created (we always round nr. of partitions *down*). Teh rest + so only one is created (we always round nr. of partitions *down*). The rest (800 GiB) is split into eight 100 GiB partitions, even though it's more than 2/3 of total space - free space is always allocated as much as possible. - size=200G:1,100G:2 = 1x 200 GiB and 8x 100 GiB partitions (on 1 TiB disk). 
diff --git a/roles/kube_nfs_volumes/tasks/main.yml b/roles/kube_nfs_volumes/tasks/main.yml index 5432a5e2f..5eff30f6f 100644 --- a/roles/kube_nfs_volumes/tasks/main.yml +++ b/roles/kube_nfs_volumes/tasks/main.yml @@ -12,11 +12,11 @@ - name: create filesystem filesystem: fstype=ext4 dev=/dev/{{ item.name }} - with_items: partition_pool + with_items: "{{ partition_pool }}" - name: mount mount: name={{mount_dir}}/{{ item.name }} src=/dev/{{ item.name }} state=mounted fstype=ext4 passno=2 - with_items: partition_pool + with_items: "{{ partition_pool }}" - include: nfs.yml @@ -28,4 +28,4 @@ body_format: json status_code: 201 HEADER_Authorization: "Bearer {{ kubernetes_token }}" - with_items: partition_pool + with_items: "{{ partition_pool }}" diff --git a/roles/kube_nfs_volumes/tasks/nfs.yml b/roles/kube_nfs_volumes/tasks/nfs.yml index 9a68ceb8d..474ec69e5 100644 --- a/roles/kube_nfs_volumes/tasks/nfs.yml +++ b/roles/kube_nfs_volumes/tasks/nfs.yml @@ -13,5 +13,5 @@ lineinfile: dest=/etc/exports regexp="^{{ mount_dir }}/{{ item.name }} " line="{{ mount_dir }}/{{ item.name }} {{nfs_export_options}}" - with_items: partition_pool + with_items: "{{ partition_pool }}" notify: restart nfs diff --git a/roles/nickhammond.logrotate/tasks/main.yml b/roles/nickhammond.logrotate/tasks/main.yml index e2c51a903..1979c851f 100644 --- a/roles/nickhammond.logrotate/tasks/main.yml +++ b/roles/nickhammond.logrotate/tasks/main.yml @@ -7,5 +7,5 @@ template: src: logrotate.d.j2 dest: /etc/logrotate.d/{{ item.name }} - with_items: logrotate_scripts + with_items: "{{ logrotate_scripts | default([]) }}" when: logrotate_scripts is defined diff --git a/roles/nuage_common/defaults/main.yaml b/roles/nuage_common/defaults/main.yaml index 9b777213e..16dac8720 100644 --- a/roles/nuage_common/defaults/main.yaml +++ b/roles/nuage_common/defaults/main.yaml 
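Review bot: The task touched by the last hunk of roles/kube_nfs_volumes/tasks/main.yml (`@@ -28,4 +28,4 @@`) sends `HEADER_Authorization: "Bearer {{ kubernetes_token }}"` once per item, and no `no_log` is visible in the lines shown. Module arguments can be echoed in verbose output and in failure results, which would put the bearer token into logs. The task starts outside the hunk, so check whether it already sets it; if not, add `no_log: true` at task level next to `with_items: "{{ partition_pool }}"`.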
@@ -10,4 +10,4 @@ nuage_master_mon_dir: /usr/share/nuage-openshift-monitor nuage_node_plugin_dir: /usr/share/vsp-openshift nuage_mon_rest_server_port: "{{ nuage_openshift_monitor_rest_server_port | default('9443') }}" - +nuage_mon_cert_validity_period: "{{ nuage_cert_validity_period | default('3650') }}" diff --git a/roles/nuage_master/tasks/certificates.yml b/roles/nuage_master/tasks/certificates.yml index 32b024487..0a2f375cd 100644 --- a/roles/nuage_master/tasks/certificates.yml +++ b/roles/nuage_master/tasks/certificates.yml @@ -15,7 +15,7 @@ - name: Generate the crt file command: > - openssl x509 -req -in "{{ nuage_mon_rest_server_crt_dir }}/restServer.req" -CA "{{ nuage_ca_crt }}" -CAkey "{{ nuage_ca_key }}" -CAserial "{{ nuage_ca_serial }}" -out "{{ nuage_ca_master_rest_server_crt }}" + openssl x509 -req -in "{{ nuage_mon_rest_server_crt_dir }}/restServer.req" -CA "{{ nuage_ca_crt }}" -CAkey "{{ nuage_ca_key }}" -CAserial "{{ nuage_ca_serial }}" -out "{{ nuage_ca_master_rest_server_crt }}" -days "{{ nuage_mon_cert_validity_period }}" delegate_to: "{{ nuage_ca_master }}" - name: Remove the req file diff --git a/roles/nuage_master/tasks/serviceaccount.yml b/roles/nuage_master/tasks/serviceaccount.yml index 5b4af5824..2b3ae0454 100644 --- a/roles/nuage_master/tasks/serviceaccount.yml +++ b/roles/nuage_master/tasks/serviceaccount.yml @@ -16,7 +16,7 @@ shell: > echo {{ nuage_service_account_config | to_json | quote }} | {{ openshift.common.client_binary }} create - -n default + -n default --config={{nuage_tmp_conf}} -f - register: osnuage_create_service_account @@ -25,7 +25,7 @@ - name: Configure role/user permissions command: > - {{ openshift.common.admin_binary }} {{item}} + {{ openshift.common.client_binary }} adm {{item}} --config={{nuage_tmp_conf}} with_items: "{{nuage_tasks}}" register: osnuage_perm_task 
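Review bot: `nuage_mon_cert_validity_period` defaults to `'3650'`, so the REST server certificate signed in roles/nuage_master/tasks/certificates.yml and the client certificate signed in roles/nuage_node/tasks/certificates.yml are issued for ten years unless `nuage_cert_validity_period` is set. That is a long exposure window for leaf certificates; consider a shorter default and a comment above the variable naming the override, e.g. `# days; override with nuage_cert_validity_period`. The two consumers also differ in quoting: the master task passes `-days "{{ nuage_mon_cert_validity_period }}"` while the node task leaves the expression unquoted. Using the quoted form in both keeps them consistent.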
@@ -34,7 +34,7 @@ - name: Generate the node client config command: > - {{ openshift.common.admin_binary }} create-api-client-config + {{ openshift.common.client_binary }} adm create-api-client-config --certificate-authority={{ openshift_master_ca_cert }} --client-dir={{ cert_output_dir }} --master={{ openshift.master.api_url }} diff --git a/roles/nuage_master/templates/nuage-openshift-monitor.j2 b/roles/nuage_master/templates/nuage-openshift-monitor.j2 index 63117adc0..de2a97e37 100644 --- a/roles/nuage_master/templates/nuage-openshift-monitor.j2 +++ b/roles/nuage_master/templates/nuage-openshift-monitor.j2 @@ -23,7 +23,7 @@ enterpriseAdminUser: {{ nuage_master_adminusername }} enterpriseAdminPassword: {{ nuage_master_adminuserpasswd }} # Location where logs should be saved log_dir: {{ nuage_mon_rest_server_logdir }} -# Monitor rest server paramters +# Monitor rest server parameters # Logging level for the nuage openshift monitor # allowed options are: 0 => INFO, 1 => WARNING, 2 => ERROR, 3 => FATAL logLevel: {{ nuage_mon_log_level }} diff --git a/roles/nuage_node/tasks/certificates.yml b/roles/nuage_node/tasks/certificates.yml index 0fe6f7bac..7fcd4274d 100644 --- a/roles/nuage_node/tasks/certificates.yml +++ b/roles/nuage_node/tasks/certificates.yml @@ -15,7 +15,7 @@ - name: Generate the crt file command: > - openssl x509 -req -in "{{ nuage_plugin_rest_client_crt_dir }}/restClient.req" -CA "{{ nuage_ca_crt }}" -CAkey "{{ nuage_ca_key }}" -CAserial "{{ nuage_ca_serial }}" -out "{{ nuage_ca_master_plugin_crt }}" -extensions clientauth -extfile "{{ nuage_ca_dir }}"/openssl.cnf + openssl x509 -req -in "{{ nuage_plugin_rest_client_crt_dir }}/restClient.req" -CA "{{ nuage_ca_crt }}" -CAkey "{{ nuage_ca_key }}" -CAserial "{{ nuage_ca_serial }}" -out "{{ nuage_ca_master_plugin_crt }}" -extensions clientauth -extfile "{{ nuage_ca_dir }}"/openssl.cnf -days {{ nuage_mon_cert_validity_period }} delegate_to: "{{ nuage_ca_master }}" - name: Remove the req file 
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml index bb89b65a6..b6d403067 100644 --- a/roles/openshift_ca/tasks/main.yml +++ b/roles/openshift_ca/tasks/main.yml @@ -80,7 +80,7 @@ - name: Create the master certificates if they do not already exist command: > - {{ openshift.common.admin_binary }} create-master-certs + {{ openshift.common.client_binary }} adm create-master-certs {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %} --certificate-authority {{ named_ca_certificate }} {% endfor %} diff --git a/roles/openshift_certificate_expiry/README.md b/roles/openshift_certificate_expiry/README.md new file mode 100644 index 000000000..d44438332 --- /dev/null +++ b/roles/openshift_certificate_expiry/README.md 
@@ -0,0 +1,250 @@ +OpenShift Certificate Expiration Checker +======================================== + +OpenShift certificate expiration checking. Be warned of certificates +expiring within a configurable window of days, and notified of +certificates which have already expired. Certificates examined +include: + +* Master/Node Service Certificates +* Router/Registry Service Certificates from etcd secrets +* Master/Node/Router/Registry/Admin `kubeconfig`s +* Etcd certificates + +This role pairs well with the redeploy certificates playbook: + +* [Redeploying Certificates Documentation](https://docs.openshift.com/container-platform/latest/install_config/redeploying_certificates.html) + +Just like the redeploying certificates playbook, this role is intended +to be used with an inventory that is representative of the +cluster. For best results run `ansible-playbook` with the `-v` option. + + + +Role Variables +-------------- + +Core variables in this role: + +| Name | Default value | Description | +|-------------------------------------------------------|--------------------------------|-----------------------------------------------------------------------| +| `openshift_certificate_expiry_config_base` | `/etc/origin` | Base openshift config directory | +| `openshift_certificate_expiry_warning_days` | `30` | Flag certificates which will expire in this many days from now | +| `openshift_certificate_expiry_show_all` | `no` | Include healthy (non-expired and non-warning) certificates in results | + +Optional report/result saving variables in this role: + +| Name | Default value | Description | +|-------------------------------------------------------|--------------------------------|-----------------------------------------------------------------------| +| `openshift_certificate_expiry_generate_html_report` | `no` | Generate an HTML report of the expiry check results | +| `openshift_certificate_expiry_html_report_path` | `/tmp/cert-expiry-report.html` | The full path to 
save the HTML report as | +| `openshift_certificate_expiry_save_json_results` | `no` | Save expiry check results as a json file | +| `openshift_certificate_expiry_json_results_path` | `/tmp/cert-expiry-report.json` | The full path to save the json report as | + + +Example Playbook +---------------- + +Default behavior: + +```yaml +--- +- name: Check cert expirys + hosts: nodes:masters:etcd + become: yes + gather_facts: no + roles: + - role: openshift_certificate_expiry +``` + +Generate HTML and JSON artifacts in their default paths: + +```yaml +--- +- name: Check cert expirys + hosts: nodes:masters:etcd + become: yes + gather_facts: no + vars: + openshift_certificate_expiry_generate_html_report: yes + openshift_certificate_expiry_save_json_results: yes + roles: + - role: openshift_certificate_expiry +``` + +Change the expiration warning window to 1500 days (good for testing +the module out): + +```yaml +--- +- name: Check cert expirys + hosts: nodes:masters:etcd + become: yes + gather_facts: no + vars: + openshift_certificate_expiry_warning_days: 1500 + roles: + - role: openshift_certificate_expiry +``` + +Change the expiration warning window to 1500 days (good for testing +the module out) and save the results as a JSON file: + +```yaml +--- +- name: Check cert expirys + hosts: nodes:masters:etcd + become: yes + gather_facts: no + vars: + openshift_certificate_expiry_warning_days: 1500 + openshift_certificate_expiry_save_json_results: yes + roles: + - role: openshift_certificate_expiry +``` + + +JSON Output +----------- + +There are two top-level keys in the saved JSON results, `data` and +`summary`. + +The `data` key is a hash where the keys are the names of each host +examined and the values are the check results for each respective +host. + +The `summary` key is a hash that summarizes the number of certificates +expiring within the configured warning window and the number of +already expired certificates. 
+ +The example below is abbreviated to save space: + +```json +{ + "data": { + "192.168.124.148": { + "etcd": [ + { + "cert_cn": "CN:etcd-signer@1474563722", + "days_remaining": 350, + "expiry": "2017-09-22 17:02:25", + "health": "warning", + "path": "/etc/etcd/ca.crt" + }, + ], + "kubeconfigs": [ + { + "cert_cn": "O:system:nodes, CN:system:node:m01.example.com", + "days_remaining": 715, + "expiry": "2018-09-22 17:08:57", + "health": "warning", + "path": "/etc/origin/node/system:node:m01.example.com.kubeconfig" + }, + { + "cert_cn": "O:system:cluster-admins, CN:system:admin", + "days_remaining": 715, + "expiry": "2018-09-22 17:04:40", + "health": "warning", + "path": "/etc/origin/master/admin.kubeconfig" + } + ], + "meta": { + "checked_at_time": "2016-10-07 15:26:47.608192", + "show_all": "True", + "warn_before_date": "2020-11-15 15:26:47.608192", + "warning_days": 1500 + }, + "ocp_certs": [ + { + "cert_cn": "CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:m01.example.com, DNS:openshift, DNS:openshift.default, DNS:openshift.default.svc, DNS:openshift.default.svc.cluster.local, DNS:172.30.0.1, DNS:192.168.124.148, IP Address:172.30.0.1, IP Address:192.168.124.148", + "days_remaining": 715, + "expiry": "2018-09-22 17:04:39", + "health": "warning", + "path": "/etc/origin/master/master.server.crt" + }, + { + "cert_cn": "CN:openshift-signer@1474563878", + "days_remaining": 1810, + "expiry": "2021-09-21 17:04:38", + "health": "ok", + "path": "/etc/origin/node/ca.crt" + } + ], + "registry": [ + { + "cert_cn": "CN:172.30.101.81, DNS:docker-registry-default.router.default.svc.cluster.local, DNS:docker-registry.default.svc.cluster.local, DNS:172.30.101.81, IP Address:172.30.101.81", + "days_remaining": 728, + "expiry": "2018-10-05 18:54:29", + "health": "warning", + "path": "/api/v1/namespaces/default/secrets/registry-certificates" + } + ], + "router": [ + { + "cert_cn": "CN:router.default.svc, 
DNS:router.default.svc, DNS:router.default.svc.cluster.local", + "days_remaining": 715, + "expiry": "2018-09-22 17:48:23", + "health": "warning", + "path": "/api/v1/namespaces/default/secrets/router-certs" + } + ] + } + }, + "summary": { + "warning": 6, + "expired": 0 + } +} +``` + +The `summary` from the json data can be easily checked for +warnings/expirations using a variety of command-line tools. + +For exampe, using `grep` we can look for the word `summary` and print +out the 2 lines **after** the match (`-A2`): + +``` +$ grep -A2 summary /tmp/cert-expiry-report.json + "summary": { + "warning": 16, + "expired": 0 +``` + +If available, the [jq](https://stedolan.github.io/jq/) tool can also +be used to pick out specific values. Example 1 and 2 below show how to +select just one value, either `warning` or `expired`. Example 3 shows +how to select both values at once: + +``` +$ jq '.summary.warning' /tmp/cert-expiry-report.json +16 +$ jq '.summary.expired' /tmp/cert-expiry-report.json +0 +$ jq '.summary.warning,.summary.expired' /tmp/cert-expiry-report.json +16 +0 +``` + + +Requirements +------------ + +* None + + +Dependencies +------------ + +* None + + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +Tim Bielawa (tbielawa@redhat.com) diff --git a/roles/openshift_certificate_expiry/defaults/main.yml b/roles/openshift_certificate_expiry/defaults/main.yml new file mode 100644 index 000000000..6d7b19298 --- /dev/null +++ b/roles/openshift_certificate_expiry/defaults/main.yml @@ -0,0 +1,8 @@ +--- +openshift_certificate_expiry_config_base: "/etc/origin" +openshift_certificate_expiry_warning_days: 30 +openshift_certificate_expiry_show_all: no +openshift_certificate_expiry_generate_html_report: no +openshift_certificate_expiry_html_report_path: "/tmp/cert-expiry-report.html" +openshift_certificate_expiry_save_json_results: no +openshift_certificate_expiry_json_results_path: "/tmp/cert-expiry-report.json" 
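Review bot: In roles/openshift_certificate_expiry/defaults/main.yml both report paths default to fixed names directly under `/tmp` (`/tmp/cert-expiry-report.html` and `/tmp/cert-expiry-report.json`). That directory is world-writable, so on a shared control host another local user can pre-create a file or symlink with that name before the play writes to it. The report also lists certificate subjects, SANs and on-disk paths, so it should not be left world-readable. Consider a default under a directory owned by the invoking user, for example `"{{ lookup('env', 'HOME') }}/cert-expiry-report.json"`, or at least add a comment telling operators to override both paths and set a restrictive mode on the task that writes them.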
diff --git a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
new file mode 100644
index 000000000..2e2430ee6
--- /dev/null
+++ b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
@@ -0,0 +1,88 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# vim: expandtab:tabstop=4:shiftwidth=4 +""" +Custom filters for use in openshift-ansible +""" + +from ansible import errors +from collections import Mapping +from distutils.util import strtobool +from distutils.version import LooseVersion +from operator import itemgetter +import OpenSSL.crypto +import os +import pdb +import pkg_resources +import re +import json +import yaml +from ansible.parsing.yaml.dumper import AnsibleDumper +from urlparse import urlparse + +try: + # ansible-2.2 + # ansible.utils.unicode.to_unicode is deprecated in ansible-2.2, + # ansible.module_utils._text.to_text should be used instead. + from ansible.module_utils._text import to_text +except ImportError: + # ansible-2.1 + from ansible.utils.unicode import to_unicode as to_text + +# Disabling too-many-public-methods, since filter methods are necessarily +# public +# pylint: disable=too-many-public-methods +class FilterModule(object): + """ Custom ansible filters """ + + @staticmethod + def oo_cert_expiry_results_to_json(hostvars, play_hosts): + """Takes results (`hostvars`) from the openshift_cert_expiry role +check and serializes them into proper machine-readable JSON +output. This filter parameter **MUST** be the playbook `hostvars` +variable. The `play_hosts` parameter is so we know what to loop over +when we're extrating the values. 
+ +Returns: + +Results are collected into two top-level keys under the `json_results` +dict: + +* `json_results.data` [dict] - Each individual host check result, keys are hostnames +* `json_results.summary` [dict] - Summary of number of `warning` and `expired` +certificates + +Example playbook usage: + + - name: Generate expiration results JSON + become: no + run_once: yes + delegate_to: localhost + when: "{{ openshift_certificate_expiry_save_json_results|bool }}" + copy: + content: "{{ hostvars|oo_cert_expiry_results_to_json() }}" + dest: "{{ openshift_certificate_expiry_json_results_path }}" + + """ + json_result = { + 'data': {}, + 'summary': {}, + } + + for host in play_hosts: + json_result['data'][host] = hostvars[host]['check_results']['check_results'] + + total_warnings = sum([hostvars[h]['check_results']['summary']['warning'] for h in play_hosts]) + total_expired = sum([hostvars[h]['check_results']['summary']['expired'] for h in play_hosts]) + + json_result['summary']['warning'] = total_warnings + json_result['summary']['expired'] = total_expired + + return json_result + + + def filters(self): + """ returns a mapping of filters to methods """ + return { + "oo_cert_expiry_results_to_json": self.oo_cert_expiry_results_to_json, + } diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py new file mode 100644 index 000000000..2cdb87dc1 --- /dev/null +++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py 
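Review bot: The docstring example in `oo_cert_expiry_results_to_json` calls the filter as `hostvars|oo_cert_expiry_results_to_json()`, but the function requires `play_hosts`, so a task copied from it fails with a missing-argument error. The example should read `content: "{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}"`. None of the module-level imports (`errors`, `Mapping`, `strtobool`, `LooseVersion`, `itemgetter`, `OpenSSL.crypto`, `os`, `pdb`, `pkg_resources`, `re`, `json`, `yaml`, `AnsibleDumper`, `urlparse`, `to_text`) is used by the code in this file. They make the plugin fail to load wherever one of those packages is missing, and `import pdb` should not ship, so drop them all. The loop also indexes `hostvars[host]['check_results']` unconditionally, which raises `KeyError` for a host in `play_hosts` whose check was skipped or failed. Guard the lookup with `.get()` or filter such hosts out first.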
@@ -0,0 +1,637 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# pylint: disable=line-too-long,invalid-name + +"""For details on this module see DOCUMENTATION (below)""" + +# router/registry cert grabbing +import subprocess +# etcd config file +import ConfigParser +# Expiration parsing +import datetime +# File path stuff +import os +# Config file parsing +import yaml +# Certificate loading +import OpenSSL.crypto + +DOCUMENTATION = ''' +--- +module: openshift_cert_expiry +short_description: Check OpenShift Container Platform (OCP) and Kube certificate expirations on a cluster +description: + - The M(openshift_cert_expiry) module has two basic functions: to flag certificates which will expire in a set window of time from now, and to notify you about certificates which have already expired. + - When the module finishes, a summary of the examination is returned. Each certificate in the summary has a C(health) key with a value of one of the following: + - C(ok) - not expired, and outside of the expiration C(warning_days) window. + - C(warning) - not expired, but will expire between now and the C(warning_days) window. + - C(expired) - an expired certificate. + - Certificate flagging follow this logic: + - If the expiration date is before now then the certificate is classified as C(expired). + - The certificates time to live (expiration date - now) is calculated, if that time window is less than C(warning_days) the certificate is classified as C(warning). + - All other conditions are classified as C(ok). + - The following keys are ALSO present in the certificate summary: + - C(cert_cn) - The common name of the certificate (additional CNs present in SAN extensions are omitted) + - C(days_remaining) - The number of days until the certificate expires. + - C(expiry) - The date the certificate expires on. + - C(path) - The full path to the certificate on the examined host. +version_added: "1.0" +options: + config_base: + description: + - Base path to OCP system settings. 
+ required: false + default: /etc/origin + warning_days: + description: + - Flag certificates which will expire in C(warning_days) days from now. + required: false + default: 30 + show_all: + description: + - Enable this option to show analysis of ALL certificates examined by this module. + - By default only certificates which have expired, or will expire within the C(warning_days) window will be reported. + required: false + default: false + +author: "Tim Bielawa (@tbielawa) <tbielawa@redhat.com>" +''' + +EXAMPLES = ''' +# Default invocation, only notify about expired certificates or certificates which will expire within 30 days from now +- openshift_cert_expiry: + +# Expand the warning window to show certificates expiring within a year from now +- openshift_cert_expiry: warning_days=365 + +# Show expired, soon to expire (now + 30 days), and all other certificates examined +- openshift_cert_expiry: show_all=true +''' + + +# We only need this for one thing, we don't care if it doesn't have +# that many public methods +# +# pylint: disable=too-few-public-methods +class FakeSecHead(object): + """etcd does not begin their config file with an opening [section] as +required by the Python ConfigParser module. We hack around it by +slipping one in ourselves prior to parsing. + +Source: Alex Martelli - http://stackoverflow.com/a/2819788/6490583 + """ + def __init__(self, fp): + self.fp = fp + self.sechead = '[ETCD]\n' + + def readline(self): + """Make this look like a file-type object""" + if self.sechead: + try: + return self.sechead + finally: + self.sechead = None + else: + return self.fp.readline() + + +###################################################################### +def filter_paths(path_list): + """`path_list` - A list of file paths to check. 
Only files which exist +will be returned + """ + return [p for p in path_list if os.path.exists(os.path.realpath(p))] + + +def load_and_handle_cert(cert_string, now, base64decode=False): + """Load a certificate, split off the good parts, and return some +useful data + +Params: + +- `cert_string` (string) - a certificate loaded into a string object +- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against +- `base64decode` (bool) - run .decode('base64') on the input? + +Returns: +A 3-tuple of the form: (certificate_common_name, certificate_expiry_date, certificate_time_remaining) + + """ + if base64decode: + _cert_string = cert_string.decode('base-64') + else: + _cert_string = cert_string + + cert_loaded = OpenSSL.crypto.load_certificate( + OpenSSL.crypto.FILETYPE_PEM, _cert_string) + + ###################################################################### + # Read all possible names from the cert + cert_subjects = [] + for name, value in cert_loaded.get_subject().get_components(): + cert_subjects.append('{}:{}'.format(name, value)) + + # To read SANs from a cert we must read the subjectAltName + # extension from the X509 Object. What makes this more difficult + # is that pyOpenSSL does not give extensions as a list, nor does + # it provide a count of all loaded extensions. + # + # Rather, extensions are REQUESTED by index. We must iterate over + # all extensions until we find the one called 'subjectAltName'. If + # we don't find that extension we'll eventually request an + # extension at an index where no extension exists (IndexError is + # raised). When that happens we know that the cert has no SANs so + # we break out of the loop. + i = 0 + checked_all_extensions = False + while not checked_all_extensions: + try: + # Read the extension at index 'i' + ext = cert_loaded.get_extension(i) + except IndexError: + # We tried to read an extension but it isn't there, that + # means we ran out of extensions to check. 
Abort + san = None + checked_all_extensions = True + else: + # We were able to load the extension at index 'i' + if ext.get_short_name() == 'subjectAltName': + san = ext + checked_all_extensions = True + else: + # Try reading the next extension + i += 1 + + if san is not None: + # The X509Extension object for subjectAltName prints as a + # string with the alt names separated by a comma and a + # space. Split the string by ', ' and then add our new names + # to the list of existing names + cert_subjects.extend(str(san).split(', ')) + + cert_subject = ', '.join(cert_subjects) + ###################################################################### + + # Grab the expiration date + cert_expiry = cert_loaded.get_notAfter() + cert_expiry_date = datetime.datetime.strptime( + cert_expiry, + # example get_notAfter() => 20180922170439Z + '%Y%m%d%H%M%SZ') + + time_remaining = cert_expiry_date - now + + return (cert_subject, cert_expiry_date, time_remaining) + + +def classify_cert(cert_meta, now, time_remaining, expire_window, cert_list): + """Given metadata about a certificate under examination, classify it + into one of three categories, 'ok', 'warning', and 'expired'. + +Params: + +- `cert_meta` dict - A dict with certificate metadata. Required fields + include: 'cert_cn', 'path', 'expiry', 'days_remaining', 'health'. 
+- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against +- `time_remaining` (datetime.timedelta) - a timedelta for how long until the cert expires +- `expire_window` (datetime.timedelta) - a timedelta for how long the warning window is +- `cert_list` list - A list to shove the classified cert into + +Return: +- `cert_list` - The updated list of classified certificates + """ + expiry_str = str(cert_meta['expiry']) + # Categorization + if cert_meta['expiry'] < now: + # This already expired, must NOTIFY + cert_meta['health'] = 'expired' + elif time_remaining < expire_window: + # WARN about this upcoming expirations + cert_meta['health'] = 'warning' + else: + # Not expired or about to expire + cert_meta['health'] = 'ok' + + cert_meta['expiry'] = expiry_str + cert_list.append(cert_meta) + return cert_list + + +def tabulate_summary(certificates, kubeconfigs, etcd_certs, router_certs, registry_certs): + """Calculate the summary text for when the module finishes +running. This includes counts of each classification and what have +you. + +Params: + +- `certificates` (list of dicts) - Processed `expire_check_result` + dicts with filled in `health` keys for system certificates. +- `kubeconfigs` - as above for kubeconfigs +- `etcd_certs` - as above for etcd certs + +Return: + +- `summary_results` (dict) - Counts of each cert type classification + and total items examined. 
+ """ + items = certificates + kubeconfigs + etcd_certs + router_certs + registry_certs + + summary_results = { + 'system_certificates': len(certificates), + 'kubeconfig_certificates': len(kubeconfigs), + 'etcd_certificates': len(etcd_certs), + 'router_certs': len(router_certs), + 'registry_certs': len(registry_certs), + 'total': len(items), + 'ok': 0, + 'warning': 0, + 'expired': 0 + } + + summary_results['expired'] = len([c for c in items if c['health'] == 'expired']) + summary_results['warning'] = len([c for c in items if c['health'] == 'warning']) + summary_results['ok'] = len([c for c in items if c['health'] == 'ok']) + + return summary_results + + +###################################################################### +# This is our module MAIN function after all, so there's bound to be a +# lot of code bundled up into one block +# +# pylint: disable=too-many-locals,too-many-locals,too-many-statements,too-many-branches +def main(): + """This module examines certificates (in various forms) which compose +an OpenShift Container Platform cluster + """ + + module = AnsibleModule( + argument_spec=dict( + config_base=dict( + required=False, + default="/etc/origin", + type='str'), + warning_days=dict( + required=False, + default=30, + type='int'), + show_all=dict( + required=False, + default=False, + type='bool') + ), + supports_check_mode=True, + ) + + # Basic scaffolding for OpenShift specific certs + openshift_base_config_path = module.params['config_base'] + openshift_master_config_path = os.path.normpath( + os.path.join(openshift_base_config_path, "master/master-config.yaml") + ) + openshift_node_config_path = os.path.normpath( + os.path.join(openshift_base_config_path, "node/node-config.yaml") + ) + openshift_cert_check_paths = [ + openshift_master_config_path, + openshift_node_config_path, + ] + + # Paths for Kubeconfigs. 
Additional kubeconfigs are conditionally + # checked later in the code + master_kube_configs = ['admin', 'openshift-master', + 'openshift-node', 'openshift-router', + 'openshift-registry'] + + kubeconfig_paths = [] + for m_kube_config in master_kube_configs: + kubeconfig_paths.append( + os.path.normpath( + os.path.join(openshift_base_config_path, "master/%s.kubeconfig" % m_kube_config) + ) + ) + + # Validate some paths we have the ability to do ahead of time + openshift_cert_check_paths = filter_paths(openshift_cert_check_paths) + kubeconfig_paths = filter_paths(kubeconfig_paths) + + # etcd, where do you hide your certs? Used when parsing etcd.conf + etcd_cert_params = [ + "ETCD_CA_FILE", + "ETCD_CERT_FILE", + "ETCD_PEER_CA_FILE", + "ETCD_PEER_CERT_FILE", + ] + + # Expiry checking stuff + now = datetime.datetime.now() + # todo, catch exception for invalid input and return a fail_json + warning_days = int(module.params['warning_days']) + expire_window = datetime.timedelta(days=warning_days) + + # Module stuff + # + # The results of our cert checking to return from the task call + check_results = {} + check_results['meta'] = {} + check_results['meta']['warning_days'] = warning_days + check_results['meta']['checked_at_time'] = str(now) + check_results['meta']['warn_before_date'] = str(now + expire_window) + check_results['meta']['show_all'] = str(module.params['show_all']) + # All the analyzed certs accumulate here + ocp_certs = [] + + ###################################################################### + # Sure, why not? Let's enable check mode. + if module.check_mode: + check_results['ocp_certs'] = [] + module.exit_json( + check_results=check_results, + msg="Checked 0 total certificates. Expired/Warning/OK: 0/0/0. 
Warning window: %s days" % module.params['warning_days'], + rc=0, + changed=False + ) + + ###################################################################### + # Check for OpenShift Container Platform specific certs + ###################################################################### + for os_cert in filter_paths(openshift_cert_check_paths): + # Open up that config file and locate the cert and CA + with open(os_cert, 'r') as fp: + cert_meta = {} + cfg = yaml.load(fp) + # cert files are specified in parsed `fp` as relative to the path + # of the original config file. 'master-config.yaml' with certFile + # = 'foo.crt' implies that 'foo.crt' is in the same + # directory. certFile = '../foo.crt' is in the parent directory. + cfg_path = os.path.dirname(fp.name) + cert_meta['certFile'] = os.path.join(cfg_path, cfg['servingInfo']['certFile']) + cert_meta['clientCA'] = os.path.join(cfg_path, cfg['servingInfo']['clientCA']) + + ###################################################################### + # Load the certificate and the CA, parse their expiration dates into + # datetime objects so we can manipulate them later + for _, v in cert_meta.iteritems(): + with open(v, 'r') as fp: + cert = fp.read() + cert_subject, cert_expiry_date, time_remaining = load_and_handle_cert(cert, now) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': fp.name, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, ocp_certs) + + ###################################################################### + # /Check for OpenShift Container Platform specific certs + ###################################################################### + + ###################################################################### + # Check service Kubeconfigs + ###################################################################### + kubeconfigs = [] + + # There may be additional kubeconfigs 
to check, but their naming + # is less predictable than the ones we've already assembled. + + try: + # Try to read the standard 'node-config.yaml' file to check if + # this host is a node. + with open(openshift_node_config_path, 'r') as fp: + cfg = yaml.load(fp) + + # OK, the config file exists, therefore this is a + # node. Nodes have their own kubeconfig files to + # communicate with the master API. Let's read the relative + # path to that file from the node config. + node_masterKubeConfig = cfg['masterKubeConfig'] + # As before, the path to the 'masterKubeConfig' file is + # relative to `fp` + cfg_path = os.path.dirname(fp.name) + node_kubeconfig = os.path.join(cfg_path, node_masterKubeConfig) + + with open(node_kubeconfig, 'r') as fp: + # Read in the nodes kubeconfig file and grab the good stuff + cfg = yaml.load(fp) + + c = cfg['users'][0]['user']['client-certificate-data'] + (cert_subject, + cert_expiry_date, + time_remaining) = load_and_handle_cert(c, now, base64decode=True) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': fp.name, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs) + except IOError: + # This is not a node + pass + + for kube in filter_paths(kubeconfig_paths): + with open(kube, 'r') as fp: + # TODO: Maybe consider catching exceptions here? + cfg = yaml.load(fp) + + # Per conversation, "the kubeconfigs you care about: + # admin, router, registry should all be single + # value". Following that advice we only grab the data for + # the user at index 0 in the 'users' list. There should + # not be more than one user. 
+ c = cfg['users'][0]['user']['client-certificate-data'] + (cert_subject, + cert_expiry_date, + time_remaining) = load_and_handle_cert(c, now, base64decode=True) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': fp.name, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs) + + ###################################################################### + # /Check service Kubeconfigs + ###################################################################### + + ###################################################################### + # Check etcd certs + ###################################################################### + # Some values may be duplicated, make this a set for now so we + # unique them all + etcd_certs_to_check = set([]) + etcd_certs = [] + etcd_cert_params.append('dne') + try: + with open('/etc/etcd/etcd.conf', 'r') as fp: + etcd_config = ConfigParser.ConfigParser() + etcd_config.readfp(FakeSecHead(fp)) + + for param in etcd_cert_params: + try: + etcd_certs_to_check.add(etcd_config.get('ETCD', param)) + except ConfigParser.NoOptionError: + # That parameter does not exist, oh well... 
+ pass + except IOError: + # No etcd to see here, move along + pass + + for etcd_cert in filter_paths(etcd_certs_to_check): + with open(etcd_cert, 'r') as fp: + c = fp.read() + (cert_subject, + cert_expiry_date, + time_remaining) = load_and_handle_cert(c, now) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': fp.name, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs) + + ###################################################################### + # /Check etcd certs + ###################################################################### + + ###################################################################### + # Check router/registry certs + # + # These are saved as secrets in etcd. That means that we can not + # simply read a file to grab the data. Instead we're going to + # subprocess out to the 'oc get' command. On non-masters this + # command will fail, that is expected so we catch that exception. + ###################################################################### + router_certs = [] + registry_certs = [] + + ###################################################################### + # First the router certs + try: + router_secrets_raw = subprocess.Popen('oc get secret router-certs -o yaml'.split(), + stdout=subprocess.PIPE) + router_ds = yaml.load(router_secrets_raw.communicate()[0]) + router_c = router_ds['data']['tls.crt'] + router_path = router_ds['metadata']['selfLink'] + except TypeError: + # YAML couldn't load the result, this is not a master + pass + except OSError: + # The OC command doesn't exist here. Move along. 
+ pass + else: + (cert_subject, + cert_expiry_date, + time_remaining) = load_and_handle_cert(router_c, now, base64decode=True) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': router_path, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, router_certs) + + ###################################################################### + # Now for registry + try: + registry_secrets_raw = subprocess.Popen('oc get secret registry-certificates -o yaml'.split(), + stdout=subprocess.PIPE) + registry_ds = yaml.load(registry_secrets_raw.communicate()[0]) + registry_c = registry_ds['data']['registry.crt'] + registry_path = registry_ds['metadata']['selfLink'] + except TypeError: + # YAML couldn't load the result, this is not a master + pass + except OSError: + # The OC command doesn't exist here. Move along. + pass + else: + (cert_subject, + cert_expiry_date, + time_remaining) = load_and_handle_cert(registry_c, now, base64decode=True) + + expire_check_result = { + 'cert_cn': cert_subject, + 'path': registry_path, + 'expiry': cert_expiry_date, + 'days_remaining': time_remaining.days, + 'health': None, + } + + classify_cert(expire_check_result, now, time_remaining, expire_window, registry_certs) + + ###################################################################### + # /Check router/registry certs + ###################################################################### + + res = tabulate_summary(ocp_certs, kubeconfigs, etcd_certs, router_certs, registry_certs) + + msg = "Checked {count} total certificates. Expired/Warning/OK: {exp}/{warn}/{ok}. Warning window: {window} days".format( + count=res['total'], + exp=res['expired'], + warn=res['warning'], + ok=res['ok'], + window=int(module.params['warning_days']), + ) + + # By default we only return detailed information about expired or + # warning certificates. 
If show_all is true then we will print all + # the certificates examined. + if not module.params['show_all']: + check_results['ocp_certs'] = [crt for crt in ocp_certs if crt['health'] in ['expired', 'warning']] + check_results['kubeconfigs'] = [crt for crt in kubeconfigs if crt['health'] in ['expired', 'warning']] + check_results['etcd'] = [crt for crt in etcd_certs if crt['health'] in ['expired', 'warning']] + check_results['registry'] = [crt for crt in registry_certs if crt['health'] in ['expired', 'warning']] + check_results['router'] = [crt for crt in router_certs if crt['health'] in ['expired', 'warning']] + else: + check_results['ocp_certs'] = ocp_certs + check_results['kubeconfigs'] = kubeconfigs + check_results['etcd'] = etcd_certs + check_results['registry'] = registry_certs + check_results['router'] = router_certs + + # Sort the final results to report in order of ascending safety + # time. That is to say, the certificates which will expire sooner + # will be at the front of the list and certificates which will + # expire later are at the end. Router and registry certs should be + # limited to just 1 result, so don't bother sorting those. + check_results['ocp_certs'] = sorted(check_results['ocp_certs'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining'])) + check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining'])) + check_results['etcd'] = sorted(check_results['etcd'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining'])) + + # This module will never change anything, but we might want to + # change the return code parameter if there is some catastrophic + # error we noticed earlier + module.exit_json( + check_results=check_results, + summary=res, + msg=msg, + rc=0, + changed=False + ) + +###################################################################### +# It's just the way we do things in Ansible. 
So disable this warning +# +# pylint: disable=wrong-import-position,import-error +from ansible.module_utils.basic import AnsibleModule +if __name__ == '__main__': + main() diff --git a/roles/openshift_certificate_expiry/meta/main.yml b/roles/openshift_certificate_expiry/meta/main.yml new file mode 100644 index 000000000..c13b29ba5 --- /dev/null +++ b/roles/openshift_certificate_expiry/meta/main.yml @@ -0,0 +1,16 @@ +--- +galaxy_info: + author: Tim Bielawa + description: OpenShift Certificate Expiry Checker + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 2.1 + version: 1.0 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud + - system +dependencies: [] diff --git a/roles/openshift_certificate_expiry/tasks/main.yml b/roles/openshift_certificate_expiry/tasks/main.yml new file mode 100644 index 000000000..139d5de6e --- /dev/null +++ b/roles/openshift_certificate_expiry/tasks/main.yml 
@@ -0,0 +1,30 @@ +--- +- name: Check cert expirys on host + openshift_cert_expiry: + warning_days: "{{ openshift_certificate_expiry_warning_days|int }}" + config_base: "{{ openshift_certificate_expiry_config_base }}" + show_all: "{{ openshift_certificate_expiry_show_all|bool }}" + register: check_results + +- name: Generate expiration report HTML + become: no + run_once: yes + template: + src: cert-expiry-table.html.j2 + dest: "{{ openshift_certificate_expiry_html_report_path }}" + delegate_to: localhost + when: "{{ openshift_certificate_expiry_generate_html_report|bool }}" + +- name: Generate the result JSON string + run_once: yes + set_fact: json_result_string="{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}" + when: "{{ openshift_certificate_expiry_save_json_results|bool }}" + +- name: Generate results JSON file + become: no + run_once: yes + template: + src: save_json_results.j2 + dest: "{{ openshift_certificate_expiry_json_results_path }}" + delegate_to: localhost + when: "{{ openshift_certificate_expiry_save_json_results|bool }}" diff --git a/roles/openshift_certificate_expiry/templates/cert-expiry-table.html.j2 b/roles/openshift_certificate_expiry/templates/cert-expiry-table.html.j2 new file mode 100644 index 000000000..b05110336 --- /dev/null +++ b/roles/openshift_certificate_expiry/templates/cert-expiry-table.html.j2 
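Review bot: The three `when:` conditions in this new task file wrap their expression in `{{ }}`, but `when:` is already evaluated as a Jinja expression and newer Ansible releases warn about templating delimiters there. The bare form reads `when: openshift_certificate_expiry_generate_html_report|bool`, and likewise for the two `openshift_certificate_expiry_save_json_results` conditions. The `set_fact: json_result_string="{{ ... }}"` key=value form would also be easier to read and quote safely as a YAML mapping under `set_fact:`.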
@@ -0,0 +1,124 @@ +<!DOCTYPE html> +<html> + <head> + <meta charset="UTF-8" /> + <title>OCP Certificate Expiry Report</title> + {# For fancy icons and a pleasing font #} + <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" /> + <link href="https://fonts.googleapis.com/css?family=Source+Sans+Pro:300,400,700" rel="stylesheet" /> + <style type="text/css"> + body { + font-family: 'Source Sans Pro', sans-serif; + margin-left: 50px; + margin-right: 50px; + margin-bottom: 20px; + padding-top: 70px; + } + table { + border-collapse: collapse; + margin-bottom: 20px; + } + table, th, td { + border: 1px solid black; + } + th, td { + padding: 5px; + } + .cert-kind { + margin-top: 5px; + margin-bottom: 5px; + } + footer { + font-size: small; + text-align: center; + } + tr.odd { + background-color: #f2f2f2; + } + </style> + </head> + <body> + <nav class="navbar navbar-default navbar-fixed-top"> + <div class="container-fluid"> + <div class="navbar-header"> + <a class="navbar-brand" href="#">OCP Certificate Expiry Report</a> + </div> + <div class="collapse navbar-collapse"> + <p class="navbar-text navbar-right"> + <a href="https://docs.openshift.com/container-platform/latest/install_config/redeploying_certificates.html" + target="_blank" + class="navbar-link"> + <i class="glyphicon glyphicon-book"></i> Redeploying Certificates + </a> + </p> + </div> + </div> + </nav> + + {# Each host has a header and table to itself #} + {% for host in play_hosts %} + <h1>{{ host }}</h1> + + <p> + {{ hostvars[host].check_results.msg }} + </p> + <ul> + <li><b>Expirations checked at:</b> {{ hostvars[host].check_results.check_results.meta.checked_at_time }}</li> + <li><b>Warn after date:</b> {{ hostvars[host].check_results.check_results.meta.warn_before_date }}</li> + </ul> + + <table border="1" width="100%"> + {# These are hard-coded right now, but should be grabbed dynamically from the registered results #} + {%- for kind in ['ocp_certs', 'etcd', 
'kubeconfigs', 'router', 'registry'] -%} + <tr> + <th colspan="6" style="text-align:center"><h2 class="cert-kind">{{ kind }}</h2></th> + </tr> + + <tr> + <th> </th> + <th style="width:33%">Certificate Common/Alt Name(s)</th> + <th>Health</th> + <th>Days Remaining</th> + <th>Expiration Date</th> + <th>Path</th> + </tr> + + {# A row for each certificate examined #} + {%- for v in hostvars[host].check_results.check_results[kind] -%} + + {# Let's add some flair and show status visually with fancy icons #} + {% if v.health == 'ok' %} + {% set health_icon = 'glyphicon glyphicon-ok' %} + {% elif v.health == 'warning' %} + {% set health_icon = 'glyphicon glyphicon-alert' %} + {% else %} + {% set health_icon = 'glyphicon glyphicon-remove' %} + {% endif %} + + <tr class="{{ loop.cycle('odd', 'even') }}"> + <td style="text-align:center"><i class="{{ health_icon }}"></i></td> + <td style="width:33%">{{ v.cert_cn }}</td> + <td>{{ v.health }}</td> + <td>{{ v.days_remaining }}</td> + <td>{{ v.expiry }}</td> + <td>{{ v.path }}</td> + </tr> + {% endfor %} + {# end row generation per cert of this type #} + {% endfor %} + {# end generation for each kind of cert block #} + </table> + <hr /> + {% endfor %} + {# end section generation for each host #} + + <footer> + <p> + Expiration report generated by <a href="https://github.com/openshift/openshift-ansible" target="_blank">openshift-ansible</a> + </p> + <p> + Status icons from bootstrap/glyphicon + </p> + </footer> + </body> +</html> diff --git a/roles/openshift_certificate_expiry/templates/save_json_results.j2 b/roles/openshift_certificate_expiry/templates/save_json_results.j2 new file mode 100644 index 000000000..c1173d9ea --- /dev/null +++ b/roles/openshift_certificate_expiry/templates/save_json_results.j2 @@ -0,0 +1 @@ +{{ json_result_string | to_nice_json(indent=2)}} 
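Review bot: The table cells print `v.cert_cn` and `v.path`, and the heading prints `host`, without HTML escaping. As far as I know, Ansible's `template` module does not autoescape. The certificate subject is data read out of the certificates being inspected, so it should be treated as untrusted text when it lands in a report opened in a browser: `<td style="width:33%">{{ v.cert_cn | e }}</td>`, `<td>{{ v.path | e }}</td>`, `<h1>{{ host | e }}</h1>`. The two external stylesheets are loaded from CDNs without an `integrity` attribute, and the `target="_blank"` links carry no `rel="noopener noreferrer"`; both are worth adding if the report is kept as a standalone file.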
diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py index fd290c6fc..9ff738d14 100644 --- a/roles/openshift_cli/library/openshift_container_binary_sync.py +++ b/roles/openshift_cli/library/openshift_container_binary_sync.py @@ -83,8 +83,13 @@ class BinarySyncer(object): def _sync_symlink(self, binary_name, link_to): """ Ensure the given binary name exists and links to the expected binary. """ + + # The symlink we are creating: link_path = os.path.join(self.bin_dir, binary_name) - link_dest = os.path.join(self.bin_dir, binary_name) + + # The expected file we should be linking to: + link_dest = os.path.join(self.bin_dir, link_to) + if not os.path.exists(link_path) or \ not os.path.islink(link_path) or \ os.path.realpath(link_path) != os.path.realpath(link_dest): diff --git a/roles/openshift_cloud_provider/tasks/aws.yml b/roles/openshift_cloud_provider/tasks/aws.yml index bf2abcbf5..127a5b392 100644 --- a/roles/openshift_cloud_provider/tasks/aws.yml +++ b/roles/openshift_cloud_provider/tasks/aws.yml @@ -1,4 +1,14 @@ -- name: Create cloud config +# Work around ini_file create option in 2.2 which defaults to no +- name: Create cloud config file + file: + dest: "{{ openshift.common.config_base }}/cloudprovider/aws.conf" + state: touch + mode: 0660 + owner: root + group: root + changed_when: false + +- name: Configure AWS cloud provider ini_file: dest: "{{ openshift.common.config_base }}/cloudprovider/aws.conf" section: Global diff --git a/roles/openshift_cloud_provider/tasks/gce.yml b/roles/openshift_cloud_provider/tasks/gce.yml new file mode 100644 index 000000000..14ad8ba94 --- /dev/null +++ b/roles/openshift_cloud_provider/tasks/gce.yml 
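Review bot: The docstring of `_sync_symlink` still does not say that `link_to` is interpreted as a file name inside `self.bin_dir`, which is exactly the detail the old code got wrong. I would spell it out, for example `""" Ensure bin_dir/binary_name is a symlink to bin_dir/link_to; link_to is a name relative to bin_dir. """`. Because `os.path.join` discards `self.bin_dir` when its second argument is absolute, the docstring is also the place to state that callers must pass a relative name. The function body continues past the end of this hunk, so how the link is recreated is not visible here.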
@@ -0,0 +1,16 @@ +# Work around ini_file create option in 2.2 which defaults to no +- name: Create cloud config file + file: + dest: "{{ openshift.common.config_base }}/cloudprovider/gce.conf" + state: touch + mode: 0660 + owner: root + group: root + changed_when: false + +- name: Configure GCE cloud provider + ini_file: + dest: "{{ openshift.common.config_base }}/cloudprovider/gce.conf" + section: Global + option: multizone + value: "true" diff --git a/roles/openshift_cloud_provider/tasks/main.yml b/roles/openshift_cloud_provider/tasks/main.yml index e217e37ea..ab3055c8b 100644 --- a/roles/openshift_cloud_provider/tasks/main.yml +++ b/roles/openshift_cloud_provider/tasks/main.yml @@ -16,3 +16,6 @@ - include: aws.yml when: cloudprovider_is_aws | bool + +- include: gce.yml + when: cloudprovider_is_gce | bool diff --git a/roles/openshift_cloud_provider/vars/main.yml b/roles/openshift_cloud_provider/vars/main.yml index 83bf6edc8..c9d953f58 100644 --- a/roles/openshift_cloud_provider/vars/main.yml +++ b/roles/openshift_cloud_provider/vars/main.yml @@ -2,3 +2,4 @@ has_cloudprovider: "{{ openshift_cloudprovider_kind | default(None) != None }}" cloudprovider_is_aws: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'aws' }}" cloudprovider_is_openstack: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'openstack' }}" +cloudprovider_is_gce: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'gce' }}" diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml index 0c8a36d65..c690c5243 100644 --- a/roles/openshift_docker_facts/tasks/main.yml +++ b/roles/openshift_docker_facts/tasks/main.yml 
@@ -13,7 +13,7 @@ log_options: "{{ openshift_docker_log_options | default(None) }}" options: "{{ openshift_docker_options | default(None) }}" disable_push_dockerhub: "{{ openshift_disable_push_dockerhub | default(None) }}" - hosted_registry_insecure: "{{ openshift_docker_hosted_registry_insecure | default(False) }}" + hosted_registry_insecure: "{{ openshift_docker_hosted_registry_insecure | default(openshift.docker.hosted_registry_insecure | default(False)) }}" hosted_registry_network: "{{ openshift_docker_hosted_registry_network | default(None) }}" - set_fact: diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh index b176ce440..d8c45dbc6 100755 --- a/roles/openshift_examples/examples-sync.sh +++ b/roles/openshift_examples/examples-sync.sh @@ -9,7 +9,6 @@ XPAAS_VERSION=ose-v1.3.3 ORIGIN_VERSION=${1:-v1.4} EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION} find ${EXAMPLES_BASE} -name '*.json' -delete -find ${EXAMPLES_BASE} -name '*.yaml' -delete -exclude registry-console.json TEMP=`mktemp -d` pushd $TEMP 
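Review bot: The new fallback `default(openshift.docker.hosted_registry_insecure | default(False))` makes the insecure-registry setting sticky. Once a host has the fact recorded as true, dropping `openshift_docker_hosted_registry_insecure` from the inventory presumably no longer returns it to the secure default. If that is intended, a comment on the line should say so, for example `# falls back to the stored fact; set openshift_docker_hosted_registry_insecure=false explicitly to revert`. It is also worth confirming that the expression renders on a fresh host where `openshift.docker` is not defined yet, since the inner `default` only covers the last attribute.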
@@ -23,7 +22,7 @@ cp origin-master/examples/jenkins/jenkins-*template.json ${EXAMPLES_BASE}/quicks cp origin-master/examples/image-streams/* ${EXAMPLES_BASE}/image-streams/ mv application-templates-${XPAAS_VERSION}/jboss-image-streams.json ${EXAMPLES_BASE}/xpaas-streams/ find application-templates-${XPAAS_VERSION}/ -name '*.json' ! -wholename '*secret*' ! -wholename '*demo*' -exec mv {} ${EXAMPLES_BASE}/xpaas-templates/ \; -wget https://raw.githubusercontent.com/jboss-fuse/application-templates/master/fis-image-streams.json -O ${EXAMPLES_BASE}/xpaas-streams/fis-image-streams.json +wget https://raw.githubusercontent.com/jboss-fuse/application-templates/GA/fis-image-streams.json -O ${EXAMPLES_BASE}/xpaas-streams/fis-image-streams.json wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/dotnet_imagestreams.json -O ${EXAMPLES_BASE}/image-streams/dotnet_imagestreams.json wget https://raw.githubusercontent.com/openshift/origin-metrics/master/metrics.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/origin/metrics-deployer.yaml wget https://raw.githubusercontent.com/openshift/origin-metrics/enterprise/metrics.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/enterprise/metrics-deployer.yaml diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-ephemeral-template.json index 64b004ff4..8e43bfbc3 100644 --- a/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-ephemeral-template.json 
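Review bot: The changed `wget` line still fetches `fis-image-streams.json` from a branch ref (`GA` instead of `master`), so the content that ends up committed under `files/examples` depends on whatever that branch holds when the script is run. Pinning the URL to a tag or commit makes the sync reproducible and reviewable, e.g. `wget https://raw.githubusercontent.com/jboss-fuse/application-templates/<COMMIT_SHA>/fis-image-streams.json -O ${EXAMPLES_BASE}/xpaas-streams/fis-image-streams.json`. The neighbouring `wget` context lines use `master`/`enterprise` refs in the same way. The script continues outside this hunk, so any later verification step is not visible here.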
@@ -4,11 +4,16 @@ "metadata": { "name": "mariadb-ephemeral", "annotations": { - "description": "MariaDB database service, without persistent storage. WARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", + "openshift.io/display-name": "MariaDB (Ephemeral)", + "description": "MariaDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", "iconClass": "icon-mariadb", "tags": "database,mariadb" } }, + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.", + "labels": { + "template": "mariadb-persistent-template" + }, "objects": [ { "kind": "Service", @@ -177,8 +182,5 @@ "value": "sampledb", "required": true } - ], - "labels": { - "template": "mariadb-persistent-template" - } + ] } diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-persistent-template.json index 0d5b39e81..bc85277a9 100644 --- a/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-persistent-template.json +++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-persistent-template.json 
@@ -4,11 +4,16 @@ "metadata": { "name": "mariadb-persistent", "annotations": { - "description": "MariaDB database service, with persistent storage. Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", + "openshift.io/display-name": "MariaDB (Persistent)", + "description": "MariaDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-mariadb", "tags": "database,mariadb" } }, + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.", + "labels": { + "template": "mariadb-persistent-template" + }, "objects": [ { "kind": "Service", @@ -201,8 +206,5 @@ "value": "1Gi", "required": true } - ], - "labels": { - "template": "mariadb-persistent-template" - } + ] } diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-ephemeral-template.json index 5ed92b3ad..605601ef2 100644 --- a/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-ephemeral-template.json 
@@ -5,11 +5,16 @@ "name": "mongodb-ephemeral", "creationTimestamp": null, "annotations": { - "description": "MongoDB database service, without persistent storage. WARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", + "openshift.io/display-name": "MongoDB (Ephemeral)", + "description": "MongoDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", "iconClass": "icon-mongodb", "tags": "database,mongodb" } }, + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MONGODB_USER}\n Password: ${MONGODB_PASSWORD}\n Database Name: ${MONGODB_DATABASE}\n Connection URL: mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.", + "labels": { + "template": "mongodb-ephemeral-template" + }, "objects": [ { "kind": "Service", @@ -217,9 +222,5 @@ "value": "3.2", "required": true } - ], - "labels": { - "template": "mongodb-ephemeral-template" - }, - "message": "You can connect to the database using MongoDB connection URL mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}" + ] } diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-persistent-template.json index 00d550d7d..d2a0d01f0 100644 --- a/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-persistent-template.json +++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-persistent-template.json 
@@ -5,11 +5,16 @@ "name": "mongodb-persistent", "creationTimestamp": null, "annotations": { - "description": "MongoDB database service, with persistent storage. Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", + "openshift.io/display-name": "MongoDB (Persistent)", + "description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-mongodb", "tags": "database,mongodb" } }, + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MONGODB_USER}\n Password: ${MONGODB_PASSWORD}\n Database Name: ${MONGODB_DATABASE}\n Connection URL: mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.", + "labels": { + "template": "mongodb-persistent-template" + }, "objects": [ { "kind": "Service", @@ -241,9 +246,5 @@ "value": "3.2", "required": true } - ], - "labels": { - "template": "mongodb-persistent-template" - }, - "message": "You can connect to the database using MongoDB connection URL mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}" + ] } diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-ephemeral-template.json index a7c731243..0cea42f8b 100644 --- a/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-ephemeral-template.json 
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-ephemeral-template.json @@ -4,11 +4,16 @@ "metadata": { "name": "mysql-ephemeral", "annotations": { - "description": "MySQL database service, without persistent storage. WARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", + "openshift.io/display-name": "MySQL (Ephemeral)", + "description": "MySQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.6/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", "iconClass": "icon-mysql-database", "tags": "database,mysql" } }, + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.6/README.md.", + "labels": { + "template": "mysql-ephemeral-template" + }, "objects": [ { "kind": "Service", @@ -205,8 +210,5 @@ "value": "5.6", "required": true } - ], - "labels": { - "template": "mysql-ephemeral-template" - } + ] } diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-persistent-template.json index 05add25e2..fc7cd7d09 100644 --- a/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-persistent-template.json +++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-persistent-template.json 
@@ -4,11 +4,16 @@ "metadata": { "name": "mysql-persistent", "annotations": { - "description": "MySQL database service, with persistent storage. Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", + "openshift.io/display-name": "MySQL (Persistent)", + "description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.6/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-mysql-database", "tags": "database,mysql" } }, + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.6/README.md.", + "labels": { + "template": "mysql-persistent-template" + }, "objects": [ { "kind": "Service", @@ -208,8 +213,5 @@ "value": "5.6", "required": true } - ], - "labels": { - "template": "mysql-persistent-template" - } + ] } diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-ephemeral-template.json index 1562204e5..505224b62 100644 --- a/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-ephemeral-template.json 
@@ -5,11 +5,16 @@
 "name": "postgresql-ephemeral",
 "creationTimestamp": null,
 "annotations": {
- "description": "PostgreSQL database service, without persistent storage. WARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
+ "openshift.io/display-name": "PostgreSQL (Ephemeral)",
+ "description": "PostgreSQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
 "iconClass": "icon-postgresql",
 "tags": "database,postgresql"
 }
 },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.",
+ "labels": {
+ "template": "postgresql-ephemeral-template"
+ },
 "objects": [
 {
 "kind": "Service",
@@ -205,8 +210,5 @@
 "value": "9.5",
 "required": true
 }
- ],
- "labels": {
- "template": "postgresql-ephemeral-template"
- }
+ ]
 }
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-persistent-template.json
index fd2b6a0fb..7ff49782b 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-persistent-template.json
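Review bot: The added `message` in the first hunk gives the PostgreSQL connection string as `mysql://${DATABASE_SERVICE_NAME}:5432/`; the scheme looks carried over from the MySQL templates and does not match the service this template creates. It should read `Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/`. postgresql-persistent-template.json adds the identical string and needs the same correction.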
@@ -5,11 +5,16 @@
 "name": "postgresql-persistent",
 "creationTimestamp": null,
 "annotations": {
- "description": "PostgreSQL database service, with persistent storage. Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
+ "openshift.io/display-name": "PostgreSQL (Persistent)",
+ "description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
 "iconClass": "icon-postgresql",
 "tags": "database,postgresql"
 }
 },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.",
+ "labels": {
+ "template": "postgresql-persistent-template"
+ },
 "objects": [
 {
 "kind": "Service",
@@ -229,8 +234,5 @@
 "value": "9.5",
 "required": true
 }
- ],
- "labels": {
- "template": "postgresql-persistent-template"
- }
+ ]
 }
diff --git a/roles/openshift_examples/files/examples/v1.4/image-streams/dotnet_imagestreams.json b/roles/openshift_examples/files/examples/v1.4/image-streams/dotnet_imagestreams.json
index 6cbf81591..a65d35c2e 100644
--- a/roles/openshift_examples/files/examples/v1.4/image-streams/dotnet_imagestreams.json
+++ b/roles/openshift_examples/files/examples/v1.4/image-streams/dotnet_imagestreams.json
@@ -12,16 +12,20 @@
 "kind": "ImageStream",
 "apiVersion": "v1",
 "metadata": {
- "name": "dotnet"
+ "name": "dotnet",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core"
+ }
 },
 "spec": {
 "tags": [
 {
 "name": "latest",
 "annotations": {
- "description": ".Net Core 1.0 S2I image.",
+ "openshift.io/display-name": ".NET Core (Latest)",
+ "description": "Build and run .NET Core applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/1.0/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of .NET Core available on OpenShift, including major versions updates.",
 "iconClass": "icon-dotnet",
- "tags": "builder,.net,dotnet,dotnetcore,rh-dotnetcore10",
+ "tags": "builder,.net,dotnet,dotnetcore",
 "supports":"dotnet",
 "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore.git",
 "sampleContextDir": "1.0/test/asp-net-hello-world"
@@ -34,7 +38,8 @@
 {
 "name": "1.0",
 "annotations": {
- "description": ".Net Core 1.0 S2I image.",
+ "openshift.io/display-name": ".NET Core 1.0",
+ "description": "Build and run .NET Core 1.0 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/1.0/README.md.",
 "iconClass": "icon-dotnet",
 "tags": "builder,.net,dotnet,dotnetcore,rh-dotnetcore10",
 "supports":"dotnet:1.0,dotnet",
diff --git a/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json
index 386f16d26..a645de7e2 100644
--- a/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json
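Review bot: The WARNING sentence added to the `latest` tag in the first hunk ends with `including major versions updates`; the intended wording is `including major version updates`. The same sentence is repeated on every `latest` tag in image-streams-centos7.json and image-streams-rhel7.json, so it is worth fixing in all of them at once. The `latest` description also links to the version-specific `1.0/README.md`, which will presumably go stale once `latest` is re-pointed at a newer release.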
@@ -7,14 +7,18 @@
 "kind": "ImageStream",
 "apiVersion": "v1",
 "metadata": {
- "name": "ruby"
+ "name": "ruby",
+ "annotations": {
+ "openshift.io/display-name": "Ruby"
+ }
 },
 "spec": {
 "tags": [
 {
 "name": "latest",
 "annotations": {
- "description": "Build and run Ruby applications",
+ "openshift.io/display-name": "Ruby (Latest)",
+ "description": "Build and run Ruby applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.3/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.",
 "iconClass": "icon-ruby",
 "tags": "builder,ruby",
 "supports": "ruby",
@@ -28,7 +32,8 @@
 {
 "name": "2.0",
 "annotations": {
- "description": "Build and run Ruby 2.0 applications",
+ "openshift.io/display-name": "Ruby 2.0",
+ "description": "Build and run Ruby 2.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.0/README.md.",
 "iconClass": "icon-ruby",
 "tags": "builder,ruby",
 "supports": "ruby:2.0,ruby",
@@ -43,7 +48,8 @@
 {
 "name": "2.2",
 "annotations": {
- "description": "Build and run Ruby 2.2 applications",
+ "openshift.io/display-name": "Ruby 2.2",
+ "description": "Build and run Ruby 2.2 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.2/README.md.",
 "iconClass": "icon-ruby",
 "tags": "builder,ruby",
 "supports": "ruby:2.2,ruby",
@@ -58,7 +64,8 @@
 {
 "name": "2.3",
 "annotations": {
- "description": "Build and run Ruby 2.3 applications",
+ "openshift.io/display-name": "Ruby 2.3",
+ "description": "Build and run Ruby 2.3 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/blob/master/2.3/README.md.",
 "iconClass": "icon-ruby",
 "tags": "builder,ruby",
 "supports": "ruby:2.3,ruby",
@@ -77,14 +84,18 @@
 "kind": "ImageStream",
 "apiVersion": "v1",
 "metadata": {
- "name": "nodejs"
+ "name": "nodejs",
+ "annotations": {
+ "openshift.io/display-name": "Node.js"
+ }
 },
 "spec": {
 "tags": [
 {
 "name": "latest",
 "annotations": {
- "description": "Build and run NodeJS applications",
+ "openshift.io/display-name": "Node.js (Latest)",
+ "description": "Build and run Node.js applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Node.js available on OpenShift, including major versions updates.",
 "iconClass": "icon-nodejs",
 "tags": "builder,nodejs",
 "supports":"nodejs",
@@ -98,7 +109,8 @@
 {
 "name": "0.10",
 "annotations": {
- "description": "Build and run NodeJS 0.10 applications",
+ "openshift.io/display-name": "Node.js 0.10",
+ "description": "Build and run Node.js 0.10 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/0.10/README.md.",
 "iconClass": "icon-nodejs",
 "tags": "builder,nodejs",
 "supports":"nodejs:0.10,nodejs:0.1,nodejs",
@@ -113,7 +125,8 @@
 {
 "name": "4",
 "annotations": {
- "description": "Build and run NodeJS 4 applications",
+ "openshift.io/display-name": "Node.js 4",
+ "description": "Build and run Node.js 4 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.",
 "iconClass": "icon-nodejs",
 "tags": "builder,nodejs",
 "supports":"nodejs:4,nodejs",
@@ -132,14 +145,18 @@
 "kind": "ImageStream",
 "apiVersion": "v1",
 "metadata": {
- "name": "perl"
+ "name": "perl",
+ "annotations": {
+ "openshift.io/display-name": "Perl"
+ }
 },
 "spec": {
 "tags": [
 {
 "name": "latest",
 "annotations": {
- "description": "Build and run Perl applications",
+ "openshift.io/display-name": "Perl (Latest)",
+ "description": "Build and run Perl applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Perl available on OpenShift, including major versions updates.",
 "iconClass": "icon-perl",
 "tags": "builder,perl",
 "supports":"perl",
@@ -153,7 +170,8 @@
 {
 "name": "5.16",
 "annotations": {
- "description": "Build and run Perl 5.16 applications",
+ "openshift.io/display-name": "Perl 5.16",
+ "description": "Build and run Perl 5.16 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.16/README.md.",
 "iconClass": "icon-perl",
 "tags": "builder,perl",
 "supports":"perl:5.16,perl",
@@ -168,7 +186,8 @@
 {
 "name": "5.20",
 "annotations": {
- "description": "Build and run Perl 5.20 applications",
+ "openshift.io/display-name": "Perl 5.20",
+ "description": "Build and run Perl 5.20 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.",
 "iconClass": "icon-perl",
 "tags": "builder,perl",
 "supports":"perl:5.20,perl",
@@ -188,14 +207,18 @@
 "kind": "ImageStream",
 "apiVersion": "v1",
 "metadata": {
- "name": "php"
+ "name": "php",
+ "annotations": {
+ "openshift.io/display-name": "PHP"
+ }
 },
 "spec": {
 "tags": [
 {
 "name": "latest",
 "annotations": {
- "description": "Build and run PHP applications",
+ "openshift.io/display-name": "PHP (Latest)",
+ "description": "Build and run PHP applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major versions updates.",
 "iconClass": "icon-php",
 "tags": "builder,php",
 "supports":"php",
@@ -209,7 +232,8 @@
 {
 "name": "5.5",
 "annotations": {
- "description": "Build and run PHP 5.5 applications",
+ "openshift.io/display-name": "PHP 5.5",
+ "description": "Build and run PHP 5.5 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.5/README.md.",
 "iconClass": "icon-php",
 "tags": "builder,php",
 "supports":"php:5.5,php",
@@ -224,7 +248,8 @@
 {
 "name": "5.6",
 "annotations": {
- "description": "Build and run PHP 5.6 applications",
+ "openshift.io/display-name": "PHP 5.6",
+ "description": "Build and run PHP 5.6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.",
 "iconClass": "icon-php",
 "tags": "builder,php",
 "supports":"php:5.6,php",
@@ -243,14 +268,18 @@
 "kind": "ImageStream",
 "apiVersion": "v1",
 "metadata": {
- "name": "python"
+ "name": "python",
+ "annotations": {
+ "openshift.io/display-name": "Python"
+ }
 },
 "spec": {
 "tags": [
 {
 "name": "latest",
 "annotations": {
- "description": "Build and run Python applications",
+ "openshift.io/display-name": "Python (Latest)",
+ "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
 "iconClass": "icon-python",
 "tags": "builder,python",
 "supports":"python",
@@ -264,7 +293,8 @@
 {
 "name": "3.3",
 "annotations": {
- "description": "Build and run Python 3.3 applications",
+ "openshift.io/display-name": "Python 3.3",
+ "description": "Build and run Python 3.3 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.3/README.md.",
 "iconClass": "icon-python",
 "tags": "builder,python",
 "supports":"python:3.3,python",
@@ -279,7 +309,8 @@
 {
 "name": "2.7",
 "annotations": {
- "description": "Build and run Python 2.7 applications",
+ "openshift.io/display-name": "Python 2.7",
+ "description": "Build and run Python 2.7 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/2.7/README.md.",
 "iconClass": "icon-python",
 "tags": "builder,python",
 "supports":"python:2.7,python",
@@ -294,7 +325,8 @@
 {
 "name": "3.4",
 "annotations": {
- "description": "Build and run Python 3.4 applications",
+ "openshift.io/display-name": "Python 3.4",
+ "description": "Build and run Python 3.4 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.4/README.md.",
 "iconClass": "icon-python",
 "tags": "builder,python",
 "supports":"python:3.4,python",
@@ -309,7 +341,8 @@
 {
 "name": "3.5",
 "annotations": {
- "description": "Build and run Python 3.5 applications",
+ "openshift.io/display-name": "Python 3.5",
+ "description": "Build and run Python 3.5 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.",
 "iconClass": "icon-python",
 "tags": "builder,python",
 "supports":"python:3.5,python",
@@ -328,14 +361,18 @@
 "kind": "ImageStream",
 "apiVersion": "v1",
 "metadata": {
- "name": "wildfly"
+ "name": "wildfly",
+ "annotations": {
+ "openshift.io/display-name": "WildFly"
+ }
 },
 "spec": {
 "tags": [
 {
 "name": "latest",
 "annotations": {
- "description": "Build and run Java applications on Wildfly",
+ "openshift.io/display-name": "WildFly (Latest)",
+ "description": "Build and run WildFly applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of WildFly available on OpenShift, including major versions updates.",
 "iconClass": "icon-wildfly",
 "tags": "builder,wildfly,java",
 "supports":"jee,java",
@@ -349,7 +386,8 @@
 {
 "name": "8.1",
 "annotations": {
- "description": "Build and run Java applications on Wildfly 8.1",
+ "openshift.io/display-name": "WildFly 8.1",
+ "description": "Build and run WildFly 8.1 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.",
 "iconClass": "icon-wildfly",
 "tags": "builder,wildfly,java",
 "supports":"wildfly:8.1,jee,java",
@@ -364,7 +402,8 @@
 {
 "name": "9.0",
 "annotations": {
- "description": "Build and run Java applications on Wildfly 9.0",
+ "openshift.io/display-name": "WildFly 9.0",
+ "description": "Build and run WildFly 9.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.",
 "iconClass": "icon-wildfly",
 "tags": "builder,wildfly,java",
 "supports":"wildfly:9.0,jee,java",
@@ -379,7 +418,8 @@
 {
 "name": "10.0",
 "annotations": {
- "description": "Build and run Java applications on Wildfly 10.0",
+ "openshift.io/display-name": "WildFly 10.0",
+ "description": "Build and run WildFly 10.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.",
 "iconClass": "icon-wildfly",
 "tags": "builder,wildfly,java",
 "supports":"wildfly:10.0,jee,java",
@@ -394,7 +434,8 @@
 {
 "name": "10.1",
 "annotations": {
- "description": "Build and run Java applications on Wildfly 10.1",
+ "openshift.io/display-name": "WildFly 10.1",
+ "description": "Build and run WildFly 10.1 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.",
 "iconClass": "icon-wildfly",
 "tags": "builder,wildfly,java",
 "supports":"wildfly:10.1,jee,java",
@@ -413,14 +454,18 @@
 "kind": "ImageStream",
 "apiVersion": "v1",
 "metadata": {
- "name": "mysql"
+ "name": "mysql",
+ "annotations": {
+ "openshift.io/display-name": "MySQL"
+ }
 },
 "spec": {
 "tags": [
 {
 "name": "latest",
 "annotations": {
- "description": "Provides a MySQL database",
+ "openshift.io/display-name": "MySQL (Latest)",
+ "description": "Provides a MySQL database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MySQL available on OpenShift, including major versions updates.",
 "iconClass": "icon-mysql-database",
 "tags": "mysql"
 },
@@ -432,7 +477,8 @@
 {
 "name": "5.5",
 "annotations": {
- "description": "Provides a MySQL v5.5 database",
+ "openshift.io/display-name": "MySQL 5.5",
+ "description": "Provides a MySQL 5.5 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.5/README.md.",
 "iconClass": "icon-mysql-database",
 "tags": "mysql",
 "version": "5.5"
@@ -445,7 +491,8 @@
 {
 "name": "5.6",
 "annotations": {
- "description": "Provides a MySQL v5.6 database",
+ "openshift.io/display-name": "MySQL 5.6",
+ "description": "Provides a MySQL 5.6 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.",
 "iconClass": "icon-mysql-database",
 "tags": "mysql",
 "version": "5.6"
@@ -462,14 +509,18 @@
 "kind": "ImageStream",
 "apiVersion": "v1",
 "metadata": {
- "name": "mariadb"
+ "name": "mariadb",
+ "annotations": {
+ "openshift.io/display-name": "MariaDB"
+ }
 },
 "spec": {
 "tags": [
 {
 "name": "latest",
 "annotations": {
- "description": "Provides a MariaDB database",
+ "openshift.io/display-name": "MariaDB (Latest)",
+ "description": "Provides a MariaDB database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MariaDB available on OpenShift, including major versions updates.",
 "iconClass": "icon-mariadb",
 "tags": "mariadb"
 },
@@ -481,7 +532,8 @@
 {
 "name": "10.1",
 "annotations": {
- "description": "Provides a MariaDB v10.1 database",
+ "openshift.io/display-name": "MariaDB 10.1",
+ "description": "Provides a MariaDB 10.1 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.",
 "iconClass": "icon-mariadb",
 "tags": "mariadb",
 "version": "10.1"
@@ -498,14 +550,18 @@
 "kind": "ImageStream",
 "apiVersion": "v1",
 "metadata": {
- "name": "postgresql"
+ "name": "postgresql",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL"
+ }
 },
 "spec": {
 "tags": [
 {
 "name": "latest",
 "annotations": {
- "description": "Provides a PostgreSQL database",
+ "openshift.io/display-name": "PostgreSQL (Latest)",
+ "description": "Provides a PostgreSQL database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PostgreSQL available on OpenShift, including major versions updates.",
 "iconClass": "icon-postgresql",
 "tags": "postgresql"
 },
@@ -517,7 +573,8 @@
 {
 "name": "9.2",
 "annotations": {
- "description": "Provides a PostgreSQL v9.2 database",
+ "openshift.io/display-name": "PostgreSQL 9.2",
+ "description": "Provides a PostgreSQL 9.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2.",
 "iconClass": "icon-postgresql",
 "tags": "postgresql",
 "version": "9.2"
@@ -530,7 +587,8 @@
 {
 "name": "9.4",
 "annotations": {
- "description": "Provides a PostgreSQL v9.4 database",
+ "openshift.io/display-name": "PostgreSQL 9.4",
+ "description": "Provides a PostgreSQL 9.4 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.4.",
 "iconClass": "icon-postgresql",
 "tags": "postgresql",
 "version": "9.4"
@@ -543,7 +601,8 @@
 {
 "name": "9.5",
 "annotations": {
- "description": "Provides a PostgreSQL v9.5 database",
+ "openshift.io/display-name": "PostgreSQL 9.5",
+ "description": "Provides a PostgreSQL 9.5 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.",
 "iconClass": "icon-postgresql",
 "tags": "postgresql",
 "version": "9.5"
@@ -560,14 +619,18 @@
 "kind": "ImageStream",
 "apiVersion": "v1",
 "metadata": {
- "name": "mongodb"
+ "name": "mongodb",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB"
+ }
 },
 "spec": {
 "tags": [
 {
 "name": "latest",
 "annotations": {
- "description": "Provides a MongoDB database",
+ "openshift.io/display-name": "MongoDB (Latest)",
+ "description": "Provides a MongoDB database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MongoDB available on OpenShift, including major versions updates.",
 "iconClass": "icon-mongodb",
 "tags": "mongodb"
 },
@@ -579,7 +642,8 @@
 {
 "name": "2.4",
 "annotations": {
- "description": "Provides a MongoDB v2.4 database",
+ "openshift.io/display-name": "MongoDB 2.4",
+ "description": "Provides a MongoDB 2.4 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.4/README.md.",
 "iconClass": "icon-mongodb",
 "tags": "mongodb",
 "version": "2.4"
@@ -592,7 +656,8 @@
 {
 "name": "2.6",
 "annotations": {
- "description": "Provides a MongoDB v2.6 database",
+ "openshift.io/display-name": "MongoDB 2.6",
+ "description": "Provides a MongoDB 2.6 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.6/README.md.",
 "iconClass": "icon-mongodb",
 "tags": "mongodb",
 "version": "2.6"
@@ -605,7 +670,8 @@
 {
 "name": "3.2",
 "annotations": {
- "description": "Provides a MongoDB v3.2 database",
+ "openshift.io/display-name": "MongoDB 3.2",
+ "description": "Provides a MongoDB 3.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.",
 "iconClass": "icon-mongodb",
 "tags": "mongodb",
 "version": "3.2"
@@ -622,26 +688,31 @@
 "kind": "ImageStream",
 "apiVersion": "v1",
 "metadata": {
- "name": "jenkins"
+ "name": "jenkins",
+ "annotations": {
+ "openshift.io/display-name": "Jenkins"
+ }
 },
 "spec": {
 "tags": [
 {
 "name": "latest",
 "annotations": {
- "description": "Provides a Jenkins server",
+ "openshift.io/display-name": "Jenkins (Latest)",
+ "description": "Provides a Jenkins server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Jenkins available on OpenShift, including major versions updates.",
 "iconClass": "icon-jenkins",
 "tags": "jenkins"
 },
 "from": {
 "kind": "ImageStreamTag",
- "name": "1"
+ "name": "2"
 }
 },
 {
 "name": "1",
 "annotations": {
- "description": "Provides a Jenkins server",
+ "openshift.io/display-name": "Jenkins 1.X",
+ "description": "Provides a Jenkins 1.X server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
 "iconClass": "icon-jenkins",
 "tags": "jenkins",
 "version": "1.x"
@@ -650,6 +721,20 @@
 "kind": "DockerImage",
 "name": "openshift/jenkins-1-centos7:latest"
 }
+ },
+ {
+ "name": "2",
+ "annotations": {
+ "openshift.io/display-name": "Jenkins 2.X",
+ "description": "Provides a Jenkins v2.x server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
+ "iconClass": "icon-jenkins",
+ "tags": "jenkins",
+ "version": "2.x"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/jenkins-2-centos7:latest"
+ }
 }
 ]
 }
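Review bot: Re-pointing the `latest` tag's `from` from `"name": "1"` to `"name": "2"` in this hunk moves every consumer that tracks `jenkins:latest` onto a new Jenkins major release the next time the tag is resolved. Whether the Jenkins templates shipped alongside this file (not visible here) reference `jenkins:latest` or a numbered tag should be checked before merging; if they follow `latest`, existing installations would pick up 2.x without an explicit decision by the operator. The added WARNING text documents this, but an explicit numbered tag in anything that deploys a CI server is the safer default.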
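Review bot: The description added for the new `2` tag says `Provides a Jenkins v2.x server` while its display name is `Jenkins 2.X` and the `1` tag reads `Provides a Jenkins 1.X server`; use `Provides a Jenkins 2.X server on CentOS 7.` so the entries match. The tag also imports `openshift/jenkins-2-centos7:latest`, a floating Docker tag (the `1` tag in the context lines follows the same pattern), so what `jenkins:2` resolves to depends on what the registry serves at import time. Sites that need reproducible CI images may want that noted, or the reference pinned by digest in the form `openshift/jenkins-2-centos7@sha256:<digest-placeholder>`.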
diff --git a/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json
index 56c63263b..9b9cd236f 100644
--- a/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json
@@ -7,14 +7,18 @@
 "kind": "ImageStream",
 "apiVersion": "v1",
 "metadata": {
- "name": "ruby"
+ "name": "ruby",
+ "annotations": {
+ "openshift.io/display-name": "Ruby"
+ }
 },
 "spec": {
 "tags": [
 {
 "name": "latest",
 "annotations": {
- "description": "Build and run Ruby applications",
+ "openshift.io/display-name": "Ruby (Latest)",
+ "description": "Build and run Ruby applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.3/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.",
 "iconClass": "icon-ruby",
 "tags": "builder,ruby",
 "supports": "ruby",
@@ -28,7 +32,8 @@
 {
 "name": "2.0",
 "annotations": {
- "description": "Build and run Ruby 2.0 applications",
+ "openshift.io/display-name": "Ruby 2.0",
+ "description": "Build and run Ruby 2.0 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.0/README.md.",
 "iconClass": "icon-ruby",
 "tags": "builder,ruby",
 "supports": "ruby:2.0,ruby",
@@ -43,7 +48,8 @@
 {
 "name": "2.2",
 "annotations": {
- "description": "Build and run Ruby 2.2 applications",
+ "openshift.io/display-name": "Ruby 2.2",
+ "description": "Build and run Ruby 2.2 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.2/README.md.",
 "iconClass": "icon-ruby",
 "tags": "builder,ruby",
 "supports": "ruby:2.2,ruby",
@@ -58,7 +64,8 @@
 {
 "name": "2.3",
 "annotations": {
- "description": "Build and run Ruby 2.3 applications",
+ "openshift.io/display-name": "Ruby 2.3",
+ "description": "Build and run Ruby 2.3 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/blob/master/2.3/README.md.",
 "iconClass": "icon-ruby",
 "tags": "builder,ruby",
 "supports": "ruby:2.3,ruby",
@@ -77,14 +84,18 @@
 "kind": "ImageStream",
 "apiVersion": "v1",
 "metadata": {
- "name": "nodejs"
+ "name": "nodejs",
+ "annotations": {
+ "openshift.io/display-name": "Node.js"
+ }
 },
 "spec": {
 "tags": [
 {
 "name": "latest",
 "annotations": {
- "description": "Build and run NodeJS applications",
+ "openshift.io/display-name": "Node.js (Latest)",
+ "description": "Build and run Node.js applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Node.js available on OpenShift, including major versions updates.",
 "iconClass": "icon-nodejs",
 "tags": "builder,nodejs",
 "supports":"nodejs",
@@ -98,7 +109,8 @@
 {
 "name": "0.10",
 "annotations": {
- "description": "Build and run NodeJS 0.10 applications",
+ "openshift.io/display-name": "Node.js 0.10",
+ "description": "Build and run Node.js 0.10 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/0.10/README.md.",
 "iconClass": "icon-nodejs",
 "tags": "builder,nodejs",
 "supports":"nodejs:0.10,nodejs:0.1,nodejs",
@@ -113,7 +125,8 @@
 {
 "name": "4",
 "annotations": {
- "description": "Build and run NodeJS 4.x applications",
+ "openshift.io/display-name": "Node.js 4",
+ "description": "Build and run Node.js 4 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.",
 "iconClass": "icon-nodejs",
 "tags": "builder,nodejs",
 "supports":"nodejs:4,nodejs",
@@ -132,14 +145,18 @@
 "kind": "ImageStream",
 "apiVersion": "v1",
 "metadata": {
- "name": "perl"
+ "name": "perl",
+ "annotations": {
+ "openshift.io/display-name": "Perl"
+ }
 },
 "spec": {
 "tags": [
 {
 "name": "latest",
 "annotations": {
- "description": "Build and run Perl applications",
+ "openshift.io/display-name": "Perl (Latest)",
+ "description": "Build and run Perl applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Perl available on OpenShift, including major versions updates.",
 "iconClass": "icon-perl",
 "tags": "builder,perl",
 "supports":"perl",
@@ -153,7 +170,8 @@ { "name": "5.16", "annotations": { - "description": "Build and run Perl 5.16 applications", + "openshift.io/display-name": "Perl 5.16", + "description": "Build and run Perl 5.16 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.16/README.md.", "iconClass": "icon-perl", "tags": "builder,perl", "supports":"perl:5.16,perl", @@ -168,7 +186,8 @@ { "name": "5.20", "annotations": { - "description": "Build and run Perl 5.20 applications", + "openshift.io/display-name": "Perl 5.20", + "description": "Build and run Perl 5.20 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.", "iconClass": "icon-perl", "tags": "builder,perl", "supports":"perl:5.20,perl", @@ -188,14 +207,18 @@ "kind": "ImageStream", "apiVersion": "v1", "metadata": { - "name": "php" + "name": "php", + "annotations": { + "openshift.io/display-name": "PHP" + } }, "spec": { "tags": [ { "name": "latest", "annotations": { - "description": "Build and run PHP applications", + "openshift.io/display-name": "PHP (Latest)", + "description": "Build and run PHP applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major versions updates.", "iconClass": "icon-php", "tags": "builder,php", "supports":"php", 
@@ -209,7 +232,8 @@ { "name": "5.5", "annotations": { - "description": "Build and run PHP 5.5 applications", + "openshift.io/display-name": "PHP 5.5", + "description": "Build and run PHP 5.5 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.5/README.md.", "iconClass": "icon-php", "tags": "builder,php", "supports":"php:5.5,php", @@ -224,7 +248,8 @@ { "name": "5.6", "annotations": { - "description": "Build and run PHP 5.6 applications", + "openshift.io/display-name": "PHP 5.6", + "description": "Build and run PHP 5.6 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.", "iconClass": "icon-php", "tags": "builder,php", "supports":"php:5.6,php", @@ -243,14 +268,18 @@ "kind": "ImageStream", "apiVersion": "v1", "metadata": { - "name": "python" + "name": "python", + "annotations": { + "openshift.io/display-name": "Python" + } }, "spec": { "tags": [ { "name": "latest", "annotations": { - "description": "Build and run Python applications", + "openshift.io/display-name": "Python (Latest)", + "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.", "iconClass": "icon-python", "tags": "builder,python", "supports":"python", 
@@ -264,7 +293,8 @@ { "name": "3.3", "annotations": { - "description": "Build and run Python 3.3 applications", + "openshift.io/display-name": "Python 3.3", + "description": "Build and run Python 3.3 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.3/README.md.", "iconClass": "icon-python", "tags": "builder,python", "supports":"python:3.3,python", @@ -279,7 +309,8 @@ { "name": "2.7", "annotations": { - "description": "Build and run Python 2.7 applications", + "openshift.io/display-name": "Python 2.7", + "description": "Build and run Python 2.7 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/2.7/README.md.", "iconClass": "icon-python", "tags": "builder,python", "supports":"python:2.7,python", @@ -294,7 +325,8 @@ { "name": "3.4", "annotations": { - "description": "Build and run Python 3.4 applications", + "openshift.io/display-name": "Python 3.4", + "description": "Build and run Python 3.4 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.4/README.md.", "iconClass": "icon-python", "tags": "builder,python", "supports":"python:3.4,python", @@ -309,7 +341,8 @@ { "name": "3.5", "annotations": { - "description": "Build and run Python 3.5 applications", + "openshift.io/display-name": "Python 3.5", + "description": "Build and run Python 3.5 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.", "iconClass": "icon-python", "tags": "builder,python", "supports":"python:3.5,python", 
@@ -328,14 +361,18 @@ "kind": "ImageStream", "apiVersion": "v1", "metadata": { - "name": "mysql" + "name": "mysql", + "annotations": { + "openshift.io/display-name": "MySQL" + } }, "spec": { "tags": [ { "name": "latest", "annotations": { - "description": "Provides a MySQL database", + "openshift.io/display-name": "MySQL (Latest)", + "description": "Provides a MySQL database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MySQL available on OpenShift, including major versions updates.", "iconClass": "icon-mysql-database", "tags": "mysql" }, @@ -347,7 +384,8 @@ { "name": "5.5", "annotations": { - "description": "Provides a MySQL v5.5 database", + "openshift.io/display-name": "MySQL 5.5", + "description": "Provides a MySQL 5.5 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.5/README.md.", "iconClass": "icon-mysql-database", "tags": "mysql", "version": "5.5" @@ -360,7 +398,8 @@ { "name": "5.6", "annotations": { - "description": "Provides a MySQL v5.6 database", + "openshift.io/display-name": "MySQL 5.6", + "description": "Provides a MySQL 5.6 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.", "iconClass": "icon-mysql-database", "tags": "mysql", "version": "5.6" 
@@ -377,14 +416,18 @@ "kind": "ImageStream", "apiVersion": "v1", "metadata": { - "name": "mariadb" + "name": "mariadb", + "annotations": { + "openshift.io/display-name": "MariaDB" + } }, "spec": { "tags": [ { "name": "latest", "annotations": { - "description": "Provides a MariaDB database", + "openshift.io/display-name": "MariaDB (Latest)", + "description": "Provides a MariaDB database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MariaDB available on OpenShift, including major versions updates.", "iconClass": "icon-mariadb", "tags": "mariadb" }, @@ -396,7 +439,8 @@ { "name": "10.1", "annotations": { - "description": "Provides a MariaDB v10.1 database", + "openshift.io/display-name": "MariaDB 10.1", + "description": "Provides a MariaDB 10.1 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.", "iconClass": "icon-mariadb", "tags": "mariadb", "version": "10.1" 
@@ -413,14 +457,18 @@ "kind": "ImageStream", "apiVersion": "v1", "metadata": { - "name": "postgresql" + "name": "postgresql", + "annotations": { + "openshift.io/display-name": "PostgreSQL" + } }, "spec": { "tags": [ { "name": "latest", "annotations": { - "description": "Provides a PostgreSQL database", + "openshift.io/display-name": "PostgreSQL (Latest)", + "description": "Provides a PostgreSQL database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PostgreSQL available on OpenShift, including major versions updates.", "iconClass": "icon-postgresql", "tags": "postgresql" }, @@ -432,7 +480,8 @@ { "name": "9.2", "annotations": { - "description": "Provides a PostgreSQL v9.2 database", + "openshift.io/display-name": "PostgreSQL 9.2", + "description": "Provides a PostgreSQL 9.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2.", "iconClass": "icon-postgresql", "tags": "postgresql", "version": "9.2" @@ -445,7 +494,8 @@ { "name": "9.4", "annotations": { - "description": "Provides a PostgreSQL v9.4 database", + "openshift.io/display-name": "PostgreSQL 9.4", + "description": "Provides a PostgreSQL 9.4 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.4.", "iconClass": "icon-postgresql", "tags": "postgresql", "version": "9.4" 
@@ -458,7 +508,8 @@ { "name": "9.5", "annotations": { - "description": "Provides a PostgreSQL v9.5 database", + "openshift.io/display-name": "PostgreSQL 9.5", + "description": "Provides a PostgreSQL 9.5 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.", "iconClass": "icon-postgresql", "tags": "postgresql", "version": "9.5" @@ -475,14 +526,18 @@ "kind": "ImageStream", "apiVersion": "v1", "metadata": { - "name": "mongodb" + "name": "mongodb", + "annotations": { + "openshift.io/display-name": "MongoDB" + } }, "spec": { "tags": [ { "name": "latest", "annotations": { - "description": "Provides a MongoDB database", + "openshift.io/display-name": "MongoDB (Latest)", + "description": "Provides a MongoDB database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MongoDB available on OpenShift, including major versions updates.", "iconClass": "icon-mongodb", "tags": "mongodb" }, @@ -494,7 +549,8 @@ { "name": "2.4", "annotations": { - "description": "Provides a MongoDB v2.4 database", + "openshift.io/display-name": "MongoDB 2.4", + "description": "Provides a MongoDB 2.4 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.4/README.md.", "iconClass": "icon-mongodb", "tags": "mongodb", "version": "2.4" 
@@ -507,7 +563,8 @@ { "name": "2.6", "annotations": { - "description": "Provides a MongoDB v2.6 database", + "openshift.io/display-name": "MongoDB 2.6", + "description": "Provides a MongoDB 2.6 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.6/README.md.", "iconClass": "icon-mongodb", "tags": "mongodb", "version": "2.6" @@ -520,7 +577,8 @@ { "name": "3.2", "annotations": { - "description": "Provides a MongoDB v3.2 database", + "openshift.io/display-name": "MongoDB 3.2", + "description": "Provides a MongoDB 3.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.", "iconClass": "icon-mongodb", "tags": "mongodb", "version": "3.2" 
@@ -537,26 +595,31 @@ "kind": "ImageStream", "apiVersion": "v1", "metadata": { - "name": "jenkins" + "name": "jenkins", + "annotations": { + "openshift.io/display-name": "Jenkins" + } }, "spec": { "tags": [ { "name": "latest", "annotations": { - "description": "Provides a Jenkins server", + "openshift.io/display-name": "Jenkins (Latest)", + "description": "Provides a Jenkins server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Jenkins available on OpenShift, including major versions updates.", "iconClass": "icon-jenkins", "tags": "jenkins" }, "from": { "kind": "ImageStreamTag", - "name": "1" + "name": "2" } }, { "name": "1", "annotations": { - "description": "Provides a Jenkins server", + "openshift.io/display-name": "Jenkins 1.X", + "description": "Provides a Jenkins 1.X server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.", "iconClass": "icon-jenkins", "tags": "jenkins", "version": "1.x" @@ -565,6 +628,20 @@ "kind": "DockerImage", "name": "registry.access.redhat.com/openshift3/jenkins-1-rhel7:latest" } + }, + { + "name": "2", + "annotations": { + "openshift.io/display-name": "Jenkins 2.X", + "description": "Provides a Jenkins 2.X server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.", + "iconClass": "icon-jenkins", + "tags": "jenkins", + "version": "2.x" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/openshift3/jenkins-2-rhel7:latest" + } } ] } 
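Review bot: The new `2` tag added in the last hunk of this file resolves to `registry.access.redhat.com/openshift3/jenkins-2-rhel7:latest`, a floating upstream tag, so the Jenkins build a cluster imports depends on when the import runs and cannot be tied to a reviewed image. The existing `1` tag follows the same pattern, so this is consistent with the file. It is still worth considering a versioned tag or digest here if the registry publishes one. Separately, the preceding hunk retargets `latest` from `1` to `2`; the added WARNING text documents that behaviour, but projects that already track `jenkins:latest` would presumably move to a new major version on the next import without anyone selecting it.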
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql.json index ab4982690..354978891 100644 --- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql.json +++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql.json @@ -4,11 +4,13 @@ "metadata": { "name": "cakephp-mysql-example", "annotations": { - "description": "An example CakePHP application with a MySQL database", - "tags": "quickstart,php,cakephp,mysql", + "openshift.io/display-name": "CakePHP + MySQL (Ephemeral)", + "description": "An example CakePHP application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.", + "tags": "quickstart,php,cakephp", "iconClass": "icon-php" } }, + "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.", "labels": { "template": "cakephp-mysql-example" }, @@ -19,7 +21,8 @@ "metadata": { "name": "${NAME}", "annotations": { - "description": "Exposes and load balances the application pods" + "description": "Exposes and load balances the application pods", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]" } }, "spec": { diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql.json index cc7920b7d..9fc5be5e0 100644 --- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql.json 
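Review bot: The `message` added to the cakephp-mysql template links to `https://github.com/openshift/cake-ex/blob/master/README.md`, while the `description` a few lines above it links to `https://github.com/openshift/cakephp-ex/blob/master/README.md`. The other quickstarts changed in this commit (dancer, django, nodejs, rails) use the same repository in both fields, so `cake-ex` looks like a typo that sends users to the wrong page or to one that does not exist. The link in `message` should read `https://github.com/openshift/cakephp-ex/blob/master/README.md`.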
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql.json @@ -4,11 +4,13 @@ "metadata": { "name": "dancer-mysql-example", "annotations": { - "description": "An example Dancer application with a MySQL database", - "tags": "quickstart,perl,dancer,mysql", + "openshift.io/display-name": "Dancer + MySQL (Ephemeral)", + "description": "An example Dancer application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.", + "tags": "quickstart,perl,dancer", "iconClass": "icon-perl" } }, + "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.", "labels": { "template": "dancer-mysql-example" }, @@ -19,7 +21,8 @@ "metadata": { "name": "${NAME}", "annotations": { - "description": "Exposes and load balances the application pods" + "description": "Exposes and load balances the application pods", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]" } }, "spec": { diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql.json index 7d1dea11b..590d5fd4f 100644 --- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql.json +++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql.json 
@@ -4,11 +4,13 @@ "metadata": { "name": "django-psql-example", "annotations": { - "description": "An example Django application with a PostgreSQL database", - "tags": "quickstart,python,django,postgresql", + "openshift.io/display-name": "Django + PostgreSQL (Ephemeral)", + "description": "An example Django application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.", + "tags": "quickstart,python,django", "iconClass": "icon-python" } }, + "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.", "labels": { "template": "django-psql-example" }, @@ -19,7 +21,8 @@ "metadata": { "name": "${NAME}", "annotations": { - "description": "Exposes and load balances the application pods" + "description": "Exposes and load balances the application pods", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]" } }, "spec": { diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-ephemeral-template.json index 880f0b34e..fc7423840 100644 --- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-ephemeral-template.json 
@@ -5,12 +5,13 @@ "name": "jenkins-ephemeral", "creationTimestamp": null, "annotations": { - "description": "Jenkins service, without persistent storage.\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", + "openshift.io/display-name": "Jenkins (Ephemeral)", + "description": "Jenkins service, without persistent storage.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.", "iconClass": "icon-jenkins", "tags": "instant-app,jenkins" } }, - "message": "A Jenkins service has been created in your project. The username/password are admin/${JENKINS_PASSWORD}. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.", + "message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.", "objects": [ { "kind": "Route", @@ -89,6 +90,7 @@ "livenessProbe": { "timeoutSeconds": 3, "initialDelaySeconds": 120, + "failureThreshold" : 30, "httpGet": { "path": "/login", "port": 8080 @@ -96,8 +98,12 @@ }, "env": [ { - "name": "JENKINS_PASSWORD", - "value": "${JENKINS_PASSWORD}" + "name": "OPENSHIFT_ENABLE_OAUTH", + "value": "${ENABLE_OAUTH}" + }, + { + "name": "OPENSHIFT_ENABLE_REDIRECT_PROMPT", + "value": "true" }, { "name": "KUBERNETES_MASTER", @@ -150,7 +156,10 @@ "kind": "ServiceAccount", "apiVersion": "v1", "metadata": { - "name": "${JENKINS_SERVICE_NAME}" + "name": "${JENKINS_SERVICE_NAME}", + "annotations": { + "serviceaccounts.openshift.io/oauth-redirectreference.jenkins": "{\"kind\":\"OAuthRedirectReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Route\",\"name\":\"${JENKINS_SERVICE_NAME}\"}}" + } } }, { 
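Review bot: The new `message` in the first hunk tells every user to "Log into Jenkins with your OpenShift account", but the template also accepts `ENABLE_OAUTH` set to false (see the parameter hunk later in this file). In that case the message is wrong and no longer says which login applies. Consider wording it for both cases, for example `Log into Jenkins with your OpenShift account (or with the static admin account if ENABLE_OAUTH was set to false).` The same message is added in jenkins-persistent-template.json.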
@@ -236,12 +245,10 @@ "value": "jenkins-jnlp" }, { - "name": "JENKINS_PASSWORD", - "displayName": "Jenkins Password", - "description": "Password for the Jenkins 'admin' user.", - "generate": "expression", - "from": "[a-zA-Z0-9]{16}", - "required": true + "name": "ENABLE_OAUTH", + "displayName": "Enable OAuth in Jenkins", + "description": "Whether to enable OAuth OpenShift integration. If false, the static account 'admin' will be initialized with the password 'password'.", + "value": "true" }, { "name": "MEMORY_LIMIT", diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-persistent-template.json index 3291f3594..acf59ee94 100644 --- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-persistent-template.json +++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-persistent-template.json 
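Review bot: With `ENABLE_OAUTH` set to false, the new parameter description says the `admin` account is created with the fixed password `password`, whereas the removed `JENKINS_PASSWORD` parameter generated a random 16-character value per instantiation. The template also defines a Route, so an instance started with OAuth disabled would presumably be reachable with a credential that is documented in the template itself. Consider keeping a generated password parameter for the non-OAuth path; whether the image still honours `JENKINS_PASSWORD` is not visible here and needs checking. At minimum the description could end with `If false, the static account 'admin' will be initialized with the password 'password'; change it before the route is exposed.` The same parameter change is made in jenkins-persistent-template.json.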
@@ -5,12 +5,13 @@ "name": "jenkins-persistent", "creationTimestamp": null, "annotations": { - "description": "Jenkins service, with persistent storage.\nYou must have persistent volumes available in your cluster to use this template.", + "openshift.io/display-name": "Jenkins (Persistent)", + "description": "Jenkins service, with persistent storage.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-jenkins", "tags": "instant-app,jenkins" } }, - "message": "A Jenkins service has been created in your project. The username/password are admin/${JENKINS_PASSWORD}. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.", + "message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.", "objects": [ { "kind": "Route", @@ -106,6 +107,7 @@ "livenessProbe": { "timeoutSeconds": 3, "initialDelaySeconds": 120, + "failureThreshold" : 30, "httpGet": { "path": "/login", "port": 8080 @@ -113,8 +115,12 @@ }, "env": [ { - "name": "JENKINS_PASSWORD", - "value": "${JENKINS_PASSWORD}" + "name": "OPENSHIFT_ENABLE_OAUTH", + "value": "${ENABLE_OAUTH}" + }, + { + "name": "OPENSHIFT_ENABLE_REDIRECT_PROMPT", + "value": "true" }, { "name": "KUBERNETES_MASTER", @@ -167,7 +173,10 @@ "kind": "ServiceAccount", "apiVersion": "v1", "metadata": { - "name": "${JENKINS_SERVICE_NAME}" + "name": "${JENKINS_SERVICE_NAME}", + "annotations": { + "serviceaccounts.openshift.io/oauth-redirectreference.jenkins": "{\"kind\":\"OAuthRedirectReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Route\",\"name\":\"${JENKINS_SERVICE_NAME}\"}}" + } } }, { 
@@ -253,12 +262,10 @@ "value": "jenkins-jnlp" }, { - "name": "JENKINS_PASSWORD", - "displayName": "Jenkins Password", - "description": "Password for the Jenkins 'admin' user.", - "generate": "expression", - "from": "[a-zA-Z0-9]{16}", - "required": true + "name": "ENABLE_OAUTH", + "displayName": "Enable OAuth in Jenkins", + "description": "Whether to enable OAuth OpenShift integration. If false, the static account 'admin' will be initialized with the password 'password'.", + "value": "true" }, { "name": "MEMORY_LIMIT", diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb.json index 6ab4a1781..d4b4add18 100644 --- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb.json +++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb.json @@ -4,11 +4,13 @@ "metadata": { "name": "nodejs-mongodb-example", "annotations": { - "description": "An example Node.js application with a MongoDB database", - "tags": "quickstart,nodejs,mongodb", + "openshift.io/display-name": "Node.js + MongoDB (Ephemeral)", + "description": "An example Node.js application with a MongoDB database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.", + "tags": "quickstart,nodejs", "iconClass": "icon-nodejs" } }, + "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.", "labels": { "template": "nodejs-mongodb-example" }, 
@@ -19,7 +21,8 @@ "metadata": { "name": "${NAME}", "annotations": { - "description": "Exposes and load balances the application pods" + "description": "Exposes and load balances the application pods", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]" } }, "spec": { diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql.json index 50d60f2bb..baed15d8a 100644 --- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql.json +++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql.json @@ -4,11 +4,13 @@ "metadata": { "name": "rails-postgresql-example", "annotations": { - "description": "An example Rails application with a PostgreSQL database", - "tags": "quickstart,ruby,rails,postgresql", + "openshift.io/display-name": "Rails + PostgreSQL (Ephemeral)", + "description": "An example Rails application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.", + "tags": "quickstart,ruby,rails", "iconClass": "icon-ruby" } }, + "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.", "labels": { "template": "rails-postgresql-example" }, 
@@ -19,7 +21,8 @@ "metadata": { "name": "${NAME}", "annotations": { - "description": "Exposes and load balances the application pods" + "description": "Exposes and load balances the application pods", + "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]" } }, "spec": { diff --git a/roles/openshift_examples/files/examples/v1.4/xpaas-streams/fis-image-streams.json b/roles/openshift_examples/files/examples/v1.4/xpaas-streams/fis-image-streams.json index 65060cc2c..ed0e94bed 100644 --- a/roles/openshift_examples/files/examples/v1.4/xpaas-streams/fis-image-streams.json +++ b/roles/openshift_examples/files/examples/v1.4/xpaas-streams/fis-image-streams.json @@ -20,23 +20,13 @@ { "name": "1.0", "annotations": { - "description": "JBoss Fuse Integration Services 1.0 Java S2I images.", + "description": "JBoss Fuse Integration Services 6.2.1 Java S2I images.", "iconClass": "icon-jboss", "tags": "builder,jboss-fuse,java,xpaas", "supports":"jboss-fuse:6.2.1,java:8,xpaas:1.2", "version": "1.0" } - }, - { - "name": "2.0", - "annotations": { - "description": "JBoss Fuse Integration Services 2.0 Java S2I images.", - "iconClass": "icon-jboss", - "tags": "builder,jboss-fuse,java,xpaas", - "supports":"jboss-fuse:6.3.0,java:8,xpaas:1.2", - "version": "2.0" - } - } + } ] } }, @@ -52,23 +42,13 @@ { "name": "1.0", "annotations": { - "description": "JBoss Fuse Integration Services 1.0 Karaf S2I images.", + "description": "JBoss Fuse Integration Services 6.2.1 Karaf S2I images.", "iconClass": "icon-jboss", "tags": "builder,jboss-fuse,java,karaf,xpaas", "supports":"jboss-fuse:6.2.1,java:8,xpaas:1.2", "version": "1.0" } - }, - { - "name": "2.0", - "annotations": { - "description": "JBoss Fuse Integration Services 2.0 Karaf S2I images.", - "iconClass": "icon-jboss", - "tags": "builder,jboss-fuse,java,karaf,xpaas", - "supports":"jboss-fuse:6.3.0,java:8,xpaas:1.2", - "version": "2.0" - } - } + } ] } } 
diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml index 82536e8af..551e21e72 100644 --- a/roles/openshift_examples/tasks/main.yml +++ b/roles/openshift_examples/tasks/main.yml @@ -106,22 +106,6 @@ failed_when: "'already exists' not in oex_import_quickstarts.stderr and oex_import_quickstarts.rc != 0" changed_when: false -- name: Import origin infrastructure-templates - command: > - {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ infrastructure_origin_base }} - when: openshift_examples_load_centos | bool - register: oex_import_infrastructure - failed_when: "'already exists' not in oex_import_infrastructure.stderr and oex_import_infrastructure.rc != 0" - changed_when: false - -- name: Import enterprise infrastructure-templates - command: > - {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ infrastructure_enterprise_base }} - when: openshift_examples_load_rhel | bool - register: oex_import_infrastructure - failed_when: "'already exists' not in oex_import_infrastructure.stderr and oex_import_infrastructure.rc != 0" - changed_when: false - - name: Remove old xPaas template files file: path: "{{ item }}" diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index cb642e12e..d797eb4d3 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -7,7 +7,13 @@ """Ansible module for retrieving and setting openshift related facts""" -import ConfigParser +try: + # python2 + import ConfigParser +except ImportError: + # python3 + import configparser as ConfigParser + import copy import io import os 
@@ -55,11 +61,10 @@ def migrate_docker_facts(facts): facts['docker'][param] = facts[role].pop(old_param) if 'node' in facts and 'portal_net' in facts['node']: - facts['docker']['hosted_registry_insecure'] = True facts['docker']['hosted_registry_network'] = facts['node'].pop('portal_net') # log_options was originally meant to be a comma separated string, but - # we now prefer an actual list, with backward compatability: + # we now prefer an actual list, with backward compatibility: if 'log_options' in facts['docker'] and \ isinstance(facts['docker']['log_options'], basestring): facts['docker']['log_options'] = facts['docker']['log_options'].split(",") @@ -203,9 +208,9 @@ def query_metadata(metadata_url, headers=None, expect_json=False): if info['status'] != 200: raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable") if expect_json: - return module.from_json(result.read()) + return module.from_json(to_native(result.read())) else: - return [line.strip() for line in result.readlines()] + return [to_native(line.strip()) for line in result.readlines()] def walk_metadata(metadata_url, headers=None, expect_json=False): @@ -313,7 +318,7 @@ def normalize_aws_facts(metadata, facts): ): int_info = dict() var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'} - for ips_var, int_var in var_map.iteritems(): + for ips_var, int_var in iteritems(var_map): ips = interface.get(int_var) if isinstance(ips, basestring): int_info[ips_var] = [ips] 
@@ -833,23 +838,29 @@ def set_version_facts_if_unset(facts): version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('1.1.1') version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('1.2.0') version_gte_3_3_or_1_3 = LooseVersion(version) >= LooseVersion('1.3.0') + version_gte_3_4_or_1_4 = LooseVersion(version) >= LooseVersion('1.4.0') else: version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('3.0.2.905') version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('3.1.1') version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('3.1.1.901') version_gte_3_3_or_1_3 = LooseVersion(version) >= LooseVersion('3.3.0') + version_gte_3_4_or_1_4 = LooseVersion(version) >= LooseVersion('3.4.0') else: version_gte_3_1_or_1_1 = True version_gte_3_1_1_or_1_1_1 = True version_gte_3_2_or_1_2 = True - version_gte_3_3_or_1_3 = False + version_gte_3_3_or_1_3 = True + version_gte_3_4_or_1_4 = False facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1 facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1 facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2 facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3 + facts['common']['version_gte_3_4_or_1_4'] = version_gte_3_4_or_1_4 - if version_gte_3_3_or_1_3: + if version_gte_3_4_or_1_4: + examples_content_version = 'v1.4' + elif version_gte_3_3_or_1_3: examples_content_version = 'v1.3' elif version_gte_3_2_or_1_2: examples_content_version = 'v1.2' 
@@ -902,10 +913,29 @@ def set_sdn_facts_if_unset(facts, system_facts): facts['common']['sdn_network_plugin_name'] = plugin if 'master' in facts: + # set defaults for sdn_cluster_network_cidr and sdn_host_subnet_length + # these might be overridden if they exist in the master config file + sdn_cluster_network_cidr = '10.128.0.0/14' + sdn_host_subnet_length = '9' + + master_cfg_path = os.path.join(facts['common']['config_base'], + 'master/master-config.yaml') + if os.path.isfile(master_cfg_path): + with open(master_cfg_path, 'r') as master_cfg_f: + config = yaml.safe_load(master_cfg_f.read()) + + if 'networkConfig' in config: + if 'clusterNetworkCIDR' in config['networkConfig']: + sdn_cluster_network_cidr = \ + config['networkConfig']['clusterNetworkCIDR'] + if 'hostSubnetLength' in config['networkConfig']: + sdn_host_subnet_length = \ + config['networkConfig']['hostSubnetLength'] + if 'sdn_cluster_network_cidr' not in facts['master']: - facts['master']['sdn_cluster_network_cidr'] = '10.1.0.0/16' + facts['master']['sdn_cluster_network_cidr'] = sdn_cluster_network_cidr if 'sdn_host_subnet_length' not in facts['master']: - facts['master']['sdn_host_subnet_length'] = '8' + facts['master']['sdn_host_subnet_length'] = sdn_host_subnet_length if 'node' in facts and 'sdn_mtu' not in facts['node']: node_ip = facts['common']['ip'] @@ -913,7 +943,7 @@ def set_sdn_facts_if_unset(facts, system_facts): # default MTU if interface MTU cannot be detected facts['node']['sdn_mtu'] = '1450' - for val in system_facts.itervalues(): + for val in itervalues(system_facts): if isinstance(val, dict) and 'mtu' in val: mtu = val['mtu'] 
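Review bot: `yaml.safe_load` returns `None` for an empty or comment-only `master-config.yaml`, so the new `'networkConfig' in config` test in `set_sdn_facts_if_unset` raises `TypeError`; the same happens one level down if `networkConfig` is present but null. Guard with `network_cfg = (config or {}).get('networkConfig') or {}` and read both keys from `network_cfg`. Values taken from the file keep their YAML type, so `hostSubnetLength` arrives as an int while the default is the string `'9'`; wrapping it in `str()` keeps the fact's type stable. The function starts and continues outside this hunk.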
@@ -1035,12 +1065,23 @@ def get_current_config(facts): return current_config def build_kubelet_args(facts): - """ Build node kubelet_args """ - cloud_cfg_path = os.path.join(facts['common']['config_base'], - 'cloudprovider') + """Build node kubelet_args + +In the node-config.yaml file, kubeletArgument sub-keys have their +values provided as a list. Hence the gratuitous use of ['foo'] below. + """ + cloud_cfg_path = os.path.join( + facts['common']['config_base'], + 'cloudprovider') + + # We only have to do this stuff on hosts that are nodes if 'node' in facts: + # Any changes to the kubeletArguments parameter are stored + # here first. kubelet_args = {} + if 'cloudprovider' in facts: + # EVERY cloud is special <3 if 'kind' in facts['cloudprovider']: if facts['cloudprovider']['kind'] == 'aws': kubelet_args['cloud-provider'] = ['aws'] 
@@ -1050,6 +1091,29 @@ def build_kubelet_args(facts): kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf'] if facts['cloudprovider']['kind'] == 'gce': kubelet_args['cloud-provider'] = ['gce'] + kubelet_args['cloud-config'] = [cloud_cfg_path + '/gce.conf'] + + # Automatically add node-labels to the kubeletArguments + # parameter. See BZ1359848 for additional details. + # + # Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1359848 + if 'labels' in facts['node'] and isinstance(facts['node']['labels'], dict): + # tl;dr: os_node_labels="{'foo': 'bar', 'a': 'b'}" turns + # into ['foo=bar', 'a=b'] + # + # On the openshift_node_labels inventory variable we loop + # over each key-value tuple (from .items()) and join the + # key to the value with an '=' character, this produces a + # list. + # + # map() seems to be returning an itertools.imap object + # instead of a list. We cast it to a list ourselves. + labels_str = list(map(lambda x: '='.join(x), facts['node']['labels'].items())) + if labels_str != '': + kubelet_args['node-labels'] = labels_str + + # If we've added items to the kubelet_args dict then we need + # to merge the new items back into the main facts object. if kubelet_args != {}: facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], []) return facts @@ -1070,6 +1134,7 @@ def build_controller_args(facts): controller_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf'] if facts['cloudprovider']['kind'] == 'gce': controller_args['cloud-provider'] = ['gce'] + controller_args['cloud-config'] = [cloud_cfg_path + '/gce.conf'] if controller_args != {}: facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], []) return facts 
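Review bot: The guard `if labels_str != '':` in `build_kubelet_args` compares a list with a string, so it is always true and an empty `labels` dict still writes `kubelet_args['node-labels'] = []`. `'='.join(x)` also raises `TypeError` when a label value is not a string, which a YAML inventory produces for values such as `true` or `1`. Suggest `labels_str = ['{0}={1}'.format(k, v) for k, v in facts['node']['labels'].items()]` followed by `if labels_str:`. The function begins outside this hunk.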
@@ -1090,6 +1155,7 @@ def build_api_server_args(facts): api_server_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf'] if facts['cloudprovider']['kind'] == 'gce': api_server_args['cloud-provider'] = ['gce'] + api_server_args['cloud-config'] = [cloud_cfg_path + '/gce.conf'] if api_server_args != {}: facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], []) return facts @@ -1138,6 +1204,24 @@ def get_docker_version_info(): } return result +def get_hosted_registry_insecure(): + """ Parses OPTIONS from /etc/sysconfig/docker to determine if the + registry is currently insecure. + """ + hosted_registry_insecure = None + if os.path.exists('/etc/sysconfig/docker'): + try: + ini_str = unicode('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8') + ini_fp = io.StringIO(ini_str) + config = ConfigParser.RawConfigParser() + config.readfp(ini_fp) + options = config.get('root', 'OPTIONS') + if 'insecure-registry' in options: + hosted_registry_insecure = True + except: + pass + return hosted_registry_insecure + def get_openshift_version(facts): """ Get current version of openshift on the host. @@ -1156,7 +1240,7 @@ def get_openshift_version(facts): # version if 'common' in facts: if 'version' in facts['common'] and facts['common']['version'] is not None: - return facts['common']['version'] + return chomp_commit_offset(facts['common']['version']) if os.path.isfile('/usr/bin/openshift'): _, output, _ = module.run_command(['/usr/bin/openshift', 'version']) 
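Review bot: The bare `except: pass` in the new `get_hosted_registry_insecure()` hides every failure, so a parse or decode error is indistinguishable from "no insecure registry configured", and this value later decides whether the registry certificate tasks run. `unicode(open(...).read(), 'utf-8')` is a Python 2 construct that is unlikely to work under the Python 3 support this commit starts adding, and the file handle is never closed. Read the file through `io.open` in a `with` block and catch only I/O and parser errors, as sketched below. The substring test for `insecure-registry` matches any registry listed in OPTIONS, not only the hosted one, which deserves a comment in the docstring.

```python
    if os.path.exists('/etc/sysconfig/docker'):
        try:
            # io.open decodes to text on both Python 2 and 3
            with io.open('/etc/sysconfig/docker', 'r', encoding='utf-8') as docker_cfg:
                ini_str = u'[root]\n' + docker_cfg.read()
            config = ConfigParser.RawConfigParser()
            config.readfp(io.StringIO(ini_str))
            # ... unchanged: OPTIONS lookup and insecure-registry test ...
        except (IOError, OSError, ConfigParser.Error):
            # unreadable or unparsable file: leave the fact unset
            pass
```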
@@ -1171,7 +1255,27 @@ def get_openshift_version(facts): _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version']) version = parse_openshift_version(output) - return version + return chomp_commit_offset(version) + + +def chomp_commit_offset(version): + """Chomp any "+git.foo" commit offset string from the given `version` + and return the modified version string. + +Ex: +- chomp_commit_offset(None) => None +- chomp_commit_offset(1337) => "1337" +- chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15" +- chomp_commit_offset("v3.4.0.15") => "v3.4.0.15" +- chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0" + """ + if version is None: + return version + else: + # Stringify, just in case it's a Number type. Split by '+' and + # return the first split. No concerns about strings without a + # '+', .split() returns an array of the original string. + return str(version).split('+')[0] def get_container_openshift_version(facts): @@ -1266,7 +1370,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw 'image_policy_config'] facts = dict() - for key, value in orig.iteritems(): + for key, value in iteritems(orig): # Key exists in both old and new facts. if key in new: if key in inventory_json_facts: @@ -1347,8 +1451,11 @@ def save_local_facts(filename, facts): """ try: fact_dir = os.path.dirname(filename) - if not os.path.exists(fact_dir): - os.makedirs(fact_dir) + try: + os.makedirs(fact_dir) # try to make the directory + except OSError as exception: + if exception.errno != errno.EEXIST: # but it is okay if it is already there + raise # pass any other exceptions up the chain with open(filename, 'w') as fact_file: fact_file.write(module.jsonify(facts)) os.chmod(filename, 0o600) 
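Review bot: In `save_local_facts`, the unchanged lines after the new `makedirs` block still create the facts file with `open(filename, 'w')` and only tighten it with `os.chmod(filename, 0o600)` after the JSON has been written, so on first creation the content is briefly readable under the process umask. Cached facts may include credentials (inferred from the 0o600 mode), so create the file restricted from the start: `fd = os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)` followed by `with os.fdopen(fd, 'w') as fact_file:`, keeping the `chmod` for files that already exist with a wider mode. The directory created by `os.makedirs(fact_dir)` likewise takes the default mode. The function continues outside this hunk.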
@@ -1437,8 +1544,8 @@ def set_proxy_facts(facts): safe_get_bool(common['generate_no_proxy_hosts']): if 'no_proxy_internal_hostnames' in common: common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(',')) - common['no_proxy'].append('.' + common['dns_domain']) - # We always add ourselves no matter what + # We always add local dns domain and ourselves no matter what + common['no_proxy'].append('.' + common['dns_domain']) common['no_proxy'].append(common['hostname']) common['no_proxy'] = sort_unique(common['no_proxy']) facts['common'] = common @@ -1488,7 +1595,7 @@ def set_container_facts_if_unset(facts): cli_image = master_image node_image = 'openshift3/node' ovs_image = 'openshift3/openvswitch' - etcd_image = 'registry.access.redhat.com/rhel7/etcd' + etcd_image = 'registry.access.redhat.com/rhel7/etcd3' pod_image = 'openshift3/ose-pod' router_image = 'openshift3/ose-haproxy-router' registry_image = 'openshift3/ose-docker-registry' @@ -1498,7 +1605,7 @@ def set_container_facts_if_unset(facts): cli_image = master_image node_image = 'aep3_beta/node' ovs_image = 'aep3_beta/openvswitch' - etcd_image = 'registry.access.redhat.com/rhel7/etcd' + etcd_image = 'registry.access.redhat.com/rhel7/etcd3' pod_image = 'aep3_beta/aep-pod' router_image = 'aep3_beta/aep-haproxy-router' registry_image = 'aep3_beta/aep-docker-registry' @@ -1508,7 +1615,7 @@ def set_container_facts_if_unset(facts): cli_image = master_image node_image = 'openshift/node' ovs_image = 'openshift/openvswitch' - etcd_image = 'registry.access.redhat.com/rhel7/etcd' + etcd_image = 'registry.access.redhat.com/rhel7/etcd3' pod_image = 'openshift/origin-pod' router_image = 'openshift/origin-haproxy-router' registry_image = 'openshift/origin-docker-registry' 
@@ -1698,8 +1805,8 @@ class OpenShiftFacts(object): facts = set_node_schedulability(facts) facts = set_selectors(facts) facts = set_identity_providers_if_unset(facts) - facts = set_sdn_facts_if_unset(facts, self.system_facts) facts = set_deployment_facts_if_unset(facts) + facts = set_sdn_facts_if_unset(facts, self.system_facts) facts = set_container_facts_if_unset(facts) facts = build_kubelet_args(facts) facts = build_controller_args(facts) @@ -1790,13 +1897,15 @@ class OpenShiftFacts(object): if 'docker' in roles: docker = dict(disable_push_dockerhub=False, - hosted_registry_insecure=True, options='--log-driver=json-file --log-opt max-size=50m') version_info = get_docker_version_info() if version_info is not None: docker['api_version'] = version_info['api_version'] docker['version'] = version_info['version'] docker['gte_1_10'] = LooseVersion(version_info['version']) >= LooseVersion('1.10') + hosted_registry_insecure = get_hosted_registry_insecure() + if hosted_registry_insecure is not None: + docker['hosted_registry_insecure'] = hosted_registry_insecure defaults['docker'] = docker if 'clock' in roles: @@ -2015,7 +2124,7 @@ class OpenShiftFacts(object): facts_to_set[self.role] = facts if openshift_env != {} and openshift_env != None: - for fact, value in openshift_env.iteritems(): + for fact, value in iteritems(openshift_env): oo_env_facts = dict() current_level = oo_env_facts keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:] @@ -2073,7 +2182,7 @@ class OpenShiftFacts(object): facts (dict): facts to clean """ facts_to_remove = [] - for fact, value in facts.iteritems(): + for fact, value in iteritems(facts): if isinstance(facts[fact], dict): facts[fact] = self.remove_empty_facts(facts[fact]) else: 
@@ -2204,6 +2313,9 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.facts import * from ansible.module_utils.urls import * +from ansible.module_utils.six import iteritems, itervalues +from ansible.module_utils._text import to_native +from ansible.module_utils.six import b if __name__ == '__main__': main() diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml index b0785a9e4..4d4a232cc 100644 --- a/roles/openshift_facts/tasks/main.yml +++ b/roles/openshift_facts/tasks/main.yml @@ -38,6 +38,8 @@ no_proxy: "{{ openshift_no_proxy | default(None) }}" generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}" no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}" + sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}" + use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}" - name: Set repoquery command set_fact: diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml index 4e525a2da..93b701ebc 100644 --- a/roles/openshift_hosted/tasks/registry/registry.yml +++ b/roles/openshift_hosted/tasks/registry/registry.yml @@ -30,7 +30,7 @@ - name: Create OpenShift registry command: > - {{ openshift.common.admin_binary }} registry --create + {{ openshift.common.client_binary }} adm registry --create --config={{ openshift_hosted_kubeconfig }} {% if replicas > 1 -%} --replicas={{ replicas }} @@ -53,7 +53,7 @@ - include: secure.yml static: no - when: replicas | int > 0 + when: replicas | int > 0 and not (openshift.docker.hosted_registry_insecure | default(false) | bool) - include: storage/object_storage.yml static: no diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/registry/secure.yml index 664edef41..d2f6ba5f6 100644 --- a/roles/openshift_hosted/tasks/registry/secure.yml 
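Review bot: The new `when:` on the `secure.yml` include in `registry.yml` skips certificate setup for the hosted registry whenever `openshift.docker.hosted_registry_insecure` is true, and that fact is set from a plain substring match for `insecure-registry` in the host's `/etc/sysconfig/docker` OPTIONS. As far as the visible hunks show, any `--insecure-registry` entry, including one for an unrelated external registry, would leave the hosted registry without TLS on the next run. Consider gating on an explicit inventory variable instead of a detected host state, and document the intent next to the include, for example `# skipped only when the operator has opted into an insecure hosted registry`.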
+++ b/roles/openshift_hosted/tasks/registry/secure.yml @@ -33,7 +33,7 @@ - name: Create registry certificates if they do not exist command: > - {{ openshift.common.admin_binary }} ca create-server-cert + {{ openshift.common.client_binary }} adm ca create-server-cert --signer-cert=/etc/origin/master/ca.crt --signer-key=/etc/origin/master/ca.key --signer-serial=/etc/origin/master/ca.serial.txt diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml index 0cad19c34..b944fa522 100644 --- a/roles/openshift_hosted/tasks/router/router.yml +++ b/roles/openshift_hosted/tasks/router/router.yml @@ -48,7 +48,7 @@ - name: Create OpenShift router command: > - {{ openshift.common.admin_binary }} router --create + {{ openshift.common.client_binary }} adm router --create --config={{ openshift_hosted_kubeconfig }} {% if replicas > 1 -%} --replicas={{ replicas }} @@ -73,7 +73,7 @@ {% if openshift.hosted.router.name | default(none) is not none -%} {{ openshift.hosted.router.name }} {% endif -%} - + register: openshift_hosted_router_results changed_when: "'service exists' not in openshift_hosted_router_results.stdout" failed_when: "openshift_hosted_router_results.rc != 0 and 'service exists' not in openshift_hosted_router_results.stdout and 'deployment_config' not in openshift_hosted_router_results.stderr and 'service' not in openshift_hosted_router_results.stderr" diff --git a/roles/openshift_hosted/templates/registry_config.j2 b/roles/openshift_hosted/templates/registry_config.j2 index cfe7ac81c..557fd03af 100644 --- a/roles/openshift_hosted/templates/registry_config.j2 +++ b/roles/openshift_hosted/templates/registry_config.j2 
@@ -8,52 +8,55 @@ storage: enabled: true cache: blobdescriptor: inmemory -{% if openshift.hosted.registry.storage.provider == 's3' %} +{% if openshift_hosted_registry_storage_provider | default('') == 's3' %} s3: - accesskey: {{ openshift.hosted.registry.storage.s3.accesskey }} - secretkey: {{ openshift.hosted.registry.storage.s3.secretkey }} - region: {{ openshift.hosted.registry.storage.s3.region }} - bucket: {{ openshift.hosted.registry.storage.s3.bucket }} + accesskey: {{ openshift_hosted_registry_storage_s3_accesskey }} + secretkey: {{ openshift_hosted_registry_storage_s3_secretkey }} + region: {{ openshift_hosted_registry_storage_s3_region }} +{% if openshift_hosted_registry_storage_s3_regionendpoint is defined %} + regionendpoint: {{ openshift_hosted_registry_storage_s3_regionendpoint }} +{% endif %} + bucket: {{ openshift_hosted_registry_storage_s3_bucket }} encrypt: false secure: true v4auth: true - rootdirectory: {{ openshift.hosted.registry.storage.s3.rootdirectory | default('/registry') }} - chunksize: "{{ openshift.hosted.registry.storage.s3.chunksize | default(26214400) }}" -{% elif openshift.hosted.registry.storage.provider == 'azure_blob' %} + rootdirectory: {{ openshift_hosted_registry_storage_s3_rootdirectory | default('/registry') }} + chunksize: "{{ openshift_hosted_registry_storage_s3_chunksize | default(26214400) }}" +{% elif openshift_hosted_registry_storage_provider | default('') == 'azure_blob' %} azure: - accountname: {{ openshift.hosted.registry.storage.azure_blob.accountname }} - accountkey: {{ openshift.hosted.registry.storage.azure_blob.accountkey }} - container: {{ openshift.hosted.registry.storage.azure_blob.container }} - realm: {{ openshift.hosted.registry.storage.azure_blob.realm }} -{% elif openshift.hosted.registry.storage.provider == 'swift' %} + accountname: {{ openshift_hosted_registry_storage_azure_blob_accountname }} + accountkey: {{ openshift_hosted_registry_storage_azure_blob_accountkey }} + container: {{ 
openshift_hosted_registry_storage_azure_blob_container }} + realm: {{ openshift_hosted_registry_storage_azure_blob_realm }} +{% elif openshift_hosted_registry_storage_provider | default('') == 'swift' %} swift: - authurl: {{ openshift.hosted.registry.storage.swift.authurl }} - username: {{ openshift.hosted.registry.storage.swift.username }} - password: {{ openshift.hosted.registry.storage.swift.password }} - container: {{ openshift.hosted.registry.storage.swift.container }} -{% if 'region' in openshift.hosted.registry.storage.swift %} - region: {{ openshift.hosted.registry.storage.swift.region }} + authurl: {{ openshift_hosted_registry_storage_swift_authurl }} + username: {{ openshift_hosted_registry_storage_swift_username }} + password: {{ openshift_hosted_registry_storage_swift_password }} + container: {{ openshift_hosted_registry_storage_swift_container }} +{% if openshift_hosted_registry_storage_swift_region is defined %} + region: {{ openshift_hosted_registry_storage_swift_region }} {% endif -%} -{% if 'tenant' in openshift.hosted.registry.storage.swift %} - tenant: {{ openshift.hosted.registry.storage.swift.tenant }} +{% if openshift_hosted_registry_storage_swift_tenant is defined %} + tenant: {{ openshift_hosted_registry_storage_swift_tenant }} {% endif -%} -{% if 'tenantid' in openshift.hosted.registry.storage.swift %} - tenantid: {{ openshift.hosted.registry.storage.swift.tenantid }} +{% if openshift_hosted_registry_storage_swift_tenantid is defined %} + tenantid: {{ openshift_hosted_registry_storage_swift_tenantid }} {% endif -%} -{% if 'domain' in openshift.hosted.registry.storage.swift %} - domain: {{ openshift.hosted.registry.storage.swift.domain }} +{% if openshift_hosted_registry_storage_swift_domain is defined %} + domain: {{ openshift_hosted_registry_storage_swift_domain }} {% endif -%} -{% if 'domainid' in openshift.hosted.registry.storage.swift %} - domainid: {{ openshift.hosted.registry.storage.swift.domainid }} +{% if 
openshift_hosted_registry_storage_swift_domainid %} + domainid: {{ openshift_hosted_registry_storage_swift_domainid }} {% endif -%} -{% elif openshift.hosted.registry.storage.provider == 'gcs' %} +{% elif openshift_hosted_registry_storage_provider | default('') == 'gcs' %} gcs: - bucket: {{ openshift.hosted.registry.storage.gcs.bucket }} -{% if 'keyfile' in openshift.hosted.registry.storage.gcs %} - keyfile: {{ openshift.hosted.registry.storage.gcs.keyfile }} + bucket: {{ openshift_hosted_registry_storage_gcs_bucket }} +{% if openshift_hosted_registry_storage_gcs_keyfile is defined %} + keyfile: {{ openshift_hosted_registry_storage_gcs_keyfile }} {% endif -%} -{% if 'rootdirectory' in openshift.hosted.registry.storage.gcs %} - rootdirectory: {{ openshift.hosted.registry.storage.gcs.rootdirectory }} +{% if openshift_hosted_registry_storage_gcs_rootdirectory is defined %} + rootdirectory: {{ openshift_hosted_registry_storage_gcs_rootdirectory }} {% endif -%} {% endif -%} auth: 
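Review bot: The `domainid` guard in the swift block reads `{% if openshift_hosted_registry_storage_swift_domainid %}` while every sibling guard uses `is defined`; with the variable unset, templating is likely to fail on an undefined variable instead of skipping the key. Use `{% if openshift_hosted_registry_storage_swift_domainid is defined %}`. Separately, the credentials (`secretkey`, `accountkey`, `password`) are interpolated unquoted, so a value containing `: `, ` #` or a leading special character changes how the rendered YAML parses; piping them through `| to_json` would keep the file well-formed for any secret.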
@@ -67,16 +70,16 @@ middleware: repository: - name: openshift options: - pullthrough: {{ openshift.hosted.registry.pullthrough | default(true) }} - acceptschema2: {{ openshift.hosted.registry.acceptschema2 | default(false) }} - enforcequota: {{ openshift.hosted.registry.enforcequota | default(false) }} -{% if openshift.hosted.registry.storage.provider == 's3' and 'cloudfront' in openshift.hosted.registry.storage.s3 %} + pullthrough: {{ openshift_hosted_registry_pullthrough | default(true) }} + acceptschema2: {{ openshift_hosted_registry_acceptschema2 | default(false) }} + enforcequota: {{ openshift_hosted_registry_enforcequota | default(false) }} +{% if openshift_hosted_registry_storage_provider | default('') == 's3' and openshift_hosted_registry_storage_s3_cloudfront_baseurl is defined %} storage: - name: cloudfront options: - baseurl: {{ openshift.hosted.registry.storage.s3.cloudfront.baseurl }} - privatekey: {{ openshift.hosted.registry.storage.s3.cloudfront.privatekeyfile }} - keypairid: {{ openshift.hosted.registry.storage.s3.cloudfront.keypairid }} + baseurl: {{ openshift_hosted_registry_storage_s3_cloudfront_baseurl }} + privatekey: {{ openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile }} + keypairid: {{ openshift_hosted_registry_storage_s3_cloudfront_keypairid }} {% elif openshift.common.version_gte_3_3_or_1_3 | bool %} storage: - name: openshift diff --git a/roles/openshift_hosted_logging/defaults/main.yml b/roles/openshift_hosted_logging/defaults/main.yml index e357899e5..a01f24df8 100644 --- a/roles/openshift_hosted_logging/defaults/main.yml +++ b/roles/openshift_hosted_logging/defaults/main.yml @@ -1,2 +1,2 @@ --- -examples_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples" +hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted" 
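Review bot: The cloudfront branch is entered when only `openshift_hosted_registry_storage_s3_cloudfront_baseurl` is defined, but it also renders `openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile` and `openshift_hosted_registry_storage_s3_cloudfront_keypairid`. If either is missing, the template fails on an undefined variable at render time with no hint about which setting is absent. Test all three in the condition by appending `and openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile is defined and openshift_hosted_registry_storage_s3_cloudfront_keypairid is defined`.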
diff --git a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml b/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml index 8331f0389..8754616d9 100644 --- a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml +++ b/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml @@ -46,8 +46,8 @@ - name: "Remove deployer template" command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete template logging-deployer-template -n openshift" - register: delete_ouput - failed_when: delete_ouput.rc == 1 and 'exists' not in delete_ouput.stderr + register: delete_output + failed_when: delete_output.rc == 1 and 'exists' not in delete_output.stderr - name: Delete temp directory diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml index 65af1c08e..625af9acd 100644 --- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml +++ b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml @@ -17,7 +17,7 @@ cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig changed_when: False - - name: Check for logging project already exists + - name: "Check for logging project already exists" command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging -o jsonpath='{.metadata.name}' register: logging_project_result @@ -25,7 +25,7 @@ - name: "Create logging project" command: > - {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging + {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging when: logging_project_result.stdout == "" - name: "Changing projects" 
@@ -40,33 +40,50 @@ - name: "Create templates for logging accounts and the deployer" command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ examples_base }}/infrastructure-templates/{{ 'enterprise' if openshift_deployment_type == 'openshift-enterprise' else 'origin' }}/logging-deployer.yaml - register: template_output - failed_when: "template_output.rc == 1 and 'exists' not in template_output.stderr" + {{ openshift.common.client_binary }} create --config={{ mktemp.stdout }}/admin.kubeconfig + -f {{ hosted_base }}/logging-deployer.yaml + --config={{ mktemp.stdout }}/admin.kubeconfig + -n logging + register: logging_import_template + failed_when: "'already exists' not in logging_import_template.stderr and logging_import_template.rc != 0" + changed_when: "'created' in logging_import_template.stdout" - name: "Process the logging accounts template" - shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig process logging-deployer-account-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -" + shell: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + process logging-deployer-account-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f - register: process_deployer_accounts failed_when: process_deployer_accounts.rc == 1 and 'already exists' not in process_deployer_accounts.stderr - name: "Set permissions for logging-deployer service account" command: > - {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer + {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig + policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer register: permiss_output failed_when: 
"permiss_output.rc == 1 and 'exists' not in permiss_output.stderr" - name: "Set permissions for fluentd" command: > - {{ openshift.common.admin_binary}} policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd + {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig + policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd register: fluentd_output failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr" - name: "Set additional permissions for fluentd" command: > - {{ openshift.common.admin_binary}} policy add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd + {{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig + add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd register: fluentd2_output failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr" + - name: "Add rolebinding-reader to aggregated-logging-elasticsearch" + command: > + {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig + policy add-cluster-role-to-user rolebinding-reader \ + system:serviceaccount:logging:aggregated-logging-elasticsearch + register: rolebinding_reader_output + failed_when: "rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr" + - name: "Create ConfigMap for deployer parameters" command: > {{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }} diff --git a/roles/openshift_hosted_templates/defaults/main.yml b/roles/openshift_hosted_templates/defaults/main.yml new file mode 100644 index 000000000..f4fd15089 --- /dev/null +++ b/roles/openshift_hosted_templates/defaults/main.yml 
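Review bot: In the new "Add rolebinding-reader to aggregated-logging-elasticsearch" task, `failed_when` compares the registered result itself to 1 (`rolebinding_reader_output == 1`), which is never true, so a failed role grant is reported as success; it should read `failed_when: "rolebinding_reader_output.rc == 1 and 'exists' not in rolebinding_reader_output.stderr"`. The trailing `\` inside the folded `>` scalar is not a line continuation there and probably reaches the command as an escaped space in front of the service-account name, so drop it. The "Create templates for logging accounts and the deployer" task in the same hunk also passes `--config=` twice; one of the two can go.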
@@ -0,0 +1,10 @@ +--- +hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted" +hosted_deployment_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'enterprise' }}" + +content_version: "{{ openshift.common.examples_content_version }}" + +registry_url: "" +registry_host: "{{ registry_url.split('/')[0] if '.' in registry_url.split('/')[0] else '' }}" + +openshift_hosted_templates_import_command: 'create' diff --git a/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.0/enterprise/logging-deployer.yaml index b3b60bf9b..b3b60bf9b 100644 --- a/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/enterprise/logging-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.0/enterprise/logging-deployer.yaml diff --git a/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.0/enterprise/metrics-deployer.yaml index ddd9f2f75..ddd9f2f75 100644 --- a/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/enterprise/metrics-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.0/enterprise/metrics-deployer.yaml diff --git a/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.0/origin/logging-deployer.yaml index 4c798e148..4c798e148 100644 --- a/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/origin/logging-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.0/origin/logging-deployer.yaml diff --git a/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.0/origin/metrics-deployer.yaml index 3e9bcde5b..3e9bcde5b 100644 
--- a/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/origin/metrics-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.0/origin/metrics-deployer.yaml
diff --git a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.1/enterprise/logging-deployer.yaml
index 9c8f1071a..9c8f1071a 100644
--- a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/logging-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.1/enterprise/logging-deployer.yaml
diff --git a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.1/enterprise/metrics-deployer.yaml
index 99f2df4fa..99f2df4fa 100644
--- a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/metrics-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.1/enterprise/metrics-deployer.yaml
diff --git a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.1/origin/logging-deployer.yaml
index 9257b1f28..9257b1f28 100644
--- a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/origin/logging-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.1/origin/logging-deployer.yaml
diff --git a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.1/origin/metrics-deployer.yaml
index 30d79acee..30d79acee 100644
--- a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/origin/metrics-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.1/origin/metrics-deployer.yaml
diff --git a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.2/enterprise/logging-deployer.yaml
index b6975eead..b6975eead 100644
--- a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/logging-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.2/enterprise/logging-deployer.yaml
diff --git a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.2/enterprise/metrics-deployer.yaml
index 032f94a18..032f94a18 100644
--- a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/metrics-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.2/enterprise/metrics-deployer.yaml
diff --git a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.2/origin/logging-deployer.yaml
index 8b28f872f..8b28f872f 100644
--- a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.2/origin/logging-deployer.yaml
diff --git a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.2/origin/metrics-deployer.yaml
index ab62ae76f..ab62ae76f 100644
--- a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.2/origin/metrics-deployer.yaml
diff --git a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml
index a8d4b1cbb..13cef2d66 100644
--- a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/enterprise/logging-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml @@ -200,13 +200,13 @@ items: name: MODE value: "install" - - description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"' + description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.1", set prefix "registry.access.redhat.com/openshift3/"' name: IMAGE_PREFIX value: "registry.access.redhat.com/openshift3/" - - description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"' + description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.1", set version "3.3.1"' name: IMAGE_VERSION - value: "3.3.0" + value: "3.3.1" - description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry." name: IMAGE_PULL_SECRET diff --git a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.3/enterprise/metrics-deployer.yaml index afd47ec7c..5e21e3a7a 100644 --- a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/enterprise/metrics-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.3/enterprise/metrics-deployer.yaml @@ -101,7 +101,7 @@ parameters: - description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"' name: IMAGE_VERSION - value: "3.3.0" + value: "3.3.1" - description: "Internal URL for the master, for authentication retrieval" name: MASTER_URL 
diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml
index 11478263c..11478263c 100644
--- a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml
diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.3/origin/logging-deployer.yaml
index 8b28f872f..8b28f872f 100644
--- a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/logging-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.3/origin/logging-deployer.yaml
diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.3/origin/metrics-deployer.yaml
index 5f2290419..5f2290419 100644
--- a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/metrics-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.3/origin/metrics-deployer.yaml
diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml
index 80cc4233b..80cc4233b 100644
--- a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml
diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
index a8d4b1cbb..ddfda1272 100644
--- a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/logging-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml @@ -66,6 +66,15 @@ items: - watch - delete - update + - apiVersion: v1 + kind: ClusterRole + metadata: + name: rolebinding-reader + rules: + - resources: + - clusterrolebindings + verbs: + - get - apiVersion: v1 kind: RoleBinding @@ -88,6 +97,17 @@ items: subjects: - kind: ServiceAccount name: logging-deployer + - + apiVersion: v1 + kind: RoleBinding + metadata: + name: logging-elasticsearch-view-role + roleRef: + kind: ClusterRole + name: view + subjects: + - kind: ServiceAccount + name: aggregated-logging-elasticsearch - apiVersion: "v1" kind: "Template" @@ -200,13 +220,13 @@ items: name: MODE value: "install" - - description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"' + description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.4.0", set prefix "registry.access.redhat.com/openshift3/"' name: IMAGE_PREFIX value: "registry.access.redhat.com/openshift3/" - - description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"' + description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.4.0", set version "3.4.0"' name: IMAGE_VERSION - value: "3.3.0" + value: "3.4.0" - description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry." name: IMAGE_PULL_SECRET diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml index afd47ec7c..66051755c 100644 --- a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/metrics-deployer.yaml 
+++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml @@ -62,6 +62,8 @@ objects: value: ${MASTER_URL} - name: MODE value: ${MODE} + - name: CONTINUE_ON_ERROR + value: ${CONTINUE_ON_ERROR} - name: REDEPLOY value: ${REDEPLOY} - name: IGNORE_PREFLIGHT @@ -84,6 +86,8 @@ objects: value: ${HEAPSTER_NODE_ID} - name: METRIC_RESOLUTION value: ${METRIC_RESOLUTION} + - name: STARTUP_TIMEOUT + value: ${STARTUP_TIMEOUT} dnsPolicy: ClusterFirst restartPolicy: Never serviceAccount: metrics-deployer @@ -101,7 +105,7 @@ parameters: - description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"' name: IMAGE_VERSION - value: "3.3.0" + value: "3.4.0" - description: "Internal URL for the master, for authentication retrieval" name: MASTER_URL @@ -114,6 +118,10 @@ parameters: description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment" name: MODE value: "deploy" +- + description: "Set to true to continue even if the deployer runs into an error." + name: CONTINUE_ON_ERROR + value: "false" - description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)" name: REDEPLOY @@ -154,3 +162,7 @@ parameters: description: "How often metrics should be gathered. Defaults value of '15s' for 15 seconds" name: METRIC_RESOLUTION value: "15s" +- + description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart" + name: STARTUP_TIMEOUT + value: "500" 
diff --git a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml index 11478263c..11478263c 100644 --- a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml diff --git a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/origin/logging-deployer.yaml index 8b28f872f..bc8c79ca1 100644 --- a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/origin/logging-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/origin/logging-deployer.yaml @@ -66,6 +66,15 @@ items: - watch - delete - update + - apiVersion: v1 + kind: ClusterRole + metadata: + name: rolebinding-reader + rules: + - resources: + - clusterrolebindings + verbs: + - get - apiVersion: v1 kind: RoleBinding @@ -88,6 +97,17 @@ items: subjects: - kind: ServiceAccount name: logging-deployer + - + apiVersion: v1 + kind: RoleBinding + metadata: + name: logging-elasticsearch-view-role + roleRef: + kind: ClusterRole + name: view + subjects: + - kind: ServiceAccount + name: aggregated-logging-elasticsearch - apiVersion: "v1" kind: "Template" diff --git a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/origin/metrics-deployer.yaml index 5f2290419..54691572a 100644 --- a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/origin/metrics-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/origin/metrics-deployer.yaml 
@@ -86,6 +86,8 @@ objects: value: ${HEAPSTER_NODE_ID} - name: METRIC_RESOLUTION value: ${METRIC_RESOLUTION} + - name: STARTUP_TIMEOUT + value: ${STARTUP_TIMEOUT} dnsPolicy: ClusterFirst restartPolicy: Never serviceAccount: metrics-deployer @@ -160,3 +162,7 @@ parameters: description: "How often metrics should be gathered. Defaults value of '15s' for 15 seconds" name: METRIC_RESOLUTION value: "15s" +- + description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart" + name: STARTUP_TIMEOUT + value: "500" diff --git a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml index 80cc4233b..80cc4233b 100644 --- a/roles/openshift_examples/files/examples/v1.4/infrastructure-templates/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml diff --git a/roles/openshift_hosted_templates/meta/main.yml b/roles/openshift_hosted_templates/meta/main.yml new file mode 100644 index 000000000..9c12865bf --- /dev/null +++ b/roles/openshift_hosted_templates/meta/main.yml @@ -0,0 +1,15 @@ +--- +galaxy_info: + author: Andrew Butcher + description: OpenShift Hosted Templates + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 2.1 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud +dependencies: +- role: openshift_common diff --git a/roles/openshift_hosted_templates/sync-templates.sh b/roles/openshift_hosted_templates/sync-templates.sh new file mode 100755 index 000000000..1188bc440 --- /dev/null +++ b/roles/openshift_hosted_templates/sync-templates.sh 
@@ -0,0 +1,21 @@ +#!/bin/bash + +# Utility script to update the ansible repo with the latest templates for +# metrics and logging +# +# This script should be run from +# openshift-ansible/roles/openshift_hosted_templates + +ORIGIN_VERSION=v1.4 +EXAMPLES_BASE=$(pwd)/files/${ORIGIN_VERSION} +find ${EXAMPLES_BASE} -name '*.json' -delete +TEMP=`mktemp -d` +pushd $TEMP + +wget https://raw.githubusercontent.com/openshift/origin-metrics/master/metrics.yaml -O ${EXAMPLES_BASE}/origin/metrics-deployer.yaml +wget https://raw.githubusercontent.com/openshift/origin-metrics/enterprise/metrics.yaml -O ${EXAMPLES_BASE}/enterprise/metrics-deployer.yaml +wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/master/deployer/deployer.yaml -O ${EXAMPLES_BASE}/origin/logging-deployer.yaml +wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/enterprise/deployment/deployer.yaml -O ${EXAMPLES_BASE}/enterprise/logging-deployer.yaml + +popd +git diff files diff --git a/roles/openshift_hosted_templates/tasks/main.yml b/roles/openshift_hosted_templates/tasks/main.yml new file mode 100644 index 000000000..7d176bce3 --- /dev/null +++ b/roles/openshift_hosted_templates/tasks/main.yml 
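Review bot: The four `wget ... -O` calls write straight over the committed templates and the script has no error handling, so a failed or partial download can leave a truncated file under `files/` while the script carries on to `git diff files`. Adding `set -euo pipefail` after the shebang and quoting the paths (`"${EXAMPLES_BASE}"`, `"$TEMP"`) would stop at the first failure. The sources are the moving `master` and `enterprise` branch heads, and the logging templates define cluster roles and role bindings, so the final `git diff files` is the only review gate; a header comment such as `# Review the resulting diff before committing: these templates grant cluster permissions.` would make that explicit. The directory from `mktemp -d` is entered with `pushd` but is neither used for the downloads nor removed.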
@@ -0,0 +1,65 @@ +--- +- name: Create local temp dir for OpenShift hosted templates copy + local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX + become: False + register: copy_hosted_templates_mktemp + run_once: True + +- name: Create tar of OpenShift examples + local_action: command tar -C "{{ role_path }}/files/{{ content_version }}/{{ hosted_deployment_type }}" -cvf "{{ copy_hosted_templates_mktemp.stdout }}/openshift-hosted-templates.tar" . + args: + # Disables the following warning: + # Consider using unarchive module rather than running tar + warn: no + become: False + register: copy_hosted_templates_tar + +- name: Create remote OpenShift hosted templates directory + file: + dest: "{{ hosted_base }}" + state: directory + mode: 0755 + +- name: Unarchive the OpenShift hosted templates on the remote + unarchive: + src: "{{ copy_hosted_templates_mktemp.stdout }}/openshift-hosted-templates.tar" + dest: "{{ hosted_base }}/" + +- name: Cleanup the OpenShift hosted templates temp dir + become: False + local_action: file dest="{{ copy_hosted_templates_mktemp.stdout }}" state=absent + +- name: Modify registry paths if registry_url is not registry.access.redhat.com + shell: > + find {{ hosted_base }} -type f | xargs -n 1 sed -i 's|registry.access.redhat.com|{{ registry_host | quote }}|g' + when: registry_host != '' and openshift_hosted_modify_imagestreams | default(openshift_examples_modify_imagestreams | default(False)) | bool + +- name: Create temp directory for kubeconfig + command: mktemp -d /tmp/openshift-ansible-XXXXXX + register: mktemp + changed_when: False + +- name: Record kubeconfig tmp dir + set_fact: + openshift_hosted_templates_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig" + +- name: Copy the admin client config(s) + command: > + cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ openshift_hosted_templates_kubeconfig }} + changed_when: False + +- name: Create or update hosted templates + command: > + {{ 
openshift.common.client_binary }} {{ openshift_hosted_templates_import_command }} + -f {{ hosted_base }} + --config={{ openshift_hosted_templates_kubeconfig }} + -n openshift + register: oht_import_templates + failed_when: "'already exists' not in oht_import_templates.stderr and oht_import_templates.rc != 0" + changed_when: "'created' in oht_import_templates.stdout" + +- name: Delete temp directory + file: + name: "{{ mktemp.stdout }}" + state: absent + changed_when: False diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml index b8e6a7da2..863738143 100644 --- a/roles/openshift_loadbalancer/tasks/main.yml +++ b/roles/openshift_loadbalancer/tasks/main.yml @@ -10,6 +10,16 @@ path: /etc/systemd/system/haproxy.service.d state: directory +# Work around ini_file create option in 2.2 which defaults to no +- name: Create limits.conf file + file: + dest: /etc/systemd/system/haproxy.service.d/limits.conf + state: touch + mode: 0660 + owner: root + group: root + changed_when: false + - name: Configure the nofile limits for haproxy ini_file: dest: /etc/systemd/system/haproxy.service.d/limits.conf diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index d1cc5b274..28e4e46e9 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -26,7 +26,7 @@ - name: Set node schedulability command: > - {{ openshift.common.admin_binary }} manage-node {{ hostvars[item].openshift.node.nodename }} --schedulable={{ 'true' if hostvars[item].openshift.node.schedulable | bool else 'false' }} + {{ openshift.common.client_binary }} adm manage-node {{ hostvars[item].openshift.node.nodename }} --schedulable={{ 'true' if hostvars[item].openshift.node.schedulable | bool else 'false' }} --config={{ openshift_manage_node_kubeconfig }} -n default with_items: "{{ openshift_nodes }}" 
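Review bot: The copy of `admin.kubeconfig` made in "Copy the admin client config(s)" is removed only by the final "Delete temp directory" task, so when "Create or update hosted templates" fails the play stops for that host and the admin client config stays behind under `/tmp`. Putting the import under `- block:` and the delete under `always:` keeps the cleanup unconditional; the local tar directory removed by "Cleanup the OpenShift hosted templates temp dir" has the same gap if the unarchive step fails. In the `sed` task, `{{ registry_host | quote }}` sits inside an expression that is already single-quoted, so the filter cannot protect the shell there, and `find ... | xargs -n 1` splits on whitespace in file names; `find {{ hosted_base }} -type f -exec sed -i ... {} +` avoids the second problem.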
diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml index 5d7a3c038..bdaf64b3f 100644 --- a/roles/openshift_manageiq/tasks/main.yaml +++ b/roles/openshift_manageiq/tasks/main.yaml @@ -8,9 +8,9 @@ cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{manage_iq_tmp_conf}} changed_when: false -- name: Add Managment Infrastructure project +- name: Add Management Infrastructure project command: > - {{ openshift.common.admin_binary }} new-project + {{ openshift.common.client_binary }} adm new-project management-infra --description="Management Infrastructure" --config={{manage_iq_tmp_conf}} @@ -52,7 +52,7 @@ - name: Configure role/user permissions command: > - {{ openshift.common.admin_binary }} {{item}} + {{ openshift.common.client_binary }} adm {{item}} --config={{manage_iq_tmp_conf}} with_items: "{{manage_iq_tasks}}" register: osmiq_perm_task @@ -61,7 +61,7 @@ - name: Configure 3_2 role/user permissions command: > - {{ openshift.common.admin_binary }} {{item}} + {{ openshift.common.client_binary }} adm {{item}} --config={{manage_iq_tmp_conf}} with_items: "{{manage_iq_openshift_3_2_tasks}}" register: osmiq_perm_3_2_task diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml index 913f3b0ae..e119db1a2 100644 --- a/roles/openshift_master/handlers/main.yml +++ b/roles/openshift_master/handlers/main.yml @@ -17,7 +17,7 @@ # Using curl here since the uri module requires python-httplib2 and # wait_for port doesn't provide health information. command: > - curl --silent + curl --silent --tlsv1.2 {% if openshift.common.version_gte_3_2_or_1_2 | bool %} --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt {% else %} diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 1a59717c7..1d6758c4a 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml 
@@ -57,7 +57,7 @@ - name: Create the policy file if it does not already exist command: > - {{ openshift.common.admin_binary }} create-bootstrap-policy-file + {{ openshift.common.client_binary }} adm create-bootstrap-policy-file --filename={{ openshift_master_policy }} args: creates: "{{ openshift_master_policy }}" diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 index a8f5d7351..088e8db43 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 @@ -18,7 +18,7 @@ LimitNOFILE=131072 LimitCORE=infinity WorkingDirectory={{ openshift.common.data_dir }} SyslogIdentifier={{ openshift.common.service_type }}-master-controllers -Restart=on-failure +Restart=always RestartSec=5s [Install] diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 4d45e8591..a52ae578c 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -131,6 +131,7 @@ kubernetesMasterConfig: proxyClientInfo: certFile: master.proxy-client.crt keyFile: master.proxy-client.key + schedulerArguments: {{ openshift_master_scheduler_args | default(None) | to_padded_yaml( level=3 ) }} schedulerConfigFile: {{ openshift_master_scheduler_conf }} servicesNodePortRange: "" servicesSubnet: {{ openshift.common.portal_net }} 
@@ -158,7 +159,7 @@ masterPublicURL: {{ openshift.master.public_api_url }} networkConfig: clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }} hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }} -{% if openshift.common.use_openshift_sdn or openshift.common.use_nuage %} +{% if openshift.common.use_openshift_sdn or openshift.common.use_nuage or openshift.common.sdn_network_plugin_name == 'cni' %} networkPluginName: {{ openshift.common.sdn_network_plugin_name }} {% endif %} # serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 index 1f50fdce1..0e78d2d23 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 @@ -15,6 +15,7 @@ LimitNOFILE=131072 LimitCORE=infinity WorkingDirectory={{ openshift.common.data_dir }} SyslogIdentifier=atomic-openshift-master-api +Restart=always RestartSec=5s [Install] diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 index bf62696f0..94928f88c 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 @@ -19,7 +19,7 @@ LimitNOFILE=131072 LimitCORE=infinity WorkingDirectory={{ openshift.common.data_dir }} SyslogIdentifier={{ openshift.common.service_type }}-master-controllers -Restart=on-failure +Restart=always RestartSec=5s [Install] 
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index ffde59358..e9b7de330 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -52,7 +52,7 @@ - name: Create the master certificates if they do not already exist command: > - {{ openshift.common.admin_binary }} create-master-certs + {{ openshift.common.client_binary }} adm create-master-certs {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %} --certificate-authority {{ named_ca_certificate }} {% endfor %} diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml index e0c0fc644..62ac1aef5 100644 --- a/roles/openshift_master_facts/tasks/main.yml +++ b/roles/openshift_master_facts/tasks/main.yml @@ -80,3 +80,4 @@ controllers_env_vars: "{{ openshift_master_controllers_env_vars | default(None) }}" audit_config: "{{ openshift_master_audit_config | default(None) }}" metrics_public_url: "{% if openshift_hosted_metrics_deploy | default(false) %}https://{{ metrics_hostname }}/hawkular/metrics{% endif %}" + scheduler_args: "{{ openshift_master_scheduler_args | default(None) }}" diff --git a/roles/openshift_metrics/defaults/main.yml b/roles/openshift_metrics/defaults/main.yml new file mode 100644 index 000000000..a01f24df8 --- /dev/null +++ b/roles/openshift_metrics/defaults/main.yml @@ -0,0 +1,2 @@ +--- +hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted" diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml index 913f3b0ae..e119db1a2 100644 --- a/roles/openshift_metrics/handlers/main.yml +++ b/roles/openshift_metrics/handlers/main.yml 
@@ -17,7 +17,7 @@ # Using curl here since the uri module requires python-httplib2 and # wait_for port doesn't provide health information. command: > - curl --silent + curl --silent --tlsv1.2 {% if openshift.common.version_gte_3_2_or_1_2 | bool %} --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt {% else %} diff --git a/roles/openshift_metrics/tasks/install.yml b/roles/openshift_metrics/tasks/install.yml index 9c4eb22d7..98e21375a 100644 --- a/roles/openshift_metrics/tasks/install.yml +++ b/roles/openshift_metrics/tasks/install.yml @@ -30,13 +30,31 @@ - name: Add edit permission to the openshift-infra project to metrics-deployer SA command: > - {{ openshift.common.admin_binary }} + {{ openshift.common.client_binary }} adm --config={{ openshift_metrics_kubeconfig }} --namespace openshift-infra policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer when: "'system:serviceaccount:openshift-infra:metrics-deployer' not in edit_rolebindings.stdout" +- name: Test hawkular view permissions + command: > + {{ openshift.common.client_binary }} + --config={{ openshift_metrics_kubeconfig }} + --namespace openshift-infra + get rolebindings -o jsonpath='{.items[?(@.metadata.name == "view")].userNames}' + register: view_rolebindings + changed_when: false + +- name: Add view permissions to hawkular SA + command: > + {{ openshift.common.client_binary }} adm + --config={{ openshift_metrics_kubeconfig }} + --namespace openshift-infra + policy add-role-to-user view + system:serviceaccount:openshift-infra:hawkular + when: "'system:serviceaccount:openshift-infra:hawkular' not in view_rolebindings" + - name: Test cluster-reader permissions command: > {{ openshift.common.client_binary }} 
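Review bot: The `when` on the new "Add view permissions to hawkular SA" task tests membership in the registered result rather than in its output (`not in view_rolebindings`), unlike the edit-role task above it, which checks `edit_rolebindings.stdout`. Membership on the result dictionary looks at its keys, so the condition is presumably always true and the role grant is re-run on every play, which hides whether the binding was already present. It should read `when: "'system:serviceaccount:openshift-infra:hawkular' not in view_rolebindings.stdout"`.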
@@ -48,7 +66,7 @@ - name: Add cluster-reader permission to the openshift-infra project to heapster SA command: > - {{ openshift.common.admin_binary }} + {{ openshift.common.client_binary }} adm --config={{ openshift_metrics_kubeconfig }} --namespace openshift-infra policy add-cluster-role-to-user cluster-reader @@ -70,8 +88,15 @@ - name: Build metrics deployer command set_fact: deployer_cmd: "{{ openshift.common.client_binary }} process -f \ - {{ metrics_template_dir }}/metrics-deployer.yaml -v \ - HAWKULAR_METRICS_HOSTNAME={{ metrics_hostname }},USE_PERSISTENT_STORAGE={{metrics_persistence | string | lower }},DYNAMICALLY_PROVISION_STORAGE={{metrics_dynamic_vol | string | lower }},METRIC_DURATION={{ openshift.hosted.metrics.duration }},METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }}{{ image_prefix }}{{ image_version }},MODE={{ deployment_mode }} \ + {{ hosted_base }}/metrics-deployer.yaml -v \ + HAWKULAR_METRICS_HOSTNAME={{ metrics_hostname }} \ + -v USE_PERSISTENT_STORAGE={{metrics_persistence | string | lower }} \ + -v DYNAMICALLY_PROVISION_STORAGE={{metrics_dynamic_vol | string | lower }} \ + -v METRIC_DURATION={{ openshift.hosted.metrics.duration }} \ + -v METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }} + {{ image_prefix }} \ + {{ image_version }} \ + -v MODE={{ deployment_mode }} \ | {{ openshift.common.client_binary }} --namespace openshift-infra \ --config={{ openshift_metrics_kubeconfig }} \ create -o name -f -" diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml index 88432a9f8..26af279b1 100644 --- a/roles/openshift_metrics/tasks/main.yaml +++ b/roles/openshift_metrics/tasks/main.yaml 
@@ -36,10 +36,8 @@ metrics_persistence: "{{ openshift.hosted.metrics.storage_kind | default(none) is not none }}" metrics_dynamic_vol: "{{ openshift.hosted.metrics.storage_kind | default(none) == 'dynamic' }}" metrics_template_dir: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples/infrastructure-templates/{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}" - cassandra_nodes: "{{ ',CASSANDRA_NODES=' ~ openshift.hosted.metrics.cassandra_nodes if 'cassandra' in openshift.hosted.metrics else '' }}" - cassandra_pv_size: "{{ ',CASSANDRA_PV_SIZE=' ~ openshift.hosted.metrics.storage_volume_size if openshift.hosted.metrics.storage_volume_size | default(none) is not none else '' }}" - image_prefix: "{{ ',IMAGE_PREFIX=' ~ openshift.hosted.metrics.deployer_prefix if 'deployer_prefix' in openshift.hosted.metrics else '' }}" - image_version: "{{ ',IMAGE_VERSION=' ~ openshift.hosted.metrics.deployer_version if 'deployer_version' in openshift.hosted.metrics else '' }}" + image_prefix: "{{ '-v IMAGE_PREFIX=' ~ openshift.hosted.metrics.deployer.prefix if 'prefix' in openshift.hosted.metrics.deployer else '' }}" + image_version: "{{ '-v IMAGE_VERSION=' ~ openshift.hosted.metrics.deployer.version if 'version' in openshift.hosted.metrics.deployer else '' }}" - name: Check for existing metrics pods diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 64c90db50..6022694bc 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -9,6 +9,10 @@ role: "{{ item.role }}" local_facts: "{{ item.local_facts }}" with_items: + # Reset node labels to an empty dictionary. + - role: node + local_facts: + labels: {} - role: node local_facts: annotations: "{{ openshift_node_annotations | default(none) }}" 
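Review bot: The new `image_prefix` and `image_version` expressions in roles/openshift_metrics/tasks/main.yaml test `'prefix' in openshift.hosted.metrics.deployer`, which assumes the `deployer` key always exists. The removed expressions tested `openshift.hosted.metrics` itself and tolerated a missing deployer setting. If the facts do not always populate `deployer` (not visible here), templating will fail on an undefined value. A default keeps the earlier tolerance: `if 'prefix' in (openshift.hosted.metrics.deployer | default({})) else ''`, and the same for `version`.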
@@ -73,10 +77,15 @@ - set_fact: ovs_service_status_changed: "{{ ovs_start_result | changed }}" +- file: + dest: "{{ (openshift_node_kubelet_args|default({'config':None})).config}}" + state: directory + when: openshift_node_kubelet_args is defined and 'config' in openshift_node_kubelet_args + # TODO: add the validate parameter when there is a validation command to run - name: Create the Node config template: - dest: "{{ openshift_node_config_file }}" + dest: "{{ openshift.common.config_base }}/node/node-config.yaml" src: node.yaml.v1.j2 backup: true owner: root @@ -96,6 +105,7 @@ line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}" - regex: '^AWS_SECRET_ACCESS_KEY=' line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}" + no_log: True when: "openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined" notify: - restart node diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml index 40d1dd50b..f722a6e69 100644 --- a/roles/openshift_node/tasks/systemd_units.yml +++ b/roles/openshift_node/tasks/systemd_units.yml 
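Review bot: The unnamed `file` task added before "Create the Node config" creates the directory taken from the kubelet `config` argument without `owner`, `group` or `mode`, so its permissions follow the remote umask. The template task right below sets `owner: root` explicitly. Give the task a name and pin ownership and mode, for example `owner: root`, `group: root`, `mode: 0755`, or tighter if the directory will hold anything sensitive. The `default({'config':None})` in `dest` is redundant because the `when` already requires `config` to be present; `dest: "{{ openshift_node_kubelet_args.config }}"` is equivalent under that guard.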
@@ -24,11 +24,26 @@ notify: - restart openvswitch +# May be a temporary workaround. +# https://bugzilla.redhat.com/show_bug.cgi?id=1331590 +- name: Create OpenvSwitch service.d directory + file: path=/etc/systemd/system/openvswitch.service.d/ state=directory + when: openshift.common.use_openshift_sdn | default(true) | bool + +- name: Install OpenvSwitch service OOM fix + template: + dest: "/etc/systemd/system/openvswitch.service.d/01-avoid-oom.conf" + src: openvswitch-avoid-oom.conf + when: openshift.common.use_openshift_sdn | default(true) | bool + register: install_oom_fix_result + notify: + - restart openvswitch + - name: Install OpenvSwitch docker service file template: dest: "/etc/systemd/system/openvswitch.service" src: openvswitch.docker.service - when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool + when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | default(true) | bool notify: - restart openvswitch @@ -42,7 +57,7 @@ - regex: '^OPTIONS=' line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}" - regex: '^CONFIG_FILE=' - line: "CONFIG_FILE={{ openshift_node_config_file }}" + line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml" - regex: '^IMAGE_VERSION=' line: "IMAGE_VERSION={{ openshift_image_tag }}" notify: @@ -67,6 +82,6 @@ - name: Reload systemd units command: systemctl daemon-reload - when: openshift.common.is_containerized | bool and (install_node_result | changed or install_ovs_sysconfig | changed or install_node_dep_result | changed) + when: (openshift.common.is_containerized | bool and (install_node_result | changed or install_ovs_sysconfig | changed or install_node_dep_result | changed)) or install_oom_fix_result | changed notify: - restart node diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index 9bcaf4d84..55ae4bf54 100644 
--- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -27,7 +27,7 @@ networkPluginName: {{ openshift.common.sdn_network_plugin_name }} # deprecates networkPluginName above. The two should match. networkConfig: mtu: {{ openshift.node.sdn_mtu }} -{% if openshift.common.use_openshift_sdn | bool or openshift.common.use_nuage | bool %} +{% if openshift.common.use_openshift_sdn | bool or openshift.common.use_nuage | bool or openshift.common.sdn_network_plugin_name == 'cni' %} networkPluginName: {{ openshift.common.sdn_network_plugin_name }} {% endif %} {% if openshift.node.set_node_ip | bool %} diff --git a/roles/openshift_node/templates/openvswitch-avoid-oom.conf b/roles/openshift_node/templates/openvswitch-avoid-oom.conf new file mode 100644 index 000000000..3229bc56b --- /dev/null +++ b/roles/openshift_node/templates/openvswitch-avoid-oom.conf @@ -0,0 +1,3 @@ +# Avoid the OOM killer for openvswitch and it's children: +[Service] +OOMScoreAdjust=-1000 diff --git a/roles/openshift_node/vars/main.yml b/roles/openshift_node/vars/main.yml deleted file mode 100644 index 77a9694de..000000000 --- a/roles/openshift_node/vars/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -openshift_node_config_dir: "{{ openshift.common.config_base }}/node" -openshift_node_config_file: "{{ openshift_node_config_dir }}/node-config.yaml" diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml index 80ab4bb1d..69bcd3668 100644 --- a/roles/openshift_node_certificates/tasks/main.yml +++ b/roles/openshift_node_certificates/tasks/main.yml 
@@ -44,7 +44,7 @@ - name: Generate the node client config command: > - {{ openshift.common.admin_binary }} create-api-client-config + {{ openshift.common.client_binary }} adm create-api-client-config {% for named_ca_certificate in hostvars[openshift_ca_host].openshift.master.named_certificates | default([]) | oo_collect('cafile') %} --certificate-authority {{ named_ca_certificate }} {% endfor %} @@ -63,7 +63,7 @@ - name: Generate the node server certificate command: > - {{ openshift.common.admin_binary }} ca create-server-cert + {{ openshift.common.client_binary }} adm ca create-server-cert --cert={{ openshift_node_generated_config_dir }}/server.crt --key={{ openshift_generated_configs_dir }}/node-{{ openshift.common.hostname }}/server.key --overwrite=true diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh index 5a187710b..ced0fa663 100755 --- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh +++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh @@ -1,4 +1,5 @@ #!/bin/bash -x +# -*- mode: sh; sh-indentation: 2 -*- # This NetworkManager dispatcher script replicates the functionality of # NetworkManager's dns=dnsmasq however, rather than hardcoding the listening 
@@ -28,7 +29,16 @@ cd /etc/sysconfig/network-scripts [ -f ../network ] && . ../network if [[ $2 =~ ^(up|dhcp4-change)$ ]]; then - # couldn't find an existing method to determine if the interface owns the + # If the origin-upstream-dns config file changed we need to restart + NEEDS_RESTART=0 + UPSTREAM_DNS='/etc/dnsmasq.d/origin-upstream-dns.conf' + # We'll regenerate the dnsmasq origin config in a temp file first + UPSTREAM_DNS_TMP=`mktemp` + UPSTREAM_DNS_TMP_SORTED=`mktemp` + CURRENT_UPSTREAM_DNS_SORTED=`mktemp` + + ###################################################################### + # couldn't find an existing method to determine if the interface owns the # default route def_route=$(/sbin/ip route list match 0.0.0.0/0 | awk '{print $3 }') def_route_int=$(/sbin/ip route get to ${def_route} | awk '{print $3}') 
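Review bot: The three `mktemp` files created at the top of the `up|dhcp4-change` branch are removed only by the `rm -f` at the end of that branch (last hunk of this file), so any earlier exit leaves them behind, and the result of `mktemp` is never checked. Registering the cleanup right after the assignments covers every exit path: `trap 'rm -f "$UPSTREAM_DNS_TMP" "$UPSTREAM_DNS_TMP_SORTED" "$CURRENT_UPSTREAM_DNS_SORTED"' EXIT`. Creating each file as `UPSTREAM_DNS_TMP=$(mktemp) || exit 1` avoids continuing with an empty variable, which would make the later unquoted redirections fail in confusing ways. The enclosing `if` block continues outside this hunk.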
@@ -43,15 +53,37 @@ domain-needed server=/cluster.local/172.30.0.1 server=/30.172.in-addr.arpa/172.30.0.1 EOF + # New config file, must restart + NEEDS_RESTART=1 fi - # zero out our upstream servers list and feed it into dnsmasq - echo -n > /etc/dnsmasq.d/origin-upstream-dns.conf + + ###################################################################### + # Generate a new origin dns config file for ns in ${IP4_NAMESERVERS}; do if [[ ! -z $ns ]]; then - echo "server=${ns}" >> /etc/dnsmasq.d/origin-upstream-dns.conf + echo "server=${ns}" fi - done - systemctl restart dnsmasq + done > $UPSTREAM_DNS_TMP + + # Sort it in case DNS servers arrived in a different order + sort $UPSTREAM_DNS_TMP > $UPSTREAM_DNS_TMP_SORTED + sort $UPSTREAM_DNS > $CURRENT_UPSTREAM_DNS_SORTED + + # Compare to the current config file (sorted) + NEW_DNS_SUM=`md5sum ${UPSTREAM_DNS_TMP_SORTED} | awk '{print $1}'` + CURRENT_DNS_SUM=`md5sum ${CURRENT_UPSTREAM_DNS_SORTED} | awk '{print $1}'` + + if [ "${NEW_DNS_SUM}" != "${CURRENT_DNS_SUM}" ]; then + # DNS has changed, copy the temp file to the proper location (-Z + # sets default selinux context) and set the restart flag + cp -Z $UPSTREAM_DNS_TMP $UPSTREAM_DNS + NEEDS_RESTART=1 + fi + + ###################################################################### + if [ "${NEEDS_RESTART}" -eq "1" ]; then + systemctl restart dnsmasq + fi sed -i '0,/^nameserver/ s/^nameserver.*$/nameserver '"${def_route_ip}"'/g' /etc/resolv.conf @@ -59,4 +91,7 @@ EOF echo "# nameserver updated by /etc/NetworkManager/dispatcher.d/99-origin-dns.sh" >> /etc/resolv.conf fi fi + + # Clean up after yourself + rm -f $UPSTREAM_DNS_TMP $UPSTREAM_DNS_TMP_SORTED $CURRENT_UPSTREAM_DNS_SORTED fi diff --git a/roles/openshift_node_dnsmasq/tasks/main.yml b/roles/openshift_node_dnsmasq/tasks/main.yml index bd9a0ffb6..396c27295 100644 --- a/roles/openshift_node_dnsmasq/tasks/main.yml +++ b/roles/openshift_node_dnsmasq/tasks/main.yml 
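Review bot: `sort $UPSTREAM_DNS` now reads `/etc/dnsmasq.d/origin-upstream-dns.conf` before anything has created it, whereas the removed code always truncated the file first. On a first run `sort` reports a missing file, and the comparison only works because the redirection leaves an empty file. Guard the read, e.g. `[ -f "$UPSTREAM_DNS" ] && sort "$UPSTREAM_DNS" > "$CURRENT_UPSTREAM_DNS_SORTED"`, and quote the other expansions in this hunk too. The two `md5sum | awk` pipelines can be replaced by a direct comparison, `if ! cmp -s "$UPSTREAM_DNS_TMP_SORTED" "$CURRENT_UPSTREAM_DNS_SORTED"; then`. Because `mktemp` creates owner-only files and `cp` applies the source mode to a newly created destination, confirm the installed file ends up with the mode expected under `/etc/dnsmasq.d`.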
@@ -29,6 +29,12 @@ when: openshift_node_dnsmasq_additional_config_file is defined notify: restart dnsmasq +- name: Enable dnsmasq + service: + name: dnsmasq + enabled: yes + state: started + # Dynamic NetworkManager based dispatcher - include: ./network-manager.yml when: network_manager_active | bool diff --git a/roles/openshift_projects/tasks/main.yml b/roles/openshift_projects/tasks/main.yml index 62a357cf7..30d58afd3 100644 --- a/roles/openshift_projects/tasks/main.yml +++ b/roles/openshift_projects/tasks/main.yml @@ -20,7 +20,7 @@ - name: Create projects command: > - {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{ item.item.key }} {% if item.item.value.default_node_selector | default(none) != none %} {{ '--node-selector=' ~ item.item.value.default_node_selector }} diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml index e90384d37..1ff9e6dcb 100644 --- a/roles/openshift_serviceaccounts/tasks/main.yml +++ b/roles/openshift_serviceaccounts/tasks/main.yml @@ -26,7 +26,7 @@ - name: Grant the user access to the appropriate scc command: > - {{ openshift.common.admin_binary }} policy add-scc-to-user + {{ openshift.common.client_binary }} adm policy add-scc-to-user {{ item.1.item }} system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }} when: "openshift.common.version_gte_3_1_or_1_1 and item.1.rc == 0 and 'system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}' not in {{ (item.1.stdout | from_yaml).users | default([]) }}" with_nested: diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/set_version_containerized.yml index 8e2702391..718537287 100644 --- a/roles/openshift_version/tasks/set_version_containerized.yml +++ b/roles/openshift_version/tasks/set_version_containerized.yml 
@@ -37,3 +37,8 @@ openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0:2][1:] | join('-') if openshift.common.deployment_type == 'origin' else cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}" when: openshift_version is defined and openshift_version.split('.') | length == 2 +# We finally have the specific version. Now we clean up any strange +# dangly +c0mm1t-offset tags in the version. See also, +# openshift_facts.py +- set_fact: + openshift_version: "{{ openshift_version | oo_chomp_commit_offset }}" diff --git a/roles/os_firewall/README.md b/roles/os_firewall/README.md index 187d74b06..c6c70b81d 100644 --- a/roles/os_firewall/README.md +++ b/roles/os_firewall/README.md @@ -14,7 +14,7 @@ Role Variables | Name | Default | | |---------------------------|---------|----------------------------------------| -| os_firewall_use_firewalld | True | If false, use iptables | +| os_firewall_use_firewalld | False | If false, use iptables | | os_firewall_allow | [] | List of service,port mappings to allow | | os_firewall_deny | [] | List of service, port mappings to deny | diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py index 190016c14..bd638b69b 100755 --- a/roles/os_firewall/library/os_firewall_manage_iptables.py +++ b/roles/os_firewall/library/os_firewall_manage_iptables.py @@ -50,8 +50,8 @@ class IpTablesCreateJumpRuleError(IpTablesError): self.chain = chain -# TODO: impliment rollbacks for any events that where successful and an -# exception was thrown later. for example, when the chain is created +# TODO: implement rollbacks for any events that were successful and an +# exception was thrown later. For example, when the chain is created # successfully, but the add/remove rule fails. class IpTablesManager(object): # pylint: disable=too-many-instance-attributes def __init__(self, module): 
diff --git a/roles/os_firewall/meta/main.yml b/roles/os_firewall/meta/main.yml index c93335b7b..6df7c9f2b 100644 --- a/roles/os_firewall/meta/main.yml +++ b/roles/os_firewall/meta/main.yml @@ -11,5 +11,6 @@ galaxy_info: - 7 categories: - system +allow_duplicates: yes dependencies: - { role: openshift_facts } diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 000000000..dd2913b35 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,2 @@ +[nosetests] +tests=test,utils diff --git a/utils/Makefile b/utils/Makefile index 59aff92fd..62f08f74b 100644 --- a/utils/Makefile +++ b/utils/Makefile @@ -31,6 +31,8 @@ ASCII2MAN = a2x -D $(dir $@) -d manpage -f manpage $< MANPAGES := docs/man/man1/atomic-openshift-installer.1 VERSION := 1.3 +PEPEXCLUDES := E501,E121,E124 + sdist: clean python setup.py sdist rm -fR $(SHORTNAME).egg-info @@ -80,7 +82,7 @@ ci-pylint: @echo "#############################################" @echo "# Running PyLint Tests in virtualenv" @echo "#############################################" - . $(NAME)env/bin/activate && python -m pylint --rcfile ../git/.pylintrc src/ooinstall/cli_installer.py src/ooinstall/oo_config.py src/ooinstall/openshift_ansible.py src/ooinstall/variants.py ../callback_plugins/openshift_quick_installer.py + . $(NAME)env/bin/activate && python -m pylint --rcfile ../git/.pylintrc src/ooinstall/cli_installer.py src/ooinstall/oo_config.py src/ooinstall/openshift_ansible.py src/ooinstall/variants.py ../callback_plugins/openshift_quick_installer.py ../roles/openshift_certificate_expiry/library/openshift_cert_expiry.py ci-list-deps: @echo "#############################################" 
@@ -94,13 +96,17 @@ ci-pyflakes: @echo "#################################################" . $(NAME)env/bin/activate && pyflakes src/ooinstall/*.py . $(NAME)env/bin/activate && pyflakes ../callback_plugins/openshift_quick_installer.py + . $(NAME)env/bin/activate && pyflakes ../roles/openshift_certificate_expiry/library/openshift_cert_expiry.py ci-pep8: @echo "#############################################" @echo "# Running PEP8 Compliance Tests in virtualenv" @echo "#############################################" - . $(NAME)env/bin/activate && pep8 --ignore=E501,E121,E124 src/$(SHORTNAME)/ - . $(NAME)env/bin/activate && pep8 --ignore=E501,E121,E124 ../callback_plugins/openshift_quick_installer.py + . $(NAME)env/bin/activate && pep8 --ignore=$(PEPEXCLUDES) src/$(SHORTNAME)/ + . $(NAME)env/bin/activate && pep8 --ignore=$(PEPEXCLUDES) ../callback_plugins/openshift_quick_installer.py +# This one excludes E402 because it is an ansible module and the +# boilerplate import statement is expected to be at the bottom + . $(NAME)env/bin/activate && pep8 --ignore=$(PEPEXCLUDES),E402 ../roles/openshift_certificate_expiry/library/openshift_cert_expiry.py ci: clean virtualenv ci-list-deps ci-pep8 ci-pylint ci-pyflakes ci-unittests : diff --git a/utils/docs/man/man1/atomic-openshift-installer.1 b/utils/docs/man/man1/atomic-openshift-installer.1 index 4da82191b..072833ce8 100644 --- a/utils/docs/man/man1/atomic-openshift-installer.1 +++ b/utils/docs/man/man1/atomic-openshift-installer.1 
@@ -2,12 +2,12 @@ .\" Title: atomic-openshift-installer .\" Author: [see the "AUTHOR" section] .\" Generator: DocBook XSL Stylesheets v1.78.1 <http://docbook.sf.net/> -.\" Date: 09/28/2016 +.\" Date: 10/20/2016 .\" Manual: atomic-openshift-installer .\" Source: atomic-openshift-utils 1.3 .\" Language: English .\" -.TH "ATOMIC\-OPENSHIFT\-I" "1" "09/28/2016" "atomic\-openshift\-utils 1\&.3" "atomic\-openshift\-installer" +.TH "ATOMIC\-OPENSHIFT\-I" "1" "10/20/2016" "atomic\-openshift\-utils 1\&.3" "atomic\-openshift\-installer" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- @@ -121,6 +121,17 @@ Show the usage help and exit\&. \fBupgrade\fR .RE .sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +\fBscaleup\fR +.RE +.sp The options specific to each command are described in the following sections\&. .SH "INSTALL" .sp @@ -158,6 +169,9 @@ Upgrade to the latest major version\&. For example, if you are running version then this could upgrade you to \fB3\&.3\fR\&. .RE +.SH "SCALEUP" +.sp +The \fBscaleup\fR command is used to add new nodes to an existing cluster\&. This command has no additional options\&. .SH "FILES" .sp \fB~/\&.config/openshift/installer\&.cfg\&.yml\fR \(em Installer configuration file\&. Can be used to generate an inventory later or start an unattended installation\&. diff --git a/utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in b/utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in index 64e5d14a3..9b02c4d14 100644 --- a/utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in +++ b/utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in @@ -73,6 +73,7 @@ COMMANDS * **install** * **uninstall** * **upgrade** +* **scaleup** The options specific to each command are described in the following sections. 
@@ -122,6 +123,11 @@ Upgrade to the latest major version. For example, if you are running version **3.2** then this could upgrade you to **3.3**. +SCALEUP +------- + +The **scaleup** command is used to add new nodes to an existing cluster. +This command has no additional options. FILES ----- diff --git a/utils/setup.py b/utils/setup.py index 563897bb1..7909321c9 100644 --- a/utils/setup.py +++ b/utils/setup.py @@ -65,11 +65,6 @@ setup( 'ooinstall': ['ansible.cfg', 'ansible-quiet.cfg', 'ansible_plugins/*'], }, - # Although 'package_data' is the preferred approach, in some case you may - # need to place data files outside of your packages. See: - # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa - # In this case, 'data_file' will be installed into '<sys.prefix>/my_data' - #data_files=[('my_data', ['data/data_file'])], tests_require=['nose'], test_suite='nose.collector', diff --git a/utils/site_assets/oo-install-bootstrap.sh b/utils/site_assets/oo-install-bootstrap.sh index 3847c029a..3c5614d39 100755 --- a/utils/site_assets/oo-install-bootstrap.sh +++ b/utils/site_assets/oo-install-bootstrap.sh @@ -67,7 +67,7 @@ pip install --no-index -f file:///$(readlink -f deps) ansible 2>&1 >> $OO_INSTAL # TODO: these deps should technically be handled as part of installing ooinstall pip install --no-index -f file:///$(readlink -f deps) click 2>&1 >> $OO_INSTALL_LOG pip install --no-index ./src/ 2>&1 >> $OO_INSTALL_LOG -echo "Installation preperation done!" 2>&1 >> $OO_INSTALL_LOG +echo "Installation preparation done!" 2>&1 >> $OO_INSTALL_LOG echo "Using `ansible --version`" 2>&1 >> $OO_INSTALL_LOG diff --git a/utils/src/data/data_file b/utils/src/data/data_file deleted file mode 100644 index 7c0646bfd..000000000 --- a/utils/src/data/data_file +++ /dev/null @@ -1 +0,0 @@ -some data
\ No newline at end of file diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index 85f18d5d3..7e5ad4144 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -1,28 +1,24 @@ -# TODO: Temporarily disabled due to importing old code into openshift-ansible -# repo. We will work on these over time. -# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,no-value-for-parameter,too-many-lines +# pylint: disable=missing-docstring,no-self-use,no-value-for-parameter,too-many-lines +import logging import os -import re import sys -import logging + import click from pkg_resources import parse_version -from ooinstall import openshift_ansible -from ooinstall.oo_config import OOConfig -from ooinstall.oo_config import OOConfigInvalidHostError -from ooinstall.oo_config import Host, Role +from ooinstall import openshift_ansible, utils +from ooinstall.oo_config import Host, OOConfig, OOConfigInvalidHostError, Role from ooinstall.variants import find_variant, get_variant_version_combos -installer_log = logging.getLogger('installer') -installer_log.setLevel(logging.CRITICAL) -installer_file_handler = logging.FileHandler('/tmp/installer.txt') -installer_file_handler.setFormatter( +INSTALLER_LOG = logging.getLogger('installer') +INSTALLER_LOG.setLevel(logging.CRITICAL) +INSTALLER_FILE_HANDLER = logging.FileHandler('/tmp/installer.txt') +INSTALLER_FILE_HANDLER.setFormatter( logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')) # Example output: # 2016-08-23 07:34:58,480 - installer - DEBUG - Going to 'load_system_facts' -installer_file_handler.setLevel(logging.DEBUG) -installer_log.addHandler(installer_file_handler) +INSTALLER_FILE_HANDLER.setLevel(logging.DEBUG) +INSTALLER_LOG.addHandler(INSTALLER_FILE_HANDLER) DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg' QUIET_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible-quiet.cfg' 
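Review bot: `INSTALLER_FILE_HANDLER = logging.FileHandler('/tmp/installer.txt')` in utils/src/ooinstall/cli_installer.py keeps a fixed, predictable log path in the world-writable `/tmp`, and the handler opens it at import time; this hunk only renames the variables. Another local user can pre-create that name as a file or a symlink, so the installer would append its debug log (host names, at least) to a location it does not control, and the file is created with default permissions. Place the log in a directory owned by the invoking user, such as the `~/.config/openshift` directory that already holds `installer.cfg.yml`, and create it with mode 0600. If the path must stay configurable, resolve it once at startup and refuse to follow a symlink.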
@@ -47,6 +43,16 @@ UPGRADE_MAPPINGS = { 'major_playbook': 'v3_3/upgrade.yml', 'major_version': '3.3', }, + '3.3': { + 'minor_version': '3.3', + 'minor_playbook': 'v3_3/upgrade.yml', + 'major_playbook': 'v3_4/upgrade.yml', + 'major_version': '3.4', + }, + '3.4': { + 'minor_version': '3.4', + 'minor_playbook': 'v3_4/upgrade.yml', + }, } @@ -58,17 +64,8 @@ def validate_ansible_dir(path): # raise click.BadParameter("Path \"{}\" doesn't exist".format(path)) -def is_valid_hostname(hostname): - if not hostname or len(hostname) > 255: - return False - if hostname[-1] == ".": - hostname = hostname[:-1] # strip exactly one dot from the right, if present - allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE) - return all(allowed.match(x) for x in hostname.split(".")) - - def validate_prompt_hostname(hostname): - if hostname == '' or is_valid_hostname(hostname): + if hostname == '' or utils.is_valid_hostname(hostname): return hostname raise click.BadParameter('Invalid hostname. Please double-check this value and re-enter it.') @@ -84,7 +81,7 @@ passwordless sudo access. return click.prompt('User for ssh access', default='root') -def get_master_routingconfig_subdomain(): +def get_routingconfig_subdomain(): click.clear() message = """ You might want to override the default subdomain used for exposed routes. If you don't know what this is, use the default value. 
@@ -121,11 +118,6 @@ a high-availability (HA) deployment. If you choose an HA deployment, then you are prompted to identify a *separate* system to act as the load balancer for your cluster once you define all masters and nodes. -If only one master is specified, an etcd instance is embedded within the -OpenShift master service to use as the datastore. This can be later replaced -with a separate etcd instance, if required. If multiple masters are specified, -then a separate etcd cluster is configured with each master serving as a member. - Any masters configured as part of this installation process are also configured as nodes. This enables the master to proxy to pods from the API. By default, this node is unschedulable, but this can be changed @@ -183,9 +175,13 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen if masters_set or num_masters != 2: more_hosts = click.confirm('Do you want to add additional hosts?') - if num_masters >= 3: - collect_master_lb(hosts) - roles.add('master_lb') + if num_masters > 2: + master_lb = collect_master_lb(hosts) + if master_lb: + hosts.append(master_lb) + roles.add('master_lb') + else: + set_cluster_hostname(oo_cfg) if not existing_env: collect_storage_host(hosts) @@ -193,7 +189,8 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen return hosts, roles -def print_installation_summary(hosts, version=None): +# pylint: disable=too-many-branches +def print_installation_summary(hosts, version=None, verbose=True): """ Displays a summary of all hosts configured thus far, and what role each will play. 
@@ -214,35 +211,36 @@ def print_installation_summary(hosts, version=None): click.echo('Total OpenShift masters: %s' % len(masters)) click.echo('Total OpenShift nodes: %s' % len(nodes)) - if len(masters) == 1 and version != '3.0': - ha_hint_message = """ + if verbose: + if len(masters) == 1 and version != '3.0': + ha_hint_message = """ NOTE: Add a total of 3 or more masters to perform an HA installation.""" - click.echo(ha_hint_message) - elif len(masters) == 2: - min_masters_message = """ + click.echo(ha_hint_message) + elif len(masters) == 2: + min_masters_message = """ WARNING: A minimum of 3 masters are required to perform an HA installation. Please add one more to proceed.""" - click.echo(min_masters_message) - elif len(masters) >= 3: - ha_message = """ + click.echo(min_masters_message) + elif len(masters) >= 3: + ha_message = """ NOTE: Multiple masters specified, this will be an HA deployment with a separate etcd cluster. You will be prompted to provide the FQDN of a load balancer and a host for storage once finished entering hosts. -""" - click.echo(ha_message) + """ + click.echo(ha_message) - dedicated_nodes_message = """ + dedicated_nodes_message = """ WARNING: Dedicated nodes are recommended for an HA deployment. If no dedicated nodes are specified, each configured master will be marked as a schedulable node.""" - min_ha_nodes_message = """ + min_ha_nodes_message = """ WARNING: A minimum of 3 dedicated nodes are recommended for an HA deployment.""" - if len(dedicated_nodes) == 0: - click.echo(dedicated_nodes_message) - elif len(dedicated_nodes) < 3: - click.echo(min_ha_nodes_message) + if len(dedicated_nodes) == 0: + click.echo(dedicated_nodes_message) + elif len(dedicated_nodes) < 3: + click.echo(min_ha_nodes_message) click.echo('') 
@@ -263,13 +261,12 @@ def print_host_summary(all_hosts, host): click.echo(" - Load Balancer (Preconfigured)") else: click.echo(" - Load Balancer (HAProxy)") - if host.is_master(): - if host.is_etcd_member(all_hosts): - click.echo(" - Etcd Member") - else: - click.echo(" - Etcd (Embedded)") + if host.is_etcd(): + click.echo(" - Etcd") if host.is_storage(): click.echo(" - Storage") + if host.new_host: + click.echo(" - NEW") def collect_master_lb(hosts): 
@@ -307,14 +304,35 @@ hostname. 'please specify a separate host' % hostname) return hostname - host_props['connect_to'] = click.prompt('Enter hostname or IP address', - value_proc=validate_prompt_lb) - install_haproxy = \ - click.confirm('Should the reference HAProxy load balancer be installed on this host?') - host_props['preconfigured'] = not install_haproxy - host_props['roles'] = ['master_lb'] - master_lb = Host(**host_props) - hosts.append(master_lb) + lb_hostname = click.prompt('Enter hostname or IP address', + value_proc=validate_prompt_lb) + if lb_hostname: + host_props['connect_to'] = lb_hostname + install_haproxy = \ + click.confirm('Should the reference HAProxy load balancer be installed on this host?') + host_props['preconfigured'] = not install_haproxy + host_props['roles'] = ['master_lb'] + return Host(**host_props) + else: + return None + + +def set_cluster_hostname(oo_cfg): + first_master = next((host for host in oo_cfg.deployment.hosts if host.is_master()), None) + message = """ +You have chosen to install a single master cluster (non-HA). + +In a single master cluster, the cluster host name (Ansible variable openshift_master_cluster_public_hostname) is set by default to the host name of the single master. In a multiple master (HA) cluster, the FQDN of a host must be provided that will be configured as a proxy. This could be either an existing load balancer configured to balance all masters on +port 8443 or a new host that would have HAProxy installed on it. + +(Optional) +If you want to override the cluster host name now to something other than the default (the host name of the single master), or if you think you might add masters later to become an HA cluster and want to future proof your cluster host name choice, please provide a FQDN. Otherwise, press ENTER to continue and accept the default. 
+""" + click.echo(message) + cluster_hostname = click.prompt('Enter hostname or IP address', + default=str(first_master)) + oo_cfg.deployment.variables['openshift_master_cluster_hostname'] = cluster_hostname + oo_cfg.deployment.variables['openshift_master_cluster_public_hostname'] = cluster_hostname def collect_storage_host(hosts): @@ -395,29 +413,29 @@ Notes: default_facts_lines = [] default_facts = {} - for h in hosts: - if h.preconfigured: + for host in hosts: + if host.preconfigured: continue try: - default_facts[h.connect_to] = {} - h.ip = callback_facts[h.connect_to]["common"]["ip"] - h.public_ip = callback_facts[h.connect_to]["common"]["public_ip"] - h.hostname = callback_facts[h.connect_to]["common"]["hostname"] - h.public_hostname = callback_facts[h.connect_to]["common"]["public_hostname"] + default_facts[host.connect_to] = {} + host.ip = callback_facts[host.connect_to]["common"]["ip"] + host.public_ip = callback_facts[host.connect_to]["common"]["public_ip"] + host.hostname = callback_facts[host.connect_to]["common"]["hostname"] + host.public_hostname = callback_facts[host.connect_to]["common"]["public_hostname"] except KeyError: - click.echo("Problem fetching facts from {}".format(h.connect_to)) + click.echo("Problem fetching facts from {}".format(host.connect_to)) continue - default_facts_lines.append(",".join([h.connect_to, - h.ip, - h.public_ip, - h.hostname, - h.public_hostname])) - output = "%s\n%s" % (output, ",".join([h.connect_to, - h.ip, - h.public_ip, - h.hostname, - h.public_hostname])) + default_facts_lines.append(",".join([host.connect_to, + host.ip, + host.public_ip, + host.hostname, + host.public_hostname])) + output = "%s\n%s" % (output, ",".join([host.connect_to, + host.ip, + host.public_ip, + host.hostname, + host.public_hostname])) output = "%s\n%s" % (output, notes) click.echo(output) 
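Review bot: In the new `set_cluster_hostname` (hunk @@ -307,14 +304,35 @@, which starts on the previous line), the prompt takes `default=str(first_master)` and has no `value_proc`. When no host is a master, `first_master` is `None` and the default becomes the literal string `None`. Whatever is typed is then stored unvalidated in both `openshift_master_cluster_hostname` and `openshift_master_cluster_public_hostname`. Reuse the validator this file already has, `value_proc=validate_prompt_hostname`, and skip the prompt or fail clearly when `first_master is None`. The function also lacks a docstring; one line stating that it writes the two cluster hostname variables would help.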
@@ -534,7 +552,7 @@ def error_if_missing_info(oo_cfg): oo_cfg.settings['variant_version'] = version.name # check that all listed host roles are included - listed_roles = get_host_roles_set(oo_cfg) + listed_roles = oo_cfg.get_host_roles_set() configured_roles = set([role for role in oo_cfg.deployment.roles]) if listed_roles != configured_roles: missing_info = True @@ -544,16 +562,7 @@ def error_if_missing_info(oo_cfg): sys.exit(1) -def get_host_roles_set(oo_cfg): - roles_set = set() - for host in oo_cfg.deployment.hosts: - for role in host.roles: - roles_set.add(role) - - return roles_set - - -def get_proxy_hostnames_and_excludes(): +def get_proxy_hosts_excludes(): message = """ If a proxy is needed to reach HTTP and HTTPS traffic, please enter the name below. This proxy will be configured by default for all processes @@ -635,7 +644,8 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h click.clear() if 'master_routingconfig_subdomain' not in oo_cfg.deployment.variables: - oo_cfg.deployment.variables['master_routingconfig_subdomain'] = get_master_routingconfig_subdomain() + oo_cfg.deployment.variables['master_routingconfig_subdomain'] = \ + get_routingconfig_subdomain() click.clear() # Are any proxy vars already presisted? @@ -644,7 +654,7 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h saved_proxy_vars = [pv for pv in proxy_vars if oo_cfg.deployment.variables.get(pv, 'UNSET') is not 'UNSET'] - installer_log.debug("Evaluated proxy settings, found %s presisted values", + INSTALLER_LOG.debug("Evaluated proxy settings, found %s presisted values", len(saved_proxy_vars)) current_version = parse_version( oo_cfg.settings.get('variant_version', '0.0')) 
@@ -654,8 +664,8 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h # recognizes proxy parameters. We must prompt the user for values # if this conditional is true. if not saved_proxy_vars and current_version >= min_version: - installer_log.debug("Prompting user to enter proxy values") - http_proxy, https_proxy, proxy_excludes = get_proxy_hostnames_and_excludes() + INSTALLER_LOG.debug("Prompting user to enter proxy values") + http_proxy, https_proxy, proxy_excludes = get_proxy_hosts_excludes() oo_cfg.deployment.variables['proxy_http'] = http_proxy oo_cfg.deployment.variables['proxy_https'] = https_proxy oo_cfg.deployment.variables['proxy_exclude_hosts'] = proxy_excludes @@ -695,8 +705,10 @@ def get_installed_hosts(hosts, callback_facts): for host in [h for h in hosts if h.is_master() or h.is_node()]: if host.connect_to in callback_facts.keys(): if is_installed_host(host, callback_facts): + INSTALLER_LOG.debug("%s is already installed", str(host)) installed_hosts.append(host) else: + INSTALLER_LOG.debug("%s is not installed", str(host)) uninstalled_hosts.append(host) return installed_hosts, uninstalled_hosts 
@@ -709,82 +721,85 @@ def is_installed_host(host, callback_facts): return version_found -# pylint: disable=too-many-branches -# This pylint error will be corrected shortly in separate PR. -def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose): - - # Copy the list of existing hosts so we can remove any already installed nodes. - hosts_to_run_on = list(oo_cfg.deployment.hosts) +def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force): + """ + We get here once there are hosts in oo_cfg and we need to find out what + state they are in. There are several different cases that might occur: + + 1. All hosts in oo_cfg are uninstalled. In this case, we should proceed + with a normal installation. + 2. All hosts in oo_cfg are installed. In this case, ask the user if they + want to force reinstall or exit. We can also hint in this case about + the scaleup workflow. + 3. Some hosts are installed and some are uninstalled. In this case, prompt + the user if they want to force (re)install all hosts specified or direct + them to the scaleup workflow and exit. 
+ """ + hosts_to_run_on = [] # Check if master or nodes already have something installed - installed_hosts, uninstalled_hosts = get_installed_hosts(oo_cfg.deployment.hosts, callback_facts) - if len(installed_hosts) > 0: - click.echo('Installed environment detected.') - # This check has to happen before we start removing hosts later in this method + installed_hosts, uninstalled_hosts = get_installed_hosts(oo_cfg.deployment.hosts, + callback_facts) + nodes = [host for host in oo_cfg.deployment.hosts if host.is_node()] + masters_and_nodes = [host for host in oo_cfg.deployment.hosts if host.is_master() or host.is_node()] + + in_hosts = [str(h) for h in installed_hosts] + un_hosts = [str(h) for h in uninstalled_hosts] + all_hosts = [str(h) for h in oo_cfg.deployment.hosts] + m_and_n = [str(h) for h in masters_and_nodes] + + INSTALLER_LOG.debug("installed hosts: %s", ", ".join(in_hosts)) + INSTALLER_LOG.debug("uninstalled hosts: %s", ", ".join(un_hosts)) + INSTALLER_LOG.debug("deployment hosts: %s", ", ".join(all_hosts)) + INSTALLER_LOG.debug("masters and nodes: %s", ", ".join(m_and_n)) + + # Case (1): All uninstalled hosts + if len(uninstalled_hosts) == len(nodes): + click.echo('All hosts in config are uninstalled. Proceeding with installation...') + hosts_to_run_on = list(oo_cfg.deployment.hosts) + else: + # Case (2): All installed hosts + if len(installed_hosts) == len(masters_and_nodes): + message = """ +All specified hosts in specified environment are installed. +""" + # Case (3): Some installed, some uninstalled + else: + message = """ +A mix of installed and uninstalled hosts have been detected in your environment. +Please make sure your environment was installed successfully before adding new nodes. 
+""" + + # Still inside the case 2/3 else condition + mixed_msg = """ +\tInstalled hosts: +\t\t{inst_hosts} + +\tUninstalled hosts: +\t\t{uninst_hosts}""".format(inst_hosts=", ".join(in_hosts), uninst_hosts=", ".join(un_hosts)) + click.echo(mixed_msg) + + # Out of the case 2/3 if/else + click.echo(message) + + if not unattended: + response = click.confirm('Do you want to (re)install the environment?\n\n' + 'Note: This will potentially erase any custom changes.') + if response: + hosts_to_run_on = list(oo_cfg.deployment.hosts) + force = True + elif unattended and force: + hosts_to_run_on = list(oo_cfg.deployment.hosts) if not force: - if not unattended: - click.echo('By default the installer only adds new nodes ' - 'to an installed environment.') - response = click.prompt('Do you want to (1) only add additional nodes or ' - '(2) reinstall the existing hosts ' - 'potentially erasing any custom changes?', - type=int) - # TODO: this should be reworked with error handling. - # Click can certainly do this for us. - # This should be refactored as soon as we add a 3rd option. - if response == 1: - force = False - if response == 2: - force = True - - # present a message listing already installed hosts and remove hosts if needed - for host in installed_hosts: - if host.is_master(): - click.echo("{} is already an OpenShift master".format(host)) - # Masters stay in the list, we need to run against them when adding - # new nodes. - elif host.is_node(): - click.echo("{} is already an OpenShift node".format(host)) - # force is only used for reinstalls so we don't want to remove - # anything. - if not force: - hosts_to_run_on.remove(host) - - # Handle the cases where we know about uninstalled systems - # TODO: This logic is getting hard to understand. - # we should revise all this to be cleaner. 
- if not force and len(uninstalled_hosts) > 0: - for uninstalled_host in uninstalled_hosts: - click.echo("{} is currently uninstalled".format(uninstalled_host)) - # Fall through - click.echo('\nUninstalled hosts have been detected in your environment. ' - 'Please make sure your environment was installed successfully ' - 'before adding new nodes. If you want a fresh install, use ' - '`atomic-openshift-installer install --force`') + message = """ +If you want to force reinstall of your environment, run: +`atomic-openshift-installer install --force` + +If you want to add new nodes to this environment, run: +`atomic-openshift-installer scaleup` +""" + click.echo(message) sys.exit(1) - else: - if unattended: - if not force: - click.echo('Installed environment detected and no additional ' - 'nodes specified: aborting. If you want a fresh install, use ' - '`atomic-openshift-installer install --force`') - sys.exit(1) - else: - if not force: - new_nodes = collect_new_nodes(oo_cfg) - - hosts_to_run_on.extend(new_nodes) - oo_cfg.deployment.hosts.extend(new_nodes) - - openshift_ansible.set_config(oo_cfg) - click.echo('Gathering information from hosts...') - callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts, verbose) - if error or callback_facts is None: - click.echo("There was a problem fetching the required information. See " - "{} for details.".format(oo_cfg.settings['ansible_log_path'])) - sys.exit(1) - else: - pass # proceeding as normal should do a clean install return hosts_to_run_on, callback_facts 
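Review bot: The Case (1) test in `get_hosts_to_run_on` compares `len(uninstalled_hosts)` with `len(nodes)`, but `get_installed_hosts` classifies every host that is a master or a node, and Case (2) compares against `masters_and_nodes`. If a master does not also carry the node role, an installed master with only uninstalled nodes satisfies Case (1), and the run proceeds over the existing master without the reinstall confirmation or `--force`. The opposite mismatch sends a fresh install into the mixed branch. Comparing against the same population fixes both: `if len(uninstalled_hosts) == len(masters_and_nodes):`. The docstring could also note that `callback_facts` is now returned unchanged.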
@@ -800,6 +815,49 @@ def set_infra_nodes(hosts): host.node_labels = "{'region': 'infra'}" +def run_config_playbook(oo_cfg, hosts_to_run_on, unattended, verbose, gen_inventory): + # Write Ansible inventory file to disk: + inventory_file = openshift_ansible.generate_inventory(hosts_to_run_on) + + click.echo() + click.echo('Wrote atomic-openshift-installer config: %s' % oo_cfg.config_path) + click.echo("Wrote Ansible inventory: %s" % inventory_file) + click.echo() + + if gen_inventory: + sys.exit(0) + + click.echo('Ready to run installation process.') + message = """ +If changes are needed please edit the config file above and re-run. +""" + if not unattended: + confirm_continue(message) + + error = openshift_ansible.run_main_playbook(inventory_file, oo_cfg.deployment.hosts, + hosts_to_run_on, verbose) + + if error: + # The bootstrap script will print out the log location. + message = """ +An error was detected. After resolving the problem please relaunch the +installation process. +""" + click.echo(message) + sys.exit(1) + else: + message = """ +The installation was successful! + +If this is your first time installing please take a look at the Administrator +Guide for advanced options related to routing, storage, authentication, and +more: + +http://docs.openshift.com/enterprise/latest/admin_guide/overview.html +""" + click.echo(message) + + @click.group() @click.pass_context @click.option('--unattended', '-u', is_flag=True, default=False) @@ -846,8 +904,8 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_log_ # highest), anything below that (we only use debug/warning # presently) is not logged. If '-d' is given though, we'll # lower the threshold to debug (almost everything gets through) - installer_log.setLevel(logging.DEBUG) - installer_log.debug("Quick Installer debugging initialized") + INSTALLER_LOG.setLevel(logging.DEBUG) + INSTALLER_LOG.debug("Quick Installer debugging initialized") ctx.obj = {} ctx.obj['unattended'] = unattended 
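Review bot: `run_config_playbook` has no docstring, yet it terminates the process itself: `sys.exit(0)` when `gen_inventory` is set and `sys.exit(1)` when the playbook reports an error. Both `install` and `scaleup` call it as their last step, so a short docstring would make that contract explicit: it writes the inventory, optionally stops there, and exits non-zero on failure. The success message also links to `http://docs.openshift.com/...`, while the prerequisites link visible in this file's hunk headers uses `https://`. The new copy could use `https://docs.openshift.com/enterprise/latest/admin_guide/overview.html`.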
@@ -857,8 +915,8 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_log_ try: oo_cfg = OOConfig(ctx.obj['configuration']) - except OOConfigInvalidHostError as e: - click.echo(e) + except OOConfigInvalidHostError as err: + click.echo(err) sys.exit(1) # If no playbook dir on the CLI, check the config: @@ -916,7 +974,7 @@ def uninstall(ctx): @click.option('--latest-minor', '-l', is_flag=True, default=False) @click.option('--next-major', '-n', is_flag=True, default=False) @click.pass_context -# pylint: disable=too-many-statements +# pylint: disable=too-many-statements,too-many-branches def upgrade(ctx, latest_minor, next_major): oo_cfg = ctx.obj['oo_cfg'] @@ -969,7 +1027,7 @@ def upgrade(ctx, latest_minor, next_major): sys.exit(0) playbook = mapping['major_playbook'] new_version = mapping['major_version'] - # Update config to reflect the version we're targetting, we'll write + # Update config to reflect the version we're targeting, we'll write # to disk once Ansible completes successfully, not before. oo_cfg.settings['variant_version'] = new_version if oo_cfg.settings['variant'] == 'enterprise': @@ -1013,15 +1071,17 @@ def upgrade(ctx, latest_minor, next_major): def install(ctx, force, gen_inventory): oo_cfg = ctx.obj['oo_cfg'] verbose = ctx.obj['verbose'] + unattended = ctx.obj['unattended'] - if ctx.obj['unattended']: + if unattended: error_if_missing_info(oo_cfg) else: oo_cfg = get_missing_info_from_user(oo_cfg) - check_hosts_config(oo_cfg, ctx.obj['unattended']) + check_hosts_config(oo_cfg, unattended) - print_installation_summary(oo_cfg.deployment.hosts, oo_cfg.settings.get('variant_version', None)) + print_installation_summary(oo_cfg.deployment.hosts, + oo_cfg.settings.get('variant_version', None)) click.echo('Gathering information from hosts...') callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts, verbose) 
@@ -1031,62 +1091,92 @@ def install(ctx, force, gen_inventory): "Please see {} for details.".format(oo_cfg.settings['ansible_log_path'])) sys.exit(1) - hosts_to_run_on, callback_facts = get_hosts_to_run_on( - oo_cfg, callback_facts, ctx.obj['unattended'], force, verbose) + hosts_to_run_on, callback_facts = get_hosts_to_run_on(oo_cfg, + callback_facts, + unattended, + force) # We already verified this is not the case for unattended installs, so this can # only trigger for live CLI users: - # TODO: if there are *new* nodes and this is a live install, we may need the user - # to confirm the settings for new nodes. Look into this once we're distinguishing - # between new and pre-existing nodes. if not ctx.obj['unattended'] and len(oo_cfg.calc_missing_facts()) > 0: confirm_hosts_facts(oo_cfg, callback_facts) # Write quick installer config file to disk: oo_cfg.save_to_disk() - # Write Ansible inventory file to disk: - inventory_file = openshift_ansible.generate_inventory(hosts_to_run_on) + run_config_playbook(oo_cfg, hosts_to_run_on, unattended, verbose, gen_inventory) - click.echo() - click.echo('Wrote atomic-openshift-installer config: %s' % oo_cfg.config_path) - click.echo("Wrote Ansible inventory: %s" % inventory_file) - click.echo() - if gen_inventory: - sys.exit(0) +@click.command() +@click.option('--gen-inventory', is_flag=True, default=False, + help="Generate an Ansible inventory file and exit.") +@click.pass_context +def scaleup(ctx, gen_inventory): + oo_cfg = ctx.obj['oo_cfg'] + verbose = ctx.obj['verbose'] + unattended = ctx.obj['unattended'] - click.echo('Ready to run installation process.') + installed_hosts = list(oo_cfg.deployment.hosts) + + if len(installed_hosts) == 0: + click.echo('No hosts specified.') + sys.exit(1) + + click.echo('Welcome to the OpenShift Enterprise 3 Scaleup utility.') + + print_installation_summary(installed_hosts, + oo_cfg.settings['variant_version'], + verbose=False,) message = """ -If changes are needed please edit the config 
file above and re-run. -""" - if not ctx.obj['unattended']: - confirm_continue(message) +--- - error = openshift_ansible.run_main_playbook(inventory_file, oo_cfg.deployment.hosts, - hosts_to_run_on, verbose) +We have detected this previously installed OpenShift environment. - if error: - # The bootstrap script will print out the log location. - message = """ -An error was detected. After resolving the problem please relaunch the -installation process. +This tool will guide you through the process of adding additional +nodes to your cluster. """ - click.echo(message) + confirm_continue(message) + + error_if_missing_info(oo_cfg) + check_hosts_config(oo_cfg, True) + + installed_masters = [host for host in installed_hosts if host.is_master()] + new_nodes = collect_new_nodes(oo_cfg) + + oo_cfg.deployment.hosts.extend(new_nodes) + hosts_to_run_on = installed_masters + new_nodes + + openshift_ansible.set_config(oo_cfg) + click.echo('Gathering information from hosts...') + callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts, verbose) + if error or callback_facts is None: + click.echo("There was a problem fetching the required information. See " + "{} for details.".format(oo_cfg.settings['ansible_log_path'])) sys.exit(1) - else: - message = """ -The installation was successful! -If this is your first time installing please take a look at the Administrator -Guide for advanced options related to routing, storage, authentication, and -more: + print_installation_summary(oo_cfg.deployment.hosts, + oo_cfg.settings.get('variant_version', None)) + click.echo('Gathering information from hosts...') + callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts, + verbose) + + if error or callback_facts is None: + click.echo("There was a problem fetching the required information. 
" + "Please see {} for details.".format(oo_cfg.settings['ansible_log_path'])) + sys.exit(1) + + # We already verified this is not the case for unattended installs, so this can + # only trigger for live CLI users: + if not ctx.obj['unattended'] and len(oo_cfg.calc_missing_facts()) > 0: + confirm_hosts_facts(oo_cfg, callback_facts) + + # Write quick installer config file to disk: + oo_cfg.save_to_disk() + run_config_playbook(oo_cfg, hosts_to_run_on, unattended, verbose, gen_inventory) -http://docs.openshift.com/enterprise/latest/admin_guide/overview.html -""" - click.echo(message) cli.add_command(install) +cli.add_command(scaleup) cli.add_command(upgrade) cli.add_command(uninstall) diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py index 697ac9c08..64eb340f3 100644 --- a/utils/src/ooinstall/oo_config.py +++ b/utils/src/ooinstall/oo_config.py @@ -120,6 +120,10 @@ class Host(object): def is_storage(self): return 'storage' in self.roles + def is_etcd(self): + """ Does this host have the etcd role """ + return 'etcd' in self.roles + def is_etcd_member(self, all_hosts): """ Will this host be a member of a standalone etcd cluster. """ if not self.is_master(): @@ -436,3 +440,11 @@ class OOConfig(object): if host.connect_to == name: return host return None + + def get_host_roles_set(self): + roles_set = set() + for host in self.deployment.hosts: + for role in host.roles: + roles_set.add(role) + + return roles_set diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py index 80a79a6d2..f542fb376 100644 --- a/utils/src/ooinstall/openshift_ansible.py +++ b/utils/src/ooinstall/openshift_ansible.py @@ -48,9 +48,6 @@ def set_config(cfg): def generate_inventory(hosts): global CFG - masters = [host for host in hosts if host.is_master()] - multiple_masters = len(masters) > 1 - new_nodes = [host for host in hosts if host.is_node() and host.new_host] scaleup = len(new_nodes) > 0 
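Review bot: `scaleup` gathers facts twice. The block right after `openshift_ansible.set_config(oo_cfg)` and the block after the second `print_installation_summary` both call `openshift_ansible.default_facts(oo_cfg.deployment.hosts, verbose)` with the same arguments and the same error handling, so every host is contacted twice and the first result is overwritten unused; one block can be dropped. Separately, `confirm_continue(message)` runs unconditionally and `collect_new_nodes` looks interactive, so `--unattended` would presumably still wait for input here. `run_config_playbook` guards its confirmation with `if not unattended:`. The later check could also use the local `unattended` instead of `ctx.obj['unattended']`.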
@@ -61,7 +58,7 @@ def generate_inventory(hosts): write_inventory_children(base_inventory, scaleup) - write_inventory_vars(base_inventory, multiple_masters, lb) + write_inventory_vars(base_inventory, lb) # write_inventory_hosts for role in CFG.deployment.roles: @@ -106,7 +103,7 @@ def write_inventory_children(base_inventory, scaleup): # pylint: disable=too-many-branches -def write_inventory_vars(base_inventory, multiple_masters, lb): +def write_inventory_vars(base_inventory, lb): global CFG base_inventory.write('\n[OSEv3:vars]\n') @@ -123,7 +120,7 @@ def write_inventory_vars(base_inventory, multiple_masters, lb): if CFG.deployment.variables['ansible_ssh_user'] != 'root': base_inventory.write('ansible_become=yes\n') - if multiple_masters and lb is not None: + if lb is not None: base_inventory.write('openshift_master_cluster_method=native\n') base_inventory.write("openshift_master_cluster_hostname={}\n".format(lb.hostname)) base_inventory.write( @@ -317,6 +314,10 @@ def run_uninstall_playbook(hosts, verbose=False): facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path'] if 'ansible_config' in CFG.settings: facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config'] + # override the ansible config for our main playbook run + if 'ansible_quiet_config' in CFG.settings: + facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config'] + return run_ansible(playbook, inventory_file, facts_env, verbose) @@ -331,4 +332,8 @@ def run_upgrade_playbook(hosts, playbook, verbose=False): facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path'] if 'ansible_config' in CFG.settings: facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config'] + # override the ansible config for our main playbook run + if 'ansible_quiet_config' in CFG.settings: + facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config'] + return run_ansible(playbook, inventory_file, facts_env, verbose) 
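Review bot: The comment `# override the ansible config for our main playbook run` was added to `run_uninstall_playbook` and again to `run_upgrade_playbook` in the next hunk, where it does not describe the run being configured. Because the new `if` follows the `ansible_config` one, `ansible_quiet_config` silently wins whenever both settings are present. The comment should say so, e.g. `# ansible_quiet_config, when set, takes precedence over ansible_config`. With the block now repeated in at least two functions, a small helper that builds the environment would keep that precedence in one place. Both functions start outside their hunks.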
diff --git a/utils/src/ooinstall/utils.py b/utils/src/ooinstall/utils.py index eb27a57e4..85a77c75e 100644 --- a/utils/src/ooinstall/utils.py +++ b/utils/src/ooinstall/utils.py @@ -1,4 +1,6 @@ import logging +import re + installer_log = logging.getLogger('installer') @@ -8,3 +10,12 @@ def debug_env(env): if k.startswith("OPENSHIFT") or k.startswith("ANSIBLE") or k.startswith("OO"): installer_log.debug("{key}: {value}".format( key=k, value=env[k])) + + +def is_valid_hostname(hostname): + if not hostname or len(hostname) > 255: + return False + if hostname[-1] == ".": + hostname = hostname[:-1] # strip exactly one dot from the right, if present + allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE) + return all(allowed.match(x) for x in hostname.split(".")) diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py index 6993794fe..39772bb2e 100644 --- a/utils/src/ooinstall/variants.py +++ b/utils/src/ooinstall/variants.py @@ -40,24 +40,25 @@ class Variant(object): # WARNING: Keep the versions ordered, most recent first: OSE = Variant('openshift-enterprise', 'OpenShift Container Platform', [ - Version('3.3', 'openshift-enterprise'), + Version('3.4', 'openshift-enterprise'), ] ) REG = Variant('openshift-enterprise', 'Registry', [ - Version('3.3', 'openshift-enterprise', 'registry'), + Version('3.4', 'openshift-enterprise', 'registry'), ] ) origin = Variant('origin', 'OpenShift Origin', [ - Version('1.2', 'origin'), + Version('1.4', 'origin'), ] ) LEGACY = Variant('openshift-enterprise', 'OpenShift Container Platform', [ + Version('3.3', 'openshift-enterprise'), Version('3.2', 'openshift-enterprise'), Version('3.1', 'openshift-enterprise'), Version('3.0', 'openshift-enterprise'), diff --git a/utils/test-requirements.txt b/utils/test-requirements.txt index f2216a177..af91ab6a7 100644 --- a/utils/test-requirements.txt +++ b/utils/test-requirements.txt @@ -9,3 +9,4 @@ flake8 PyYAML click backports.functools_lru_cache +pyOpenSSL 
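Review bot: In `is_valid_hostname` the pattern ends in `$` and is applied with `match`; in Python `$` also matches just before a trailing newline, so a final label such as `com\n` is accepted. If a validated name is later written into a line-oriented file such as the generated inventory, that newline matters, so the end should be anchored strictly: `allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)\Z", re.IGNORECASE)`. The pattern could also be compiled once at module level rather than on every call. A docstring should state the rules enforced: total length at most 255, dot-separated labels of 1 to 63 letters, digits or hyphens, no leading or trailing hyphen, and one optional trailing dot. A test case with a trailing newline in `test_utils.py` would pin the behaviour.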
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py index 34392777b..36dc18034 100644 --- a/utils/test/cli_installer_tests.py +++ b/utils/test/cli_installer_tests.py @@ -842,7 +842,7 @@ class AttendedCliTests(OOCliFixture): # interactive with config file and some installed some uninstalled hosts @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') - def test_add_nodes(self, load_facts_mock, run_playbook_mock): + def test_scaleup_hint(self, load_facts_mock, run_playbook_mock): # Modify the mock facts to return a version indicating OpenShift # is already installed on our master, and the first node. @@ -866,13 +866,12 @@ class AttendedCliTests(OOCliFixture): result = self.runner.invoke(cli.cli, self.cli_args, input=cli_input) - self.assert_result(result, 0) - self._verify_load_facts(load_facts_mock) - self._verify_run_playbook(run_playbook_mock, 3, 2) + # This is testing the install workflow so we want to make sure we + # exit with the appropriate hint. + self.assertTrue('scaleup' in result.output) + self.assert_result(result, 1) - written_config = read_yaml(self.config_file) - self._verify_config_hosts(written_config, 3) @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') 
@@ -897,30 +896,30 @@ class AttendedCliTests(OOCliFixture): written_config = read_yaml(config_file) self._verify_config_hosts(written_config, 3) - #interactive with config file and all installed hosts - @patch('ooinstall.openshift_ansible.run_main_playbook') - @patch('ooinstall.openshift_ansible.load_system_facts') - def test_get_hosts_to_run_on(self, load_facts_mock, run_playbook_mock): - mock_facts = copy.deepcopy(MOCK_FACTS) - mock_facts['10.0.0.1']['common']['version'] = "3.0.0" - mock_facts['10.0.0.2']['common']['version'] = "3.0.0" - - cli_input = build_input(hosts=[ - ('10.0.0.1', True, False), - ], - add_nodes=[('10.0.0.2', False, False)], - ssh_user='root', - variant_num=1, - schedulable_masters_ok=True, - confirm_facts='y', - storage='10.0.0.1',) - - self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, - run_playbook_mock, - cli_input, - exp_hosts_len=2, - exp_hosts_to_run_on_len=2, - force=False) +# #interactive with config file and all installed hosts +# @patch('ooinstall.openshift_ansible.run_main_playbook') +# @patch('ooinstall.openshift_ansible.load_system_facts') +# def test_get_hosts_to_run_on(self, load_facts_mock, run_playbook_mock): +# mock_facts = copy.deepcopy(MOCK_FACTS) +# mock_facts['10.0.0.1']['common']['version'] = "3.0.0" +# mock_facts['10.0.0.2']['common']['version'] = "3.0.0" +# +# cli_input = build_input(hosts=[ +# ('10.0.0.1', True, False), +# ], +# add_nodes=[('10.0.0.2', False, False)], +# ssh_user='root', +# variant_num=1, +# schedulable_masters_ok=True, +# confirm_facts='y', +# storage='10.0.0.1',) +# +# self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, +# run_playbook_mock, +# cli_input, +# exp_hosts_len=2, +# exp_hosts_to_run_on_len=2, +# force=False) #interactive multimaster: one more node than master @patch('ooinstall.openshift_ansible.run_main_playbook') diff --git a/utils/test/fixture.py b/utils/test/fixture.py index a883e5c56..62135c761 100644 --- a/utils/test/fixture.py +++ b/utils/test/fixture.py 
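Review bot: `test_get_hosts_to_run_on` is disabled by commenting out every line, which leaves the attended all-hosts-installed case with no visible coverage just as `get_hosts_to_run_on` gains new behaviour for it. If the test is obsolete it should be deleted rather than kept as a comment block. If it is awaiting a rewrite, keep it as code under `@unittest.skip("needs update for scaleup workflow")` (assuming `unittest` is imported in this file) so it still appears in test reports. Alternatively, update it to expect exit code 1 and the reinstall hint, as `test_scaleup_hint` does for the mixed case.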
@@ -138,8 +138,8 @@ class OOCliFixture(OOInstallFixture): written_config = read_yaml(config_file) self._verify_config_hosts(written_config, exp_hosts_len) - if "Uninstalled" in result.output: - # verify we exited on seeing uninstalled hosts + if "If you want to force reinstall" in result.output: + # verify we exited on seeing installed hosts self.assertEqual(result.exit_code, 1) else: self.assert_result(result, 0) @@ -156,7 +156,7 @@ class OOCliFixture(OOInstallFixture): #pylint: disable=too-many-arguments,too-many-branches,too-many-statements def build_input(ssh_user=None, hosts=None, variant_num=None, add_nodes=None, confirm_facts=None, schedulable_masters_ok=None, - master_lb=None, storage=None): + master_lb=('', False), storage=None): """ Build an input string simulating a user entering values in an interactive attended install. @@ -204,11 +204,11 @@ def build_input(ssh_user=None, hosts=None, variant_num=None, i += 1 # You can pass a single master_lb or a list if you intend for one to get rejected: - if master_lb: - if isinstance(master_lb[0], list) or isinstance(master_lb[0], tuple): - inputs.extend(master_lb[0]) - else: - inputs.append(master_lb[0]) + if isinstance(master_lb[0], list) or isinstance(master_lb[0], tuple): + inputs.extend(master_lb[0]) + else: + inputs.append(master_lb[0]) + if master_lb[0]: inputs.append('y' if master_lb[1] else 'n') if storage: @@ -248,6 +248,7 @@ def build_input(ssh_user=None, hosts=None, variant_num=None, inputs.extend([ confirm_facts, 'y', # lets do this + 'y', ]) return '\n'.join(inputs) diff --git a/utils/test/test_utils.py b/utils/test/test_utils.py new file mode 100644 index 000000000..2e59d86f2 --- /dev/null +++ b/utils/test/test_utils.py 
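Review bot: `build_input` now indexes `master_lb[0]` unconditionally, so a caller that passes `master_lb=None` explicitly (the previous default) would raise `TypeError`. The callers are outside this diff, so that needs checking, or the function can normalise the argument with `master_lb = master_lb or ('', False)`. In the last hunk, the extra `'y',` appended after `'y',  # lets do this` has no comment, so it is unclear which new prompt it answers; a trailing comment naming that prompt would help. The docstring could also state that the `('', False)` default means "no load balancer entered".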
@@ -0,0 +1,100 @@ +""" +Unittests for ooinstall utils. +""" + +import unittest +import logging +import sys +import copy +from ooinstall.utils import debug_env, is_valid_hostname +import mock + + +class TestUtils(unittest.TestCase): + """ + Parent unittest TestCase. + """ + + def setUp(self): + self.debug_all_params = { + 'OPENSHIFT_FOO': 'bar', + 'ANSIBLE_FOO': 'bar', + 'OO_FOO': 'bar' + } + + self.expected = [ + mock.call('ANSIBLE_FOO: bar'), + mock.call('OPENSHIFT_FOO: bar'), + mock.call('OO_FOO: bar'), + ] + + # python 2.x has assertItemsEqual, python 3.x has assertCountEqual + if sys.version_info.major > 3: + self.assertItemsEqual = self.assertCountEqual + + ###################################################################### + # Validate ooinstall.utils.debug_env functionality + + def test_utils_debug_env_all_debugged(self): + """Verify debug_env debugs specific env variables""" + + with mock.patch('ooinstall.utils.installer_log') as _il: + debug_env(self.debug_all_params) + print _il.debug.call_args_list + + # Debug was called for each item we expect + self.assertEqual( + len(self.debug_all_params), + _il.debug.call_count) + + # Each item we expect was logged + self.assertItemsEqual( + self.expected, + _il.debug.call_args_list) + + def test_utils_debug_env_some_debugged(self): + """Verify debug_env skips non-wanted env variables""" + debug_some_params = copy.deepcopy(self.debug_all_params) + # This will not be logged by debug_env + debug_some_params['MG_FRBBR'] = "SKIPPED" + + with mock.patch('ooinstall.utils.installer_log') as _il: + debug_env(debug_some_params) + + # The actual number of debug calls was less than the + # number of items passed to debug_env + self.assertLess( + _il.debug.call_count, + len(debug_some_params)) + + self.assertItemsEqual( + self.expected, + _il.debug.call_args_list) + + ###################################################################### + def test_utils_is_valid_hostname_invalid(self): + """Verify is_valid_hostname can 
detect None or too-long hostnames""" + # A hostname that's empty, None, or more than 255 chars is invalid + empty_hostname = '' + res = is_valid_hostname(empty_hostname) + self.assertFalse(res) + + none_hostname = None + res = is_valid_hostname(none_hostname) + self.assertFalse(res) + + too_long_hostname = "a" * 256 + res = is_valid_hostname(too_long_hostname) + self.assertFalse(res) + + def test_utils_is_valid_hostname_ends_with_dot(self): + """Verify is_valid_hostname can parse hostnames with trailing periods""" + hostname = "foo.example.com." + res = is_valid_hostname(hostname) + self.assertTrue(res) + + def test_utils_is_valid_hostname_normal_hostname(self): + """Verify is_valid_hostname can parse regular hostnames""" + hostname = "foo.example.com" + res = is_valid_hostname(hostname) + self.assertTrue(res) diff --git a/utils/workflows/enterprise_deploy/openshift.sh b/utils/workflows/enterprise_deploy/openshift.sh deleted file mode 100644 index 040a9a84d..000000000 --- a/utils/workflows/enterprise_deploy/openshift.sh +++ /dev/null @@ -1,2 +0,0 @@ -# This file is not used for OpenShift 3.0. It's merely an artifact of the the -# installation framework originally used for OpenShift 2.x. |
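Review bot: In `setUp` of `test_utils.py`, `if sys.version_info.major > 3:` is never true on Python 3, so the `assertItemsEqual` alias described in the comment above it is never installed there. The condition should be `if sys.version_info.major >= 3:`. `test_utils_debug_env_all_debugged` also contains `print _il.debug.call_args_list`, a Python 2 print statement that is a syntax error on Python 3 and looks like leftover debugging; it can simply be removed. The `logging` import appears unused in this new file.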