-rw-r--r--  .dockerignore | 2
-rw-r--r--  .flake8 | 2
-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  ansible.cfg | 2
-rw-r--r--  files/origin-components/console-config.yaml | 25
-rw-r--r--  files/origin-components/console-rbac-template.yaml | 38
-rw-r--r--  images/installer/Dockerfile | 8
-rw-r--r--  images/installer/Dockerfile.rhel7 | 2
-rwxr-xr-x  images/installer/root/usr/local/bin/entrypoint-gcp | 51
-rwxr-xr-x  images/installer/root/usr/local/bin/user_setup | 2
-rw-r--r--  inventory/.gitignore | 1
-rw-r--r--  inventory/dynamic/gcp/README.md | 1
-rw-r--r--  inventory/dynamic/gcp/ansible.cfg | 45
-rw-r--r--  inventory/dynamic/gcp/group_vars/all/00_defaults.yml | 42
-rwxr-xr-x  inventory/dynamic/gcp/hosts.py | 408
-rwxr-xr-x  inventory/dynamic/gcp/hosts.sh | 15
-rw-r--r--  inventory/dynamic/gcp/none | 1
-rw-r--r--  inventory/dynamic/injected/README.md | 3
-rw-r--r--  inventory/hosts.example | 6
-rw-r--r--  inventory/hosts.grafana.example | 12
-rw-r--r--  openshift-ansible.spec | 114
-rw-r--r--  playbooks/aws/README.md | 14
-rwxr-xr-x  playbooks/aws/openshift-cluster/accept.yml | 41
-rw-r--r--  playbooks/aws/openshift-cluster/hosted.yml | 25
-rw-r--r--  playbooks/aws/openshift-cluster/install.yml | 27
-rw-r--r--  playbooks/aws/openshift-cluster/provision.yml | 10
-rw-r--r--  playbooks/aws/openshift-cluster/provision_elb.yml | 9
-rw-r--r--  playbooks/aws/openshift-cluster/provision_install.yml | 4
-rw-r--r--  playbooks/aws/openshift-cluster/provision_s3.yml | 10
-rw-r--r--  playbooks/aws/openshift-cluster/uninstall_prerequisites.yml | 6
-rw-r--r--  playbooks/aws/openshift-cluster/uninstall_sec_group.yml | 10
-rw-r--r--  playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml | 10
-rw-r--r--  playbooks/aws/openshift-cluster/uninstall_vpc.yml | 10
-rw-r--r--  playbooks/cluster-operator/aws/infrastructure.yml | 21
l---------  playbooks/cluster-operator/aws/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml | 5
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml | 19
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml | 64
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml | 2
-rw-r--r--  playbooks/common/private/components.yml | 38
-rw-r--r--  playbooks/common/private/control_plane.yml | 34
-rw-r--r--  playbooks/container-runtime/private/build_container_groups.yml | 6
-rw-r--r--  playbooks/container-runtime/private/config.yml | 8
-rw-r--r--  playbooks/container-runtime/private/setup_storage.yml | 7
-rw-r--r--  playbooks/deploy_cluster.yml | 37
-rw-r--r--  playbooks/gcp/openshift-cluster/build_base_image.yml | 162
-rw-r--r--  playbooks/gcp/openshift-cluster/build_image.yml | 106
-rw-r--r--  playbooks/gcp/openshift-cluster/deprovision.yml | 10
-rw-r--r--  playbooks/gcp/openshift-cluster/install.yml | 33
-rw-r--r--  playbooks/gcp/openshift-cluster/install_gcp.yml | 21
-rw-r--r--  playbooks/gcp/openshift-cluster/inventory.yml | 10
-rw-r--r--  playbooks/gcp/openshift-cluster/launch.yml | 12
-rw-r--r--  playbooks/gcp/openshift-cluster/provision.yml (renamed from playbooks/gcp/provision.yml) | 9
-rw-r--r--  playbooks/gcp/openshift-cluster/publish_image.yml | 9
l---------  playbooks/gcp/openshift-cluster/roles | 1
-rw-r--r--  playbooks/init/base_packages.yml | 4
-rw-r--r--  playbooks/init/basic_facts.yml (renamed from playbooks/init/facts.yml) | 49
-rw-r--r--  playbooks/init/cluster_facts.yml | 42
-rw-r--r--  playbooks/init/main.yml | 11
-rw-r--r--  playbooks/init/repos.yml | 4
-rw-r--r--  playbooks/init/sanity_checks.yml | 3
-rw-r--r--  playbooks/init/validate_hostnames.yml | 4
-rw-r--r--  playbooks/openshift-checks/adhoc.yml | 1
-rw-r--r--  playbooks/openshift-etcd/scaleup.yml | 47
-rw-r--r--  playbooks/openshift-etcd/upgrade.yml | 6
-rw-r--r--  playbooks/openshift-grafana/config.yml | 4
-rw-r--r--  playbooks/openshift-grafana/private/config.yml | 6
l---------  playbooks/openshift-grafana/private/filter_plugins | 1
l---------  playbooks/openshift-grafana/private/lookup_plugins | 1
l---------  playbooks/openshift-grafana/private/roles | 1
-rw-r--r--  playbooks/openshift-loadbalancer/private/config.yml | 2
-rw-r--r--  playbooks/openshift-master/scaleup.yml | 41
-rw-r--r--  playbooks/openshift-node/scaleup.yml | 24
-rw-r--r--  playbooks/openstack/README.md | 28
-rw-r--r--  playbooks/openstack/advanced-configuration.md | 7
-rwxr-xr-x  playbooks/openstack/inventory.py (renamed from playbooks/openstack/sample-inventory/inventory.py) | 0
-rw-r--r--  playbooks/openstack/openshift-cluster/provision.yml | 4
-rw-r--r--  playbooks/openstack/sample-inventory/group_vars/OSEv3.yml | 2
-rw-r--r--  playbooks/prerequisites.yml | 9
-rw-r--r--  roles/ansible_service_broker/tasks/install.yml | 17
-rw-r--r--  roles/calico_master/tasks/main.yml | 2
-rw-r--r--  roles/container_runtime/defaults/main.yml | 39
-rw-r--r--  roles/container_runtime/tasks/package_docker.yml | 11
-rw-r--r--  roles/etcd/tasks/auxiliary/drop_etcdctl.yml | 2
-rw-r--r--  roles/flannel/meta/main.yml | 1
-rw-r--r--  roles/kuryr/tasks/node.yaml | 2
-rw-r--r--  roles/kuryr/templates/cni-daemonset.yaml.j2 | 19
-rw-r--r--  roles/kuryr/templates/configmap.yaml.j2 | 357
-rw-r--r--  roles/lib_utils/filter_plugins/oo_filters.py | 25
-rw-r--r--  roles/lib_utils/filter_plugins/openshift_master.py | 6
-rw-r--r--  roles/lib_utils/library/docker_creds.py | 4
-rw-r--r--  roles/openshift_aws/defaults/main.yml | 12
-rw-r--r--  roles/openshift_aws/tasks/accept_nodes.yml | 4
-rw-r--r--  roles/openshift_aws/tasks/provision.yml | 17
-rw-r--r--  roles/openshift_aws/tasks/provision_elb.yml | 15
-rw-r--r--  roles/openshift_aws/tasks/provision_nodes.yml | 17
-rw-r--r--  roles/openshift_aws/tasks/uninstall_security_group.yml | 14
-rw-r--r--  roles/openshift_aws/tasks/uninstall_ssh_keys.yml | 9
-rw-r--r--  roles/openshift_aws/tasks/uninstall_vpc.yml | 36
-rw-r--r--  roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-policy.yaml | 10
-rw-r--r--  roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-role.yaml | 21
-rw-r--r--  roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-serviceaccount.yaml | 5
-rw-r--r--  roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml | 68
-rw-r--r--  roles/openshift_bootstrap_autoapprover/tasks/main.yml | 28
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 3
-rw-r--r--  roles/openshift_cloud_provider/tasks/gce.yml | 10
-rw-r--r--  roles/openshift_examples/meta/main.yml | 1
-rw-r--r--  roles/openshift_excluder/tasks/verify_excluder.yml | 2
-rw-r--r--  roles/openshift_expand_partition/tasks/main.yml | 2
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 5
-rw-r--r--  roles/openshift_gcp/files/bootstrap-script.sh | 42
-rw-r--r--  roles/openshift_gcp/files/openshift-bootstrap-update.service | 7
-rw-r--r--  roles/openshift_gcp/files/openshift-bootstrap-update.timer | 10
-rw-r--r--  roles/openshift_gcp/files/partition.conf (renamed from roles/openshift_gcp_image_prep/files/partition.conf) | 2
-rw-r--r--  roles/openshift_gcp/meta/main.yml | 17
-rw-r--r--  roles/openshift_gcp/tasks/add_custom_repositories.yml | 20
-rw-r--r--  roles/openshift_gcp/tasks/configure_gcp_base_image.yml (renamed from roles/openshift_gcp_image_prep/tasks/main.yaml) | 14
-rw-r--r--  roles/openshift_gcp/tasks/configure_master_bootstrap.yml | 36
-rw-r--r--  roles/openshift_gcp/tasks/configure_master_healthcheck.yml | 19
-rw-r--r--  roles/openshift_gcp/tasks/dynamic_inventory.yml | 5
-rw-r--r--  roles/openshift_gcp/tasks/frequent_log_rotation.yml | 18
-rw-r--r--  roles/openshift_gcp/tasks/main.yml (renamed from roles/openshift_gcp/tasks/main.yaml) | 4
-rw-r--r--  roles/openshift_gcp/tasks/node_cloud_config.yml | 12
-rw-r--r--  roles/openshift_gcp/tasks/publish_image.yml | 32
-rw-r--r--  roles/openshift_gcp/tasks/setup_scale_group_facts.yml | 44
-rw-r--r--  roles/openshift_gcp/templates/inventory.j2.sh | 8
-rw-r--r--  roles/openshift_gcp/templates/master_healthcheck.j2 | 68
-rw-r--r--  roles/openshift_gcp/templates/openshift-bootstrap-update.j2 | 7
-rw-r--r--  roles/openshift_gcp/templates/provision.j2.sh | 17
-rw-r--r--  roles/openshift_gcp/templates/yum_repo.j2 | 20
-rw-r--r--  roles/openshift_grafana/defaults/main.yml | 12
-rw-r--r--  roles/openshift_grafana/files/grafana-ocp-oauth.yml | 661
-rw-r--r--  roles/openshift_grafana/files/grafana-ocp.yml | 76
-rw-r--r--  roles/openshift_grafana/files/openshift-cluster-monitoring.json | 5138
-rw-r--r--  roles/openshift_grafana/meta/main.yml | 13
-rw-r--r--  roles/openshift_grafana/tasks/gf-permissions.yml | 12
-rw-r--r--  roles/openshift_grafana/tasks/main.yml | 122
-rw-r--r--  roles/openshift_health_checker/openshift_checks/__init__.py | 43
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 34
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/kibana.py | 13
-rw-r--r--  roles/openshift_health_checker/openshift_checks/ovs_version.py | 27
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py | 58
-rw-r--r--  roles/openshift_health_checker/test/kibana_test.py | 12
-rw-r--r--  roles/openshift_health_checker/test/ovs_version_test.py | 23
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py | 5
-rw-r--r--  roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml | 13
-rw-r--r--  roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/annotate_ops_projects.yaml | 1
-rw-r--r--  roles/openshift_logging/tasks/delete_logging.yaml | 10
-rw-r--r--  roles/openshift_logging/tasks/generate_certs.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 14
-rw-r--r--  roles/openshift_logging/tasks/procure_server_certs.yaml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/get_es_version.yml | 4
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml | 4
-rw-r--r--  roles/openshift_master/tasks/upgrade/rpm_upgrade.yml | 23
-rw-r--r--  roles/openshift_metrics/tasks/install_metrics.yaml | 6
-rw-r--r--  roles/openshift_metrics/tasks/oc_apply.yaml | 8
-rw-r--r--  roles/openshift_metrics/tasks/uninstall_metrics.yaml | 10
-rw-r--r--  roles/openshift_node/defaults/main.yml | 18
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 19
-rw-r--r--  roles/openshift_node/tasks/upgrade/config_changes.yml | 6
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 2
-rw-r--r--  roles/openshift_node_certificates/tasks/main.yml | 20
-rw-r--r--  roles/openshift_node_certificates/vars/main.yml | 2
-rw-r--r--  roles/openshift_openstack/templates/heat_stack.yaml.j2 | 24
-rw-r--r--  roles/openshift_openstack/templates/heat_stack_server.yaml.j2 | 2
-rw-r--r--  roles/openshift_persistent_volumes/tasks/pv.yml | 2
-rw-r--r--  roles/openshift_persistent_volumes/tasks/pvc.yml | 2
-rw-r--r--  roles/openshift_provisioners/tasks/oc_apply.yaml | 12
-rw-r--r--  roles/openshift_version/tasks/check_available_rpms.yml | 2
-rw-r--r--  roles/openshift_version/tasks/first_master_containerized_version.yml | 5
-rw-r--r--  roles/openshift_version/tasks/first_master_rpm_version.yml | 6
-rw-r--r--  roles/openshift_version/tasks/masters_and_nodes.yml | 7
-rw-r--r--  roles/openshift_web_console/defaults/main.yml | 3
-rw-r--r--  roles/openshift_web_console/tasks/install.yml | 114
-rw-r--r--  roles/openshift_web_console/tasks/rollout_console.yml | 20
-rw-r--r--  roles/openshift_web_console/tasks/update_asset_config.yml | 68
-rw-r--r--  roles/openshift_web_console/tasks/update_console_config.yml | 67
-rw-r--r--  roles/openshift_web_console/vars/default_images.yml | 4
-rw-r--r--  roles/openshift_web_console/vars/main.yml | 1
-rw-r--r--  roles/openshift_web_console/vars/openshift-enterprise.yml | 4
-rw-r--r--  roles/os_firewall/tasks/firewalld.yml | 5
-rw-r--r--  roles/template_service_broker/defaults/main.yml | 2
-rw-r--r--  roles/template_service_broker/tasks/install.yml | 21
-rw-r--r--  roles/template_service_broker/tasks/remove.yml | 15
-rw-r--r--  roles/template_service_broker/vars/default_images.yml | 4
-rw-r--r--  roles/template_service_broker/vars/openshift-enterprise.yml | 4
-rw-r--r--  roles/tuned/tasks/main.yml | 7
-rw-r--r--  utils/src/ooinstall/ansible_plugins/facts_callback.py | 6
-rw-r--r--  utils/src/ooinstall/cli_installer.py | 93
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py | 20
-rw-r--r--  utils/test/cli_installer_tests.py | 285
-rw-r--r--  utils/test/fixture.py | 11
-rw-r--r--  utils/test/oo_config_tests.py | 31
-rw-r--r--  utils/test/test_utils.py | 1
208 files changed, 9366 insertions(+), 974 deletions(-)
diff --git a/.dockerignore b/.dockerignore
index 0a70c5bfa..2509d48b5 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -2,7 +2,7 @@
bin
docs
hack
-inventory
+inventory/hosts.*
test
utils
**/*.md
diff --git a/.flake8 b/.flake8
index 99ae3c2f0..cce460d3c 100644
--- a/.flake8
+++ b/.flake8
@@ -1,5 +1,5 @@
[flake8]
# TODO: cleanup flake8 issues with utils/test/*
-exclude=.tox,inventory,utils/test
+exclude=.tox,inventory
max_line_length = 120
ignore = E501,T003
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index a2b323b95..d6dd5a3c8 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.9.0-0.19.0 ./
+3.9.0-0.23.0 ./
diff --git a/ansible.cfg b/ansible.cfg
index c1c76a496..67149cb35 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -22,7 +22,7 @@ fact_caching = jsonfile
fact_caching_connection = $HOME/ansible/facts
fact_caching_timeout = 600
callback_whitelist = profile_tasks
-inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt
+inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt, .ini
# work around privilege escalation timeouts in ansible:
timeout = 30
diff --git a/files/origin-components/console-config.yaml b/files/origin-components/console-config.yaml
index e104e8028..55c650fbe 100644
--- a/files/origin-components/console-config.yaml
+++ b/files/origin-components/console-config.yaml
@@ -1,15 +1,18 @@
-kind: WebConsoleConfiguration
apiVersion: webconsole.config.openshift.io/v1
-extensionDevelopment: false
-extensionProperties: null
-extensionScripts: null
-extensionStylesheets: null
-extensions: null
-loggingPublicURL: ""
-logoutURL: ""
-masterPublicURL: https://127.0.0.1:8443
-metricsPublicURL: ""
-publicURL: https://127.0.0.1:8443/console/
+kind: WebConsoleConfiguration
+clusterInfo:
+ consolePublicURL: https://127.0.0.1:8443/console/
+ loggingPublicURL: ""
+ logoutPublicURL: ""
+ masterPublicURL: https://127.0.0.1:8443
+ metricsPublicURL: ""
+extensions:
+ scriptURLs: []
+ stylesheetURLs: []
+ properties: null
+features:
+ inactivityTimeoutMinutes: 0
+ clusterResourceOverridesEnabled: false
servingInfo:
bindAddress: 0.0.0.0:8443
bindNetwork: tcp4
diff --git a/files/origin-components/console-rbac-template.yaml b/files/origin-components/console-rbac-template.yaml
new file mode 100644
index 000000000..9ee117199
--- /dev/null
+++ b/files/origin-components/console-rbac-template.yaml
@@ -0,0 +1,38 @@
+apiVersion: template.openshift.io/v1
+kind: Template
+metadata:
+ name: web-console-server-rbac
+parameters:
+- name: NAMESPACE
+ # This namespace cannot be changed. Only `openshift-web-console` is supported.
+ value: openshift-web-console
+objects:
+
+
+# Grant the web console server powers for cluster inspection
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: ClusterRole
+ metadata:
+ name: system:openshift:web-console-server
+ rules:
+ - apiGroups:
+ - "servicecatalog.k8s.io"
+ resources:
+ - clusterservicebrokers
+ verbs:
+ - get
+ - list
+ - watch
+
+# Bind the role above to the web console's service account
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: ClusterRoleBinding
+ metadata:
+ name: system:openshift:web-console-server
+ roleRef:
+ kind: ClusterRole
+ name: system:openshift:web-console-server
+ subjects:
+ - kind: ServiceAccount
+ namespace: ${NAMESPACE}
+ name: webconsole
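A minimal sketch of applying this template by hand, assuming a cluster-admin context (the installer's web console tasks normally handle this step):

```
# Process the RBAC template and apply the resulting ClusterRole and
# ClusterRoleBinding; NAMESPACE must stay openshift-web-console.
oc process -f files/origin-components/console-rbac-template.yaml \
  -p NAMESPACE=openshift-web-console \
  | oc apply -f -
```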
diff --git a/images/installer/Dockerfile b/images/installer/Dockerfile
index db362bd65..22a0d06a0 100644
--- a/images/installer/Dockerfile
+++ b/images/installer/Dockerfile
@@ -8,12 +8,14 @@ USER root
COPY images/installer/origin-extra-root /
# install ansible and deps
-RUN INSTALL_PKGS="python-lxml pyOpenSSL python2-cryptography openssl java-1.8.0-openjdk-headless python2-passlib httpd-tools openssh-clients origin-clients" \
+RUN INSTALL_PKGS="python-lxml python-dns pyOpenSSL python2-cryptography openssl java-1.8.0-openjdk-headless python2-passlib httpd-tools openssh-clients origin-clients" \
&& yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
- && EPEL_PKGS="ansible python2-boto google-cloud-sdk-183.0.0 which" \
+ && EPEL_PKGS="ansible python2-boto python2-boto3 google-cloud-sdk-183.0.0 which" \
&& yum install -y epel-release \
&& yum install -y --setopt=tsflags=nodocs $EPEL_PKGS \
- && rpm -V $INSTALL_PKGS $EPEL_PKGS \
+ && EPEL_TESTING_PKGS="python2-libcloud" \
+ && yum install -y --enablerepo=epel-testing --setopt=tsflags=nodocs $EPEL_TESTING_PKGS \
+ && rpm -V $INSTALL_PKGS $EPEL_PKGS $EPEL_TESTING_PKGS \
&& yum clean all
LABEL name="openshift/origin-ansible" \
diff --git a/images/installer/Dockerfile.rhel7 b/images/installer/Dockerfile.rhel7
index 05df6b43a..3b05c1aa6 100644
--- a/images/installer/Dockerfile.rhel7
+++ b/images/installer/Dockerfile.rhel7
@@ -5,7 +5,7 @@ MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
USER root
# Playbooks, roles, and their dependencies are installed from packages.
-RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto openssl java-1.8.0-openjdk-headless httpd-tools google-cloud-sdk" \
+RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto python2-boto3 openssl java-1.8.0-openjdk-headless httpd-tools google-cloud-sdk" \
&& yum repolist > /dev/null \
&& yum-config-manager --enable rhel-7-server-ose-3.7-rpms \
&& yum-config-manager --enable rhel-7-server-rh-common-rpms \
diff --git a/images/installer/root/usr/local/bin/entrypoint-gcp b/images/installer/root/usr/local/bin/entrypoint-gcp
new file mode 100755
index 000000000..d0ffd9904
--- /dev/null
+++ b/images/installer/root/usr/local/bin/entrypoint-gcp
@@ -0,0 +1,51 @@
+#!/bin/bash
+#
+# This file sets up the user to run in the GCP environment.
+# It provides dynamic inventory that works well when run in
+# a container environment by setting up a default inventory.
+# It assumes the user has provided a GCP service account token
+# and ssh-privatekey file at "$(pwd)/inventory/dynamic/injected"
+# and automatically links any YAML files found into the group
+# vars directory, which allows the playbook to more easily be
+# run in containerized contexts.
+
+WORK=$(pwd)
+FILES="${WORK}/inventory/dynamic/injected"
+
+# Patch /etc/passwd file with the current user info.
+# The current user's entry must be correctly defined in this file in order for
+# the `ssh` command to work within the created container.
+
+if ! whoami &>/dev/null; then
+ echo "${USER:-default}:x:$(id -u):$(id -g):Default User:$HOME:/sbin/nologin" >> /etc/passwd
+fi
+
+# Provide a "files_dir" variable that points to inventory/dynamic/injected
+echo "files_dir: \"${FILES}\"" > "${WORK}/inventory/dynamic/gcp/group_vars/all/00_default_files_dir.yml"
+# Add any injected variable files into the group vars directory
+find "${FILES}" -name '*.yml' -or -name '*.yaml' -or -name vars | xargs -L1 -I {} ln -fs {} "${WORK}/inventory/dynamic/gcp/group_vars/all"
+# Avoid sudo when running locally - nothing in the image requires it.
+mkdir -p "${WORK}/inventory/dynamic/gcp/host_vars/localhost"
+echo "ansible_become: no" > "${WORK}/inventory/dynamic/gcp/host_vars/localhost/00_skip_root.yaml"
+
+if [[ -z "${ANSIBLE_CONFIG-}" ]]; then
+ export ANSIBLE_CONFIG="${WORK}/inventory/dynamic/gcp/ansible.cfg"
+fi
+
+# SSH requires the file to be owned by the current user, but Docker copies
+# files in as root. Put the file into the ssh dir with the right permissions
+if [[ -f "${FILES}/ssh-privatekey" ]]; then
+ keyfile="${HOME}/.ssh/google_compute_engine"
+ mkdir -p "${HOME}/.ssh"
+ rm -f "${keyfile}"
+ cat "${FILES}/ssh-privatekey" > "${keyfile}"
+ chmod 0600 "${keyfile}"
+ ssh-keygen -y -f "${keyfile}" > "${keyfile}.pub"
+fi
+if [[ -f "${FILES}/gce.json" ]]; then
+ gcloud auth activate-service-account --key-file="${FILES}/gce.json"
+else
+ echo "No service account file found at ${FILES}/gce.json, bypassing login"
+fi
+
+exec "$@"
\ No newline at end of file
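A hypothetical invocation of this entrypoint from a built installer image; the image name and in-container working directory are assumptions, not fixed by this change:

```
# Mount a directory containing gce.json and ssh-privatekey at the injected
# inventory path, then run a GCP playbook through entrypoint-gcp.
docker run -it --rm \
  -v "$(pwd)/injected:/usr/share/ansible/openshift-ansible/inventory/dynamic/injected:z" \
  --entrypoint /usr/local/bin/entrypoint-gcp \
  openshift/origin-ansible:latest \
  ansible-playbook playbooks/gcp/openshift-cluster/launch.yml
```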
diff --git a/images/installer/root/usr/local/bin/user_setup b/images/installer/root/usr/local/bin/user_setup
index b76e60a4d..dba0af3e4 100755
--- a/images/installer/root/usr/local/bin/user_setup
+++ b/images/installer/root/usr/local/bin/user_setup
@@ -12,6 +12,8 @@ chmod g+rw /etc/passwd
# ensure that the ansible content is accessible
chmod -R g+r ${WORK_DIR}
find ${WORK_DIR} -type d -exec chmod g+x {} +
+# ensure that the dynamic inventory dir can have content created
+find ${WORK_DIR} -type d -exec chmod g+wx {} +
# no need for this script to remain in the image after running
rm $0
diff --git a/inventory/.gitignore b/inventory/.gitignore
index 6ff331c7e..97aa044f6 100644
--- a/inventory/.gitignore
+++ b/inventory/.gitignore
@@ -1 +1,2 @@
hosts
+/dynamic/gcp/group_vars/all/00_default_files_dir.yml
\ No newline at end of file
diff --git a/inventory/dynamic/gcp/README.md b/inventory/dynamic/gcp/README.md
new file mode 100644
index 000000000..217a035ca
--- /dev/null
+++ b/inventory/dynamic/gcp/README.md
@@ -0,0 +1 @@
+This directory provides dynamic inventory for a GCP cluster configured via the GCP provisioning playbook. Set inventory to `inventory/dynamic/gcp/hosts.sh` to calculate the appropriate host set.
\ No newline at end of file
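For example, a plausible invocation once the cluster variables are in place:

```
# Let hosts.sh compute the host set, then run the GCP install playbook.
ansible-playbook -i inventory/dynamic/gcp/hosts.sh \
  playbooks/gcp/openshift-cluster/install.yml
```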
diff --git a/inventory/dynamic/gcp/ansible.cfg b/inventory/dynamic/gcp/ansible.cfg
new file mode 100644
index 000000000..f87d51f28
--- /dev/null
+++ b/inventory/dynamic/gcp/ansible.cfg
@@ -0,0 +1,45 @@
+# config file for ansible -- http://ansible.com/
+# ==============================================
+
+# This config file provides examples for running
+# the OpenShift playbooks with the provided
+# inventory scripts.
+
+[defaults]
+# Set the log_path
+#log_path = /tmp/ansible.log
+
+private_key_file = $HOME/.ssh/google_compute_engine
+
+# Additional default options for OpenShift Ansible
+forks = 50
+host_key_checking = False
+retry_files_enabled = False
+retry_files_save_path = ~/ansible-installer-retries
+nocows = True
+remote_user = cloud-user
+roles_path = ../../../roles/
+gathering = smart
+fact_caching = jsonfile
+fact_caching_connection = $HOME/ansible/facts
+fact_caching_timeout = 600
+callback_whitelist = profile_tasks
+inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt
+# work around privilege escalation timeouts in ansible:
+timeout = 30
+
+# Use the provided dynamic inventory script by default
+inventory = hosts.sh
+
+[inventory]
+# fail more helpfully when the inventory file does not parse (Ansible 2.4+)
+unparsed_is_failed=true
+
+# Additional ssh options for OpenShift Ansible
+[ssh_connection]
+pipelining = True
+ssh_args = -o ControlMaster=auto -o ControlPersist=600s
+timeout = 10
+# shorten the ControlPath which is often too long; when it is,
+# ssh connection reuse silently fails, making everything slower.
+control_path = %(directory)s/%%h-%%r
diff --git a/inventory/dynamic/gcp/group_vars/all/00_defaults.yml b/inventory/dynamic/gcp/group_vars/all/00_defaults.yml
new file mode 100644
index 000000000..2f72e905f
--- /dev/null
+++ b/inventory/dynamic/gcp/group_vars/all/00_defaults.yml
@@ -0,0 +1,42 @@
+# GCP uses non-root users by default, so sudo by default
+---
+ansible_become: yes
+
+openshift_deployment_type: origin
+
+# Debugging settings
+debug_level: 2
+openshift_debug_level: "{{ debug_level }}"
+openshift_master_debug_level: "{{ master_debug_level | default(debug_level, true) }}"
+openshift_node_debug_level: "{{ node_debug_level | default(debug_level, true) }}"
+
+# External API settings
+console_port: 443
+internal_console_port: 8443
+openshift_master_api_port: "8443"
+openshift_master_console_port: "8443"
+openshift_master_cluster_hostname: "internal-openshift-master.{{ public_hosted_zone }}"
+openshift_master_cluster_public_hostname: "openshift-master.{{ public_hosted_zone }}"
+openshift_master_default_subdomain: "{{ wildcard_zone }}"
+
+# Cloud specific settings
+openshift_cloudprovider_kind: gce
+openshift_hosted_registry_storage_provider: gcs
+
+openshift_master_access_token_max_seconds: 2419200
+openshift_master_identity_providers:
+
+# Networking settings
+openshift_node_port_range: 30000-32000
+openshift_node_open_ports: [{"service":"Router stats port", "port":"1936/tcp"}, {"service":"Allowed open host ports", "port":"9000-10000/tcp"}, {"service":"Allowed open host ports", "port":"9000-10000/udp"}]
+openshift_node_sdn_mtu: 1410
+osm_cluster_network_cidr: 172.16.0.0/16
+osm_host_subnet_length: 9
+openshift_portal_net: 172.30.0.0/16
+
+# Default cluster configuration
+openshift_master_cluster_method: native
+openshift_schedulable: true
+# TODO: change to upstream conventions
+openshift_hosted_infra_selector: "role=infra"
+osm_default_node_selector: "role=app"
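These defaults reference `public_hosted_zone` and `wildcard_zone`, which must be supplied separately; one sketch is to drop an override file into the injected directory that entrypoint-gcp links into group_vars/all (the file name and values here are hypothetical):

```
cat > inventory/dynamic/injected/10_overrides.yml <<'EOF'
public_hosted_zone: example.com
wildcard_zone: apps.example.com
openshift_master_identity_providers:
- {'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}
EOF
```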
diff --git a/inventory/dynamic/gcp/hosts.py b/inventory/dynamic/gcp/hosts.py
new file mode 100755
index 000000000..cd1262622
--- /dev/null
+++ b/inventory/dynamic/gcp/hosts.py
@@ -0,0 +1,408 @@
+#!/usr/bin/env python
+# Copyright 2013 Google Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a derivative of gce.py that adds support for filtering
+# the returned inventory to only include instances that have tags
+# as specified by GCE_TAGGED_INSTANCES. This prevents dynamic
+# inventory for multiple clusters within the same project from
+# accidentally stomping each other.
+
+# pylint: skip-file
+
+'''
+GCE external inventory script
+=================================
+
+Generates inventory that Ansible can understand by making API requests
+to Google Compute Engine via the libcloud library. Full install/configuration
+instructions for the gce* modules can be found in the comments of
+ansible/test/gce_tests.py.
+
+When run against a specific host, this script returns the following variables
+based on the data obtained from the libcloud Node object:
+ - gce_uuid
+ - gce_id
+ - gce_image
+ - gce_machine_type
+ - gce_private_ip
+ - gce_public_ip
+ - gce_name
+ - gce_description
+ - gce_status
+ - gce_zone
+ - gce_tags
+ - gce_metadata
+ - gce_network
+
+When run in --list mode, instances are grouped by the following categories:
+ - zone:
+ zone group name examples are us-central1-b, europe-west1-a, etc.
+ - instance tags:
+ An entry is created for each tag. For example, if you have two instances
+ with a common tag called 'foo', they will both be grouped together under
+ the 'tag_foo' name.
+ - network name:
+ the name of the network is appended to 'network_' (e.g. the 'default'
+ network will result in a group named 'network_default')
+ - machine type
+ types follow a pattern like n1-standard-4, g1-small, etc.
+ - running status:
+ group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
+ - image:
+ when using an ephemeral/scratch disk, this will be set to the image name
+ used when creating the instance (e.g. debian-7-wheezy-v20130816). when
+ your instance was created with a root persistent disk it will be set to
+ 'persistent_disk' since there is no current way to determine the image.
+
+Examples:
+ Execute uname on all instances in the us-central1-a zone
+ $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
+
+ Use the GCE inventory script to print out instance specific information
+ $ contrib/inventory/gce.py --host my_instance
+
+Author: Eric Johnson <erjohnso@google.com>
+Contributors: Matt Hite <mhite@hotmail.com>
+Version: 0.0.2
+'''
+
+__requires__ = ['pycrypto>=2.6']
+try:
+ import pkg_resources
+except ImportError:
+ # Use pkg_resources to find the correct versions of libraries and set
+ # sys.path appropriately when there are multiversion installs. We don't
+ # fail here as there is code that better expresses the errors where the
+ # library is used.
+ pass
+
+USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
+USER_AGENT_VERSION="v2"
+
+import sys
+import os
+import time
+import argparse
+import ConfigParser
+
+import logging
+logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import ResourceNotFoundError
+ _ = Provider.GCE
+except:
+ sys.exit("GCE inventory script requires libcloud >= 0.13")
+
+
+class GceInventory(object):
+ def __init__(self):
+ # Read settings and parse CLI arguments
+ self.parse_cli_args()
+ self.config = self.get_config()
+ self.driver = self.get_gce_driver()
+ self.ip_type = self.get_inventory_options()
+ if self.ip_type:
+ self.ip_type = self.ip_type.lower()
+
+ # Just display data for specific host
+ if self.args.host:
+ print(self.json_format_dict(self.node_to_dict(
+ self.get_instance(self.args.host)),
+ pretty=self.args.pretty))
+ sys.exit(0)
+
+ zones = self.parse_env_zones()
+
+ # Otherwise, assume user wants all instances grouped
+ print(self.json_format_dict(self.group_instances(zones),
+ pretty=self.args.pretty))
+ sys.exit(0)
+
+ def get_config(self):
+ """
+ Populates a SafeConfigParser object with defaults and
+ attempts to read an .ini-style configuration from the filename
+ specified in GCE_INI_PATH. If the environment variable is
+ not present, the filename defaults to gce.ini in the current
+ working directory.
+ """
+ gce_ini_default_path = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), "gce.ini")
+ gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
+
+ # Create a ConfigParser.
+ # This provides empty defaults to each key, so that environment
+ # variable configuration (as opposed to INI configuration) is able
+ # to work.
+ config = ConfigParser.SafeConfigParser(defaults={
+ 'gce_service_account_email_address': '',
+ 'gce_service_account_pem_file_path': '',
+ 'gce_project_id': '',
+ 'libcloud_secrets': '',
+ 'inventory_ip_type': '',
+ })
+ if 'gce' not in config.sections():
+ config.add_section('gce')
+ if 'inventory' not in config.sections():
+ config.add_section('inventory')
+
+ config.read(gce_ini_path)
+
+ #########
+ # Section added for processing ini settings
+ #########
+
+ # Set the instance_states filter based on config file options
+ self.instance_states = []
+ if config.has_option('gce', 'instance_states'):
+ states = config.get('gce', 'instance_states')
+ # Ignore if instance_states is an empty string.
+ if states:
+ self.instance_states = states.split(',')
+
+ return config
+
+ def get_inventory_options(self):
+ """Determine inventory options. Environment variables always
+ take precedence over configuration files."""
+ ip_type = self.config.get('inventory', 'inventory_ip_type')
+ # If the appropriate environment variables are set, they override
+ # other configuration
+ ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
+ return ip_type
+
+ def get_gce_driver(self):
+ """Determine the GCE authorization settings and return a
+ libcloud driver.
+ """
+ # Attempt to get GCE params from a configuration file, if one
+ # exists.
+ secrets_path = self.config.get('gce', 'libcloud_secrets')
+ secrets_found = False
+ try:
+ import secrets
+ args = list(getattr(secrets, 'GCE_PARAMS', []))
+ kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
+ secrets_found = True
+ except:
+ pass
+
+ if not secrets_found and secrets_path:
+ if not secrets_path.endswith('secrets.py'):
+ err = "Must specify libcloud secrets file as "
+ err += "/absolute/path/to/secrets.py"
+ sys.exit(err)
+ sys.path.append(os.path.dirname(secrets_path))
+ try:
+ import secrets
+ args = list(getattr(secrets, 'GCE_PARAMS', []))
+ kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
+ secrets_found = True
+ except:
+ pass
+ if not secrets_found:
+ args = [
+ self.config.get('gce','gce_service_account_email_address'),
+ self.config.get('gce','gce_service_account_pem_file_path')
+ ]
+ kwargs = {'project': self.config.get('gce', 'gce_project_id')}
+
+ # If the appropriate environment variables are set, they override
+ # other configuration; process those into our args and kwargs.
+ args[0] = os.environ.get('GCE_EMAIL', args[0])
+ args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
+ kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
+
+ # Retrieve and return the GCE driver.
+ gce = get_driver(Provider.GCE)(*args, **kwargs)
+ gce.connection.user_agent_append(
+ '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
+ )
+ return gce
+
+ def parse_env_zones(self):
+ '''returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
+ If provided, this will be used to filter the results of the group_instances call'''
+ import csv
+ reader = csv.reader([os.environ.get('GCE_ZONE',"")], skipinitialspace=True)
+ zones = [r for r in reader]
+ return [z for z in zones[0]]
+
+ def parse_cli_args(self):
+ ''' Command line argument processing '''
+
+ parser = argparse.ArgumentParser(
+ description='Produce an Ansible Inventory file based on GCE')
+ parser.add_argument('--list', action='store_true', default=True,
+ help='List instances (default: True)')
+ parser.add_argument('--host', action='store',
+ help='Get all information about an instance')
+ parser.add_argument('--tagged', action='store',
+ help='Only include instances with this tag')
+ parser.add_argument('--pretty', action='store_true', default=False,
+ help='Pretty format (default: False)')
+ self.args = parser.parse_args()
+
+ tag_env = os.environ.get('GCE_TAGGED_INSTANCES')
+ if not self.args.tagged and tag_env:
+ self.args.tagged = tag_env
+
+ def node_to_dict(self, inst):
+ md = {}
+
+ if inst is None:
+ return {}
+
+ if inst.extra['metadata'].has_key('items'):
+ for entry in inst.extra['metadata']['items']:
+ md[entry['key']] = entry['value']
+
+ net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
+ # default to external IP unless user has specified they prefer internal
+ if self.ip_type == 'internal':
+ ssh_host = inst.private_ips[0]
+ else:
+ ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
+
+ return {
+ 'gce_uuid': inst.uuid,
+ 'gce_id': inst.id,
+ 'gce_image': inst.image,
+ 'gce_machine_type': inst.size,
+ 'gce_private_ip': inst.private_ips[0],
+ 'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
+ 'gce_name': inst.name,
+ 'gce_description': inst.extra['description'],
+ 'gce_status': inst.extra['status'],
+ 'gce_zone': inst.extra['zone'].name,
+ 'gce_tags': inst.extra['tags'],
+ 'gce_metadata': md,
+ 'gce_network': net,
+ # Hosts don't have a public name, so we add an IP
+ 'ansible_host': ssh_host
+ }
+
+ def get_instance(self, instance_name):
+ '''Gets details about a specific instance '''
+ try:
+ return self.driver.ex_get_node(instance_name)
+ except Exception as e:
+ return None
+
+ def group_instances(self, zones=None):
+ '''Group all instances'''
+ groups = {}
+ meta = {}
+ meta["hostvars"] = {}
+
+ # list_nodes will fail if a disk is in the process of being deleted
+ # from a node, which is not uncommon if other playbooks are managing
+ # the same project. Retry if we receive a not found error.
+ nodes = []
+ tries = 0
+ while True:
+ try:
+ nodes = self.driver.list_nodes()
+ break
+ except ResourceNotFoundError:
+ tries = tries + 1
+ if tries > 15:
+ raise
+ time.sleep(1)
+ continue
+
+ for node in nodes:
+
+ # This check filters on the desired instance states defined in the
+ # config file with the instance_states config option.
+ #
+ # If the instance_states list is _empty_ then _ALL_ states are returned.
+ #
+ # If the instance_states list is _populated_ then check the current
+ # state against the instance_states list
+ if self.instance_states and not node.extra['status'] in self.instance_states:
+ continue
+
+ name = node.name
+
+ if self.args.tagged and self.args.tagged not in node.extra['tags']:
+ continue
+
+ meta["hostvars"][name] = self.node_to_dict(node)
+
+ zone = node.extra['zone'].name
+
+ # To avoid making multiple requests per zone
+ # we list all nodes and then filter the results
+ if zones and zone not in zones:
+ continue
+
+ if groups.has_key(zone): groups[zone].append(name)
+ else: groups[zone] = [name]
+
+ tags = node.extra['tags']
+ for t in tags:
+ if t.startswith('group-'):
+ tag = t[6:]
+ else:
+ tag = 'tag_%s' % t
+ if groups.has_key(tag): groups[tag].append(name)
+ else: groups[tag] = [name]
+
+ net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
+ net = 'network_%s' % net
+ if groups.has_key(net): groups[net].append(name)
+ else: groups[net] = [name]
+
+ machine_type = node.size
+ if groups.has_key(machine_type): groups[machine_type].append(name)
+ else: groups[machine_type] = [name]
+
+ image = node.image or 'persistent_disk'
+ if groups.has_key(image): groups[image].append(name)
+ else: groups[image] = [name]
+
+ status = node.extra['status']
+ stat = 'status_%s' % status.lower()
+ if groups.has_key(stat): groups[stat].append(name)
+ else: groups[stat] = [name]
+
+ groups["_meta"] = meta
+
+ return groups
+
+ def json_format_dict(self, data, pretty=False):
+ ''' Converts a dict to a JSON object and dumps it as a formatted
+ string '''
+
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
+
+
+# Run the script
+GceInventory()
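Usage sketch for the script on its own, with assumed project and tag values:

```
# Group all instances, restricted to one cluster via the tag filter.
GCE_PROJECT=my-project GCE_TAGGED_INSTANCES=ocp-cluster1 \
  ./inventory/dynamic/gcp/hosts.py --list --pretty

# Dump the gce_* hostvars for a single instance.
./inventory/dynamic/gcp/hosts.py --host my-instance-name
```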
diff --git a/inventory/dynamic/gcp/hosts.sh b/inventory/dynamic/gcp/hosts.sh
new file mode 100755
index 000000000..0c88e3a6b
--- /dev/null
+++ b/inventory/dynamic/gcp/hosts.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# Use a playbook to calculate the inventory dynamically from
+# the provided cluster variables.
+src="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+if ! out="$( ansible-playbook --inventory-file "${src}/none" ${src}/../../../playbooks/gcp/openshift-cluster/inventory.yml 2>&1 )"; then
+ echo "error: Inventory configuration failed" 1>&2
+ echo "$out" 1>&2
+ echo "{}"
+ exit 1
+fi
+source "/tmp/inventory.sh"
+exec ${src}/hosts.py
diff --git a/inventory/dynamic/gcp/none b/inventory/dynamic/gcp/none
new file mode 100644
index 000000000..9e26dfeeb
--- /dev/null
+++ b/inventory/dynamic/gcp/none
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/inventory/dynamic/injected/README.md b/inventory/dynamic/injected/README.md
new file mode 100644
index 000000000..5e2e4c549
--- /dev/null
+++ b/inventory/dynamic/injected/README.md
@@ -0,0 +1,3 @@
+This directory may be used to inject inventory into openshift-ansible
+when used in a container. Other scripts like the cloud provider entrypoints
+will automatically use the content of this directory as inventory.
diff --git a/inventory/hosts.example b/inventory/hosts.example
index da60b63e6..f9f331880 100644
--- a/inventory/hosts.example
+++ b/inventory/hosts.example
@@ -845,12 +845,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# See: https://github.com/nickhammond/ansible-logrotate
#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
-# openshift-ansible will wait indefinitely for your input when it detects that the
+# The OpenShift-Ansible installer will fail when it detects that the
# value of openshift_hostname resolves to an IP address not bound to any local
# interfaces. This mis-configuration is problematic for any pod leveraging host
# networking and liveness or readiness probes.
-# Setting this variable to true will override that check.
-#openshift_override_hostname_check=true
+# Setting this variable to false will override that check.
+#openshift_hostname_check=true
# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail
# in versions >= 3.6
diff --git a/inventory/hosts.grafana.example b/inventory/hosts.grafana.example
new file mode 100644
index 000000000..a5999578f
--- /dev/null
+++ b/inventory/hosts.grafana.example
@@ -0,0 +1,12 @@
+[OSEv3:children]
+masters
+nodes
+
+[OSEv3:vars]
+# Grafana Configuration
+#gf_datasource_name="example"
+#gf_prometheus_namespace="openshift-metrics"
+#gf_oauth=true
+
+[masters]
+master
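A plausible way to use this example inventory with the new Grafana playbook ('master' is a placeholder hostname to replace with a real master):

```
ansible-playbook -i inventory/hosts.grafana.example \
  playbooks/openshift-grafana/config.yml
```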
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index a301cfe5b..719e54eb9 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.9.0
-Release: 0.19.0%{?dist}
+Release: 0.23.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -48,7 +48,8 @@ popd
%install
# Base openshift-ansible install
mkdir -p %{buildroot}%{_datadir}/%{name}
-mkdir -p %{buildroot}%{_datadir}/ansible/%{name}
+mkdir -p %{buildroot}%{_datadir}/ansible/%{name}/inventory
+cp -rp inventory/dynamic %{buildroot}%{_datadir}/ansible/%{name}/inventory
# openshift-ansible-bin install
mkdir -p %{buildroot}%{_bindir}
@@ -62,7 +63,7 @@ rm -f %{buildroot}%{python_sitelib}/openshift_ansible/gce
# openshift-ansible-docs install
# Install example inventory into docs/examples
mkdir -p docs/example-inventories
-cp inventory/* docs/example-inventories/
+cp inventory/hosts.* inventory/README.md docs/example-inventories/
# openshift-ansible-files install
cp -rp files %{buildroot}%{_datadir}/ansible/%{name}/
@@ -101,6 +102,7 @@ popd
%license LICENSE
%dir %{_datadir}/ansible/%{name}
%{_datadir}/ansible/%{name}/files
+%{_datadir}/ansible/%{name}/inventory/dynamic
%ghost %{_datadir}/ansible/%{name}/playbooks/common/openshift-master/library.rpmmoved
# ----------------------------------------------------------------------------------
@@ -202,6 +204,112 @@ Atomic OpenShift Utilities includes
%changelog
+* Tue Jan 23 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.23.0
+- docker_image_availability: enable skopeo to use proxies (lmeyer@redhat.com)
+- Install base_packages earlier (mgugino@redhat.com)
+- allow uninstalling AWS objects created by prerequisite playbook
+ (jdiaz@redhat.com)
+- Bug 1536262: Default console and TSB node selector to
+ openshift_hosted_infra_selector (spadgett@redhat.com)
+- Migrate master-config.yaml asset config (spadgett@redhat.com)
+- Fix master scaleup play (mgugino@redhat.com)
+- use admin credentials for tsb install operations (bparees@redhat.com)
+- Fix etcd-upgrade sanity checks (mgugino@redhat.com)
+- Bug 1536253: Pass `--config` flag on oc commands when installing console
+ (spadgett@redhat.com)
+- Fix enterprise registry-console prefix (sdodson@redhat.com)
+- [release-3.7] Fix enterprise registry console image prefix
+ (sdodson@redhat.com)
+- [release-3.6] Fix enterprise registry console image prefix
+ (sdodson@redhat.com)
+- Bug 1512825 - add mux pod failed for Serial number 02 has already been issued
+ (nhosoi@redhat.com)
+- Remove old console asset config (spadgett@redhat.com)
+- Add support for Amazon EC2 C5 instance types (rteague@redhat.com)
+- Fix provider network support at openstack playbook (ltomasbo@redhat.com)
+
+* Fri Jan 19 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.22.0
+- Fix OpenStack readme (tomas@sedovic.cz)
+- Quick installer: deprecate upgrades (vrutkovs@redhat.com)
+- Fix node scaleup plays (mgugino@redhat.com)
+- Rollout console after template service broker install (spadgett@redhat.com)
+- Use openshift_is_containerized instead of openshift_is_atomic when installing
+ etcd (vrutkovs@redhat.com)
+- Bug 1535947: Fix missing task in metrics, logging uninstall playbooks
+ (spadgett@redhat.com)
+- Make openshift_web_console_prefix defaults like other components
+ (sdodson@redhat.com)
+- Allow for firewalld on atomic host (sdodson@redhat.com)
+- Drop the testing repo var from openstack readme (tomas@sedovic.cz)
+- Add Azure to support openshift_cloudprovider_kind (wehe@redhat.com)
+- bug 1523047. Annotate ops projects with an .operation prefix
+ (jcantril@redhat.com)
+- Pull openshift_image_tag from oo_masters_to_config rather oo_first_master.
+ (abutcher@redhat.com)
+- Ensure atomic_proxies are configured with docker (mgugino@redhat.com)
+- Default install_result when reloading generated facts. (abutcher@redhat.com)
+- health checks: update required pkg versions (lmeyer@redhat.com)
+- health checks: factor out get_required_version (lmeyer@redhat.com)
+- package_version check: reuse get_major_minor_version (lmeyer@redhat.com)
+- Rework default TSB prefix and imagename to match other services
+ (vrutkovs@redhat.com)
+- Add new grafana playbook. (mrsiano@gmail.com)
+- Remove duplication in node acceptance playbook and setup master groups so
+ that we can use the first master's ansible_ssh_user when delegating.
+ (abutcher@redhat.com)
+- Setting default storage_class_names for when calling
+ openshift_logging_elasticsearch role (ewolinet@redhat.com)
+- adding check if secret auth is needed (shawn.hurley21@gmail.com)
+- adding asb auth as a secret. (shawn.hurley21@gmail.com)
+- Ensure we are running oc execs against running pods (ewolinet@redhat.com)
+- Automatic profile setting for tuned 2.9 (jmencak@redhat.com)
+- Fix flake8 errors in utils/test (vrutkovs@redhat.com)
+- kibana checks: use six.moves instead of ImportError (vrutkovs@redhat.com)
+
+* Wed Jan 17 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.21.0
+- Add call to 3.8 playbook in 3.9 upgrade (sdodson@redhat.com)
+- Remove 3.8 and 3.9 specific steps right now (sdodson@redhat.com)
+- Exclude 3.9 packages during 3.8 upgrade (sdodson@redhat.com)
+- fix typos (sdodson@redhat.com)
+- Ensure openshift_client_binary is set (sdodson@redhat.com)
+- Add init/main.yml to etc-upgrade (mgugino@redhat.com)
+- Fix a typo in "Determine if growpart is installed" (vrutkovs@redhat.com)
+- Check rc for commands with openshift_client_binary and failed_when
+ (vrutkovs@redhat.com)
+- Update console config for API changes (spadgett@redhat.com)
+- include elasticsearch container name (jvallejo@redhat.com)
+- openshift_checks: repair adhoc list-checks mode (lmeyer@redhat.com)
+- Remove tuned-profiles from list of master packages upgraded
+ (sdodson@redhat.com)
+- Add missing task that got dropped in a refactor (sdodson@redhat.com)
+- Web Console: use a different var for asset config (vrutkovs@redhat.com)
+- Document the inventory change (tomas@sedovic.cz)
+- Move the OpenStack dynamic inventory from sample (tomas@sedovic.cz)
+- fix bug 1534271 (wmeng@redhat.com)
+- Don't use from ansible.module_utils.six as its no longer available in Ansible
+ 2.4 (vrutkovs@redhat.com)
+- Add console RBAC template (spadgett@redhat.com)
+- Setup master groups in order to use the master group's ansible_ssh_user to
+ pull bootstrap kubeconfig. (abutcher@redhat.com)
+- adding ability to add network policy objects. (shawn.hurley21@gmail.com)
+- add python2-boto3 package for centos-based origin-ansible container image
+ (jdiaz@redhat.com)
+- adding ability to interact with network resources. (shawn.hurley21@gmail.com)
+- Adding .ini to inventory_ignore_extensions (bedin@redhat.com)
+
+* Mon Jan 15 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.20.0
+- Adjust openstack provider dependencies versions (bdobreli@redhat.com)
+- Fix openstack provider playbook name in docs (bdobreli@redhat.com)
+- Install web console on upgrade (spadgett@redhat.com)
+- Add var for controller to enable async bindings (jpeeler@redhat.com)
+- Add cluster-operator playbook directory. (abutcher@redhat.com)
+- Move s3 & elb provisioning into their own playbooks s.t. they are applied
+ outside of the openshift_aws master provisioning tasks. (abutcher@redhat.com)
+- Update to AWS EC2 root vol size so that Health Check tasks pass
+ (mazzystr@gmail.com)
+- Configure Kuryr CNI daemon (mdulko@redhat.com)
+- Clean up host-local IPAM data while nodes are drained (danw@redhat.com)
+
* Fri Jan 12 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.19.0
-
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index d203b9cda..bdc98d1e0 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -198,3 +198,17 @@ At this point your cluster should be ready for workloads. Proceed to deploy app
### Still to come
There are more enhancements that are arriving for provisioning. These will include more playbooks that enhance the provisioning capabilities.
+
+## Uninstall / Deprovisioning
+
+At this time, only deprovisioning of the output of the prerequisites step is provided. You must manually remove things like ELBs and scale groups before attempting to undo the work done by the prerequisites step.
+
+To undo the work done by the prerequisites playbook, call the uninstall_prerequisites.yml playbook, using the same inventory file and provisioning_vars.yml file that were used during provisioning.
+
+```
+ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_prerequisites.yml
+```
+
+This should result in removal of the security groups and VPC that were created.
+
+NOTE: If you also want to remove the ssh keys that were uploaded (**these ssh keys would be shared if you are running multiple clusters in the same AWS account**, so we don't remove them by default), add 'openshift_aws_enable_uninstall_shared_objects: True' to your provisioning_vars.yml file.
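Equivalently, the flag can be supplied as an extra var on the command line instead of editing provisioning_vars.yml:

```
ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> \
  -e openshift_aws_enable_uninstall_shared_objects=True \
  uninstall_prerequisites.yml
```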
diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml
index e7bed4f6e..46c453333 100755
--- a/playbooks/aws/openshift-cluster/accept.yml
+++ b/playbooks/aws/openshift-cluster/accept.yml
@@ -1,8 +1,7 @@
#!/usr/bin/ansible-playbook
---
-- name: Setup the vpc and the master node group
+- name: Accept nodes
hosts: localhost
- remote_user: root
gather_facts: no
tasks:
- name: Alert user to variables needed - clusterid
@@ -17,37 +16,7 @@
import_role:
name: lib_openshift
- - name: fetch masters
- ec2_instance_facts:
- region: "{{ openshift_aws_region | default('us-east-1') }}"
- filters:
- "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
- "tag:host-type": master
- instance-state-name: running
- register: mastersout
- retries: 20
- delay: 3
- until: "'instances' in mastersout and mastersout.instances|length > 0"
-
- - name: fetch new node instances
- ec2_instance_facts:
- region: "{{ openshift_aws_region | default('us-east-1') }}"
- filters:
- "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
- "tag:host-type": node
- instance-state-name: running
- register: instancesout
- retries: 20
- delay: 3
- until: "'instances' in instancesout and instancesout.instances|length > 0"
-
- - debug:
- msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}"
-
- - name: approve nodes
- oc_adm_csr:
- #approve_all: True
- nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}"
- timeout: 60
- register: nodeout
- delegate_to: "{{ mastersout.instances[0].public_ip_address }}"
+ - name: accept nodes
+ import_role:
+ name: openshift_aws
+ tasks_from: accept_nodes.yml
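A plausible invocation of the simplified play, using the defaults it advertises:

```
# Accept newly provisioned nodes into the cluster; values are examples.
ansible-playbook -e openshift_aws_clusterid=mycluster \
  -e openshift_aws_region=us-east-1 \
  playbooks/aws/openshift-cluster/accept.yml
```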
diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml
deleted file mode 100644
index 9d9ed29de..000000000
--- a/playbooks/aws/openshift-cluster/hosted.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- import_playbook: ../../openshift-hosted/private/config.yml
-
-- import_playbook: ../../openshift-metrics/private/config.yml
- when: openshift_metrics_install_metrics | default(false) | bool
-
-- import_playbook: ../../openshift-logging/private/config.yml
- when: openshift_logging_install_logging | default(false) | bool
-
-- import_playbook: ../../openshift-prometheus/private/config.yml
- when: openshift_hosted_prometheus_deploy | default(false) | bool
-
-- import_playbook: ../../openshift-service-catalog/private/config.yml
- when: openshift_enable_service_catalog | default(false) | bool
-
-- import_playbook: ../../openshift-management/private/config.yml
- when: openshift_management_install_management | default(false) | bool
-
-- name: Print deprecated variable warning message if necessary
- hosts: oo_first_master
- gather_facts: no
- tasks:
- - debug: msg="{{__deprecation_message}}"
- when:
- - __deprecation_message | default ('') | length > 0
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
index a3fc82f9a..938e83f5e 100644
--- a/playbooks/aws/openshift-cluster/install.yml
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -18,29 +18,8 @@
- name: run the init
import_playbook: ../../init/main.yml
-- name: perform the installer openshift-checks
- import_playbook: ../../openshift-checks/private/install.yml
+- name: configure the control plane
+ import_playbook: ../../common/private/control_plane.yml
-- name: etcd install
- import_playbook: ../../openshift-etcd/private/config.yml
-
-- name: include nfs
- import_playbook: ../../openshift-nfs/private/config.yml
- when: groups.oo_nfs_to_config | default([]) | count > 0
-
-- name: include loadbalancer
- import_playbook: ../../openshift-loadbalancer/private/config.yml
- when: groups.oo_lb_to_config | default([]) | count > 0
-
-- name: include openshift-master config
- import_playbook: ../../openshift-master/private/config.yml
-
-- name: include master additional config
- import_playbook: ../../openshift-master/private/additional_config.yml
-
-- name: include master additional config
+- name: ensure the masters are configured as nodes
import_playbook: ../../openshift-node/private/config.yml
-
-- name: include openshift-glusterfs
- import_playbook: ../../openshift-glusterfs/private/config.yml
- when: groups.oo_glusterfs_to_config | default([]) | count > 0
diff --git a/playbooks/aws/openshift-cluster/provision.yml b/playbooks/aws/openshift-cluster/provision.yml
index 7dde60b7d..d538b862d 100644
--- a/playbooks/aws/openshift-cluster/provision.yml
+++ b/playbooks/aws/openshift-cluster/provision.yml
@@ -1,8 +1,7 @@
---
-- name: Setup the elb and the master node group
+- name: Alert user to variables needed
hosts: localhost
tasks:
-
- name: Alert user to variables needed - clusterid
debug:
msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
@@ -11,6 +10,13 @@
debug:
msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
+- import_playbook: provision_s3.yml
+
+- import_playbook: provision_elb.yml
+
+- name: Create the master node group
+ hosts: localhost
+ tasks:
- name: provision cluster
import_role:
name: openshift_aws
diff --git a/playbooks/aws/openshift-cluster/provision_elb.yml b/playbooks/aws/openshift-cluster/provision_elb.yml
new file mode 100644
index 000000000..9f27dca3b
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_elb.yml
@@ -0,0 +1,9 @@
+---
+- name: Create elb
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: provision elb
+ include_role:
+ name: openshift_aws
+ tasks_from: provision_elb.yml
diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml
index f98f5be9a..bd154fa83 100644
--- a/playbooks/aws/openshift-cluster/provision_install.yml
+++ b/playbooks/aws/openshift-cluster/provision_install.yml
@@ -15,5 +15,5 @@
- name: Include the accept.yml playbook to accept nodes into the cluster
import_playbook: accept.yml
-- name: Include the hosted.yml playbook to finish the hosted configuration
- import_playbook: hosted.yml
+- name: Include the components playbook to finish the hosted configuration
+ import_playbook: ../../common/private/components.yml
diff --git a/playbooks/aws/openshift-cluster/provision_s3.yml b/playbooks/aws/openshift-cluster/provision_s3.yml
new file mode 100644
index 000000000..45b439083
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_s3.yml
@@ -0,0 +1,10 @@
+---
+- name: Create s3 bucket
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: create s3 bucket
+ include_role:
+ name: openshift_aws
+ tasks_from: s3.yml
+ when: openshift_aws_create_s3 | default(true) | bool
diff --git a/playbooks/aws/openshift-cluster/uninstall_prerequisites.yml b/playbooks/aws/openshift-cluster/uninstall_prerequisites.yml
new file mode 100644
index 000000000..180c2281a
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_prerequisites.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: uninstall_sec_group.yml
+
+- import_playbook: uninstall_vpc.yml
+
+- import_playbook: uninstall_ssh_keypair.yml
diff --git a/playbooks/aws/openshift-cluster/uninstall_sec_group.yml b/playbooks/aws/openshift-cluster/uninstall_sec_group.yml
new file mode 100644
index 000000000..642e5b169
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_sec_group.yml
@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: delete security groups
+ include_role:
+ name: openshift_aws
+ tasks_from: uninstall_security_group.yml
+ when: openshift_aws_create_security_groups | default(True) | bool
diff --git a/playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml b/playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml
new file mode 100644
index 000000000..ec9caa51b
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml
@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: remove ssh keypair(s)
+ include_role:
+ name: openshift_aws
+ tasks_from: uninstall_ssh_keys.yml
+ when: openshift_aws_users | default([]) | length > 0
diff --git a/playbooks/aws/openshift-cluster/uninstall_vpc.yml b/playbooks/aws/openshift-cluster/uninstall_vpc.yml
new file mode 100644
index 000000000..4c988bcc5
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_vpc.yml
@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: delete vpc
+ include_role:
+ name: openshift_aws
+ tasks_from: uninstall_vpc.yml
+ when: openshift_aws_create_vpc | default(True) | bool
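
Taken together, the new uninstall entry points can be driven through uninstall_prerequisites.yml; a hypothetical invocation, assuming the same clusterid/region used at provision time:

```bash
# Sketch: removes security groups, the VPC, and SSH keypairs, in that order,
# each guarded by its openshift_aws_create_* / openshift_aws_users condition.
ansible-playbook -i inventory \
  playbooks/aws/openshift-cluster/uninstall_prerequisites.yml \
  -e openshift_aws_clusterid=mycluster \
  -e openshift_aws_region=us-east-1
```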
diff --git a/playbooks/cluster-operator/aws/infrastructure.yml b/playbooks/cluster-operator/aws/infrastructure.yml
new file mode 100644
index 000000000..9669820fb
--- /dev/null
+++ b/playbooks/cluster-operator/aws/infrastructure.yml
@@ -0,0 +1,21 @@
+---
+- name: Alert user to variables needed
+ hosts: localhost
+ tasks:
+ - name: Alert user to variables needed - clusterid
+ debug:
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
+
+ - name: Alert user to variables needed - region
+ debug:
+ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
+
+- import_playbook: ../../aws/openshift-cluster/provision_vpc.yml
+
+- import_playbook: ../../aws/openshift-cluster/provision_ssh_keypair.yml
+
+- import_playbook: ../../aws/openshift-cluster/provision_sec_group.yml
+
+- import_playbook: ../../aws/openshift-cluster/provision_s3.yml
+
+- import_playbook: ../../aws/openshift-cluster/provision_elb.yml
diff --git a/playbooks/cluster-operator/aws/roles b/playbooks/cluster-operator/aws/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/cluster-operator/aws/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 8ee83819e..ba783638d 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -5,7 +5,8 @@
g_new_master_hosts: []
g_new_node_hosts: []
-- import_playbook: ../../../init/facts.yml
+- import_playbook: ../../../init/basic_facts.yml
+- import_playbook: ../../../init/cluster_facts.yml
- name: Ensure firewall is not switched during upgrade
hosts: "{{ l_upgrade_no_switch_firewall_hosts | default('oo_all_hosts') }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index fc1cbf32a..07be0b0d4 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -31,7 +31,7 @@
with_items: " {{ groups['oo_nodes_to_config'] }}"
when:
- hostvars[item].openshift is defined
- - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
+ - hostvars[item].openshift.common.hostname | lower in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
changed_when: false
# Build up the oo_nodes_to_upgrade group, use the list filtered by label if
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 1b57521df..f790fd98d 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -1,7 +1,13 @@
---
-###############################################################################
-# Post upgrade - Upgrade default router, default registry and examples
-###############################################################################
+####################################################################################
+# Post upgrade - Upgrade web console, default router, default registry, and examples
+####################################################################################
+- name: Upgrade web console
+ hosts: oo_first_master
+ roles:
+ - role: openshift_web_console
+ when: openshift_web_console_install | default(true) | bool
+
- name: Upgrade default router and default registry
hosts: oo_first_master
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 4c1156f4b..45ddf7eea 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -21,7 +21,7 @@
block:
- name: Check latest available OpenShift RPM version
repoquery:
- name: "{{ openshift_service_type }}"
+ name: "{{ openshift_service_type }}{{ '-' ~ openshift_release ~ '*' if openshift_release is defined else '' }}"
ignore_excluders: true
register: repoquery_out
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index eb5f07ae0..d88880140 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -14,7 +14,7 @@
- import_playbook: ../init.yml
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 8d42e4c91..ce069e2d0 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -14,7 +14,7 @@
- import_playbook: ../init.yml
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
index 0f74e0137..a9bf354cc 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -35,8 +35,6 @@
# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_7/master_config_upgrade.yml"
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index a2f316c25..3f26a6297 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -14,7 +14,8 @@
- import_playbook: ../init.yml
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ when: not skip_version_info | default(false)
- name: Configure the upgrade target for the common upgrade tasks
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -47,8 +48,6 @@
# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_7/master_config_upgrade.yml"
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
index 1d4d1919c..ed97d539c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
@@ -1,20 +1 @@
---
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.election.lockName'
- yaml_value: 'openshift-master-controllers'
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
- yaml_value: service-signer.crt
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
- yaml_value: service-signer.key
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
- yaml_key: servingInfo.clientCA
- yaml_value: ca.crt
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index 552bea5e7..20e0c165e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -10,6 +10,7 @@
- set_fact:
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.7'
+ openshift_release: '3.9'
- import_playbook: ../pre/config.yml
vars:
@@ -31,8 +32,6 @@
# Pre-upgrade completed
- import_playbook: ../upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_7/master_config_upgrade.yml"
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index ef9871008..0f48725f6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -14,14 +14,20 @@
- import_playbook: ../init.yml
vars:
l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-- name: Configure the upgrade target for the common upgrade tasks
+## Check to see if they're running 3.7 and, if so, upgrade them to 3.8 on the control plane.
+## If they've specified pkg_version or image_tag, preserve that for later use.
+- name: Configure the upgrade target for the common upgrade tasks 3.8
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tasks:
- set_fact:
- openshift_upgrade_target: '3.9'
+ openshift_upgrade_target: '3.8'
openshift_upgrade_min: '3.7'
+ openshift_release: '3.8'
+      _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}"
+      _requested_image_tag: "{{ openshift_image_tag if openshift_image_tag is defined else omit }}"
+ when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
- import_playbook: ../pre/config.yml
  # These vars are meant to exclude oo_nodes from plays that would otherwise include
@@ -35,21 +41,57 @@
l_upgrade_verify_targets_hosts: "oo_masters_to_config"
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
+ when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
-- import_playbook: validator.yml
-
-- name: Flag pre-upgrade checks complete for hosts without errors
+- name: Flag pre-upgrade checks complete for hosts without errors 3.8
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- set_fact:
pre_upgrade_complete: True
+ when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
# Pre-upgrade completed
+- import_playbook: ../upgrade_control_plane.yml
+ vars:
+ openshift_release: '3.8'
+ when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+
+## 3.8 upgrade complete we should now be able to upgrade to 3.9
+
+- name: Configure the upgrade target for the common upgrade tasks 3.9
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tasks:
+ - meta: clear_facts
+ - set_fact:
+ openshift_upgrade_target: '3.9'
+ openshift_upgrade_min: '3.8'
+ openshift_release: '3.9'
+      openshift_pkg_version: "{{ _requested_pkg_version | default('-3.9*') }}"
+ openshift_image_tag: "{{ _requested_image_tag | default('v3.9') }}"
+
+- import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+ # them by default.
+ vars:
+ l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
+ l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+ l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+ l_upgrade_excluder_hosts: "oo_masters_to_config"
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ pre_upgrade_complete: True
- import_playbook: ../upgrade_control_plane.yml
vars:
- master_config_hook: "v3_7/master_config_upgrade.yml"
+ openshift_release: '3.9'
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
@@ -58,13 +100,13 @@
roles:
- role: openshift_facts
tasks:
- - name: Stop {{ openshift.common.service_type }}-master-controllers
+ - name: Stop {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: stopped
- - name: Start {{ openshift.common.service_type }}-master-controllers
+ - name: Start {{ openshift_service_type }}-master-controllers
systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
+ name: "{{ openshift_service_type }}-master-controllers"
state: started
- import_playbook: ../post_control_plane.yml
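
Because the plays above preserve any requested package version or image tag across the intermediate 3.8 hop, a pinned upgrade could be expressed as in this sketch. The direct use of this private playbook path and the pinned values are assumptions for illustration; this file is normally reached through a top-level upgrade entry point.

```bash
# Sketch: user-supplied pins survive the 3.7 -> 3.8 -> 3.9 double hop via
# the _requested_pkg_version / _requested_image_tag facts captured above.
ansible-playbook -i inventory \
  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml \
  -e openshift_pkg_version='-3.9.0' \
  -e openshift_image_tag='v3.9.0'
```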
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
index 1d1b255c1..859b1d88b 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -12,6 +12,7 @@
- set_fact:
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.7'
+ openshift_release: '3.9'
- import_playbook: ../pre/config.yml
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
index 4bd2d87b1..d8540abfb 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
@@ -1,5 +1,5 @@
---
-- name: Verify 3.9 specific upgrade checks
+- name: Verify 3.8 specific upgrade checks
hosts: oo_first_master
roles:
- { role: lib_openshift }
diff --git a/playbooks/common/private/components.yml b/playbooks/common/private/components.yml
new file mode 100644
index 000000000..089645d07
--- /dev/null
+++ b/playbooks/common/private/components.yml
@@ -0,0 +1,38 @@
+---
+# These are the core component plays that configure the layers above the control
+# plane. A component is generally considered any part of OpenShift that runs on
+# top of the cluster and may be considered optional. Over time, much of OpenShift
+# above the Kubernetes apiserver and masters may be considered components.
+#
+# Preconditions:
+#
+# 1. The control plane is configured and reachable from nodes inside the cluster
+# 2. An admin kubeconfig file in /etc/origin/master/admin.kubeconfig that can
+# perform root level actions against the cluster
+# 3. On cloud providers, persistent volume provisioners are configured
+# 4. A subset of nodes is available to allow components to schedule - this must
+# include the masters and usually includes infra nodes.
+# 5. The init/main.yml playbook has been invoked
+
+- import_playbook: ../../openshift-glusterfs/private/config.yml
+ when: groups.oo_glusterfs_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-hosted/private/config.yml
+
+- import_playbook: ../../openshift-web-console/private/config.yml
+ when: openshift_web_console_install | default(true) | bool
+
+- import_playbook: ../../openshift-metrics/private/config.yml
+ when: openshift_metrics_install_metrics | default(false) | bool
+
+- import_playbook: ../../openshift-logging/private/config.yml
+ when: openshift_logging_install_logging | default(false) | bool
+
+- import_playbook: ../../openshift-prometheus/private/config.yml
+ when: openshift_hosted_prometheus_deploy | default(false) | bool
+
+- import_playbook: ../../openshift-service-catalog/private/config.yml
+ when: openshift_enable_service_catalog | default(true) | bool
+
+- import_playbook: ../../openshift-management/private/config.yml
+ when: openshift_management_install_management | default(false) | bool
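
The `when:` guards above mean each component can be toggled from inventory or the CLI. For instance, a run that adds metrics and skips the service catalog might look like this sketch (flag values are illustrative):

```bash
# Sketch: deploy_cluster.yml imports components.yml, so the component
# toggles can be passed straight through as extra-vars.
ansible-playbook -i inventory playbooks/deploy_cluster.yml \
  -e openshift_metrics_install_metrics=true \
  -e openshift_enable_service_catalog=false
```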
diff --git a/playbooks/common/private/control_plane.yml b/playbooks/common/private/control_plane.yml
new file mode 100644
index 000000000..0a5f1142b
--- /dev/null
+++ b/playbooks/common/private/control_plane.yml
@@ -0,0 +1,34 @@
+---
+# These are the control plane plays that configure a control plane on top of hosts
+# identified as masters. Over time, some of the pieces of the current control plane
+# may be moved to the components list.
+#
+# It is not required for any nodes to be configured, or passed to be configured,
+# when this playbook is invoked.
+#
+# Preconditions:
+#
+# 1. A set of machines have been identified to act as masters
+# 2. On cloud providers, a load balancer has been configured to point to the masters
+# and that load balancer has a DNS name
+# 3. The init/main.yml playbook has been invoked
+#
+# Postconditions:
+#
+# 1. The control plane is reachable from outside the cluster
+# 2. The master has an /etc/origin/master/admin.kubeconfig file that gives cluster-admin
+# access.
+
+- import_playbook: ../../openshift-checks/private/install.yml
+
+- import_playbook: ../../openshift-etcd/private/config.yml
+
+- import_playbook: ../../openshift-nfs/private/config.yml
+ when: groups.oo_nfs_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-loadbalancer/private/config.yml
+ when: groups.oo_lb_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-master/private/config.yml
+
+- import_playbook: ../../openshift-master/private/additional_config.yml
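
A minimal inventory satisfying preconditions 1 and 2 above might look like the following sketch; the host names are placeholders and the group names follow inventory/hosts.example.

```bash
# Sketch: placeholder control-plane groups appended to an inventory file.
cat >> inventory <<'EOF'
[masters]
master1.example.com

[etcd]
master1.example.com

[lb]
lb.example.com
EOF
```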
diff --git a/playbooks/container-runtime/private/build_container_groups.yml b/playbooks/container-runtime/private/build_container_groups.yml
index 7fd60743c..8fb7b63e8 100644
--- a/playbooks/container-runtime/private/build_container_groups.yml
+++ b/playbooks/container-runtime/private/build_container_groups.yml
@@ -1,6 +1,8 @@
---
+# l_build_container_groups_hosts is passed in via prerequisites.yml during
+# etcd scaleup plays.
- name: create oo_hosts_containerized_managed_true host group
- hosts: oo_all_hosts:!oo_nodes_to_config
+ hosts: "{{ l_build_container_groups_hosts | default('oo_all_hosts:!oo_nodes_to_config') }}"
tasks:
- group_by:
- key: oo_hosts_containerized_managed_{{ (containerized | default(False)) | ternary('true','false') }}
+ key: oo_hosts_containerized_managed_{{ (openshift_is_containerized | default(False)) | ternary('true','false') }}
diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml
index 7a49adcf0..5396df20a 100644
--- a/playbooks/container-runtime/private/config.yml
+++ b/playbooks/container-runtime/private/config.yml
@@ -1,7 +1,13 @@
---
+# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+# l_etcd_scale_up_hosts may be passed in via prerequisites.yml during etcd
+# scaleup plays.
+
- import_playbook: build_container_groups.yml
-- hosts: oo_nodes_to_config:oo_hosts_containerized_managed_true
+- hosts: "{{ l_etcd_scale_up_hosts | default(l_scale_up_hosts) | default(l_default_container_runtime_hosts) }}"
+ vars:
+ l_default_container_runtime_hosts: "oo_nodes_to_config:oo_hosts_containerized_managed_true"
roles:
- role: container_runtime
tasks:
diff --git a/playbooks/container-runtime/private/setup_storage.yml b/playbooks/container-runtime/private/setup_storage.yml
index a6d396270..586149b1d 100644
--- a/playbooks/container-runtime/private/setup_storage.yml
+++ b/playbooks/container-runtime/private/setup_storage.yml
@@ -1,8 +1,13 @@
---
+# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+# l_etcd_scale_up_hosts may be passed in via prerequisites.yml during etcd
+# scaleup plays.
+
- import_playbook: build_container_groups.yml
-- hosts: oo_nodes_to_config:oo_hosts_containerized_managed_true
+- hosts: "{{ l_etcd_scale_up_hosts | default(l_scale_up_hosts) | default(l_default_container_storage_hosts) }}"
vars:
+ l_default_container_storage_hosts: "oo_nodes_to_config:oo_hosts_containerized_managed_true"
l_chg_temp: "{{ hostvars[groups['oo_first_master'][0]]['openshift_containerized_host_groups'] | default([]) }}"
l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
# role: container_runtime is necessary here to bring role default variables
diff --git a/playbooks/deploy_cluster.yml b/playbooks/deploy_cluster.yml
index 5efdc486a..361553ee4 100644
--- a/playbooks/deploy_cluster.yml
+++ b/playbooks/deploy_cluster.yml
@@ -1,44 +1,11 @@
---
- import_playbook: init/main.yml
-- import_playbook: openshift-checks/private/install.yml
-
-- import_playbook: openshift-etcd/private/config.yml
-
-- import_playbook: openshift-nfs/private/config.yml
- when: groups.oo_nfs_to_config | default([]) | count > 0
-
-- import_playbook: openshift-loadbalancer/private/config.yml
- when: groups.oo_lb_to_config | default([]) | count > 0
-
-- import_playbook: openshift-master/private/config.yml
-
-- import_playbook: openshift-master/private/additional_config.yml
+- import_playbook: common/private/control_plane.yml
- import_playbook: openshift-node/private/config.yml
-- import_playbook: openshift-glusterfs/private/config.yml
- when: groups.oo_glusterfs_to_config | default([]) | count > 0
-
-- import_playbook: openshift-hosted/private/config.yml
-
-- import_playbook: openshift-web-console/private/config.yml
- when: openshift_web_console_install | default(true) | bool
-
-- import_playbook: openshift-metrics/private/config.yml
- when: openshift_metrics_install_metrics | default(false) | bool
-
-- import_playbook: openshift-logging/private/config.yml
- when: openshift_logging_install_logging | default(false) | bool
-
-- import_playbook: openshift-prometheus/private/config.yml
- when: openshift_hosted_prometheus_deploy | default(false) | bool
-
-- import_playbook: openshift-service-catalog/private/config.yml
- when: openshift_enable_service_catalog | default(true) | bool
-
-- import_playbook: openshift-management/private/config.yml
- when: openshift_management_install_management | default(false) | bool
+- import_playbook: common/private/components.yml
- name: Print deprecated variable warning message if necessary
hosts: oo_first_master
diff --git a/playbooks/gcp/openshift-cluster/build_base_image.yml b/playbooks/gcp/openshift-cluster/build_base_image.yml
new file mode 100644
index 000000000..75d0ddf9d
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/build_base_image.yml
@@ -0,0 +1,162 @@
+---
+# This playbook ensures that a base image is up to date with all of the required settings
+- name: Launch image build instance
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: Require openshift_gcp_root_image
+ fail:
+ msg: "A root OS image name or family is required for base image building. Please ensure `openshift_gcp_root_image` is defined."
+ when: openshift_gcp_root_image is undefined
+
+ - name: Create the image instance disk
+ gce_pd:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ name: "{{ openshift_gcp_prefix }}build-image-instance"
+ disk_type: pd-ssd
+ image: "{{ openshift_gcp_root_image }}"
+ size_gb: 10
+ state: present
+
+ - name: Launch the image build instance
+ gce:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ machine_type: n1-standard-1
+ instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: present
+ tags:
+ - build-image-instance
+ disk_auto_delete: false
+ disks:
+ - "{{ openshift_gcp_prefix }}build-image-instance"
+ register: gce
+
+ - add_host:
+ hostname: "{{ item.public_ip }}"
+ groupname: build_instance_ips
+ with_items: "{{ gce.instance_data }}"
+
+ - name: Wait for instance to respond to SSH
+ wait_for:
+ delay: 1
+ host: "{{ item.public_ip }}"
+ port: 22
+ state: started
+ timeout: 120
+ with_items: "{{ gce.instance_data }}"
+
+- name: Prepare instance content sources
+ pre_tasks:
+ - set_fact:
+ allow_rhel_subscriptions: "{{ rhsub_skip | default('no', True) | lower in ['no', 'false'] }}"
+ - set_fact:
+ using_rhel_subscriptions: "{{ (deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise'] or ansible_distribution == 'RedHat') and allow_rhel_subscriptions }}"
+ hosts: build_instance_ips
+ roles:
+ - role: rhel_subscribe
+ when: using_rhel_subscriptions
+ - role: openshift_repos
+ vars:
+ openshift_additional_repos: []
+ post_tasks:
+ - name: Add custom repositories
+ include_role:
+ name: openshift_gcp
+ tasks_from: add_custom_repositories.yml
+ - name: Add the Google Cloud repo
+ yum_repository:
+ name: google-cloud
+ description: Google Cloud Compute
+ baseurl: https://packages.cloud.google.com/yum/repos/google-cloud-compute-el7-x86_64
+ gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+ gpgcheck: yes
+ repo_gpgcheck: yes
+ state: present
+ when: ansible_os_family == "RedHat"
+ - name: Add the jdetiber-qemu-user-static copr repo
+ yum_repository:
+ name: jdetiber-qemu-user-static
+ description: QEMU user static COPR
+ baseurl: https://copr-be.cloud.fedoraproject.org/results/jdetiber/qemu-user-static/epel-7-$basearch/
+ gpgkey: https://copr-be.cloud.fedoraproject.org/results/jdetiber/qemu-user-static/pubkey.gpg
+ gpgcheck: yes
+ repo_gpgcheck: no
+ state: present
+ when: ansible_os_family == "RedHat"
+ - name: Install qemu-user-static
+ package:
+ name: qemu-user-static
+ state: present
+ - name: Start and enable systemd-binfmt service
+ systemd:
+ name: systemd-binfmt
+ state: started
+ enabled: yes
+
+- name: Build image
+ hosts: build_instance_ips
+ pre_tasks:
+ - name: Set up core host GCP configuration
+ include_role:
+ name: openshift_gcp
+ tasks_from: configure_gcp_base_image.yml
+ roles:
+ - role: os_update_latest
+ post_tasks:
+ - name: Disable all repos on RHEL
+ command: subscription-manager repos --disable="*"
+ when: using_rhel_subscriptions
+ - name: Enable repos for packages on RHEL
+ command: subscription-manager repos --enable="rhel-7-server-rpms" --enable="rhel-7-server-extras-rpms"
+ when: using_rhel_subscriptions
+ - name: Install common image prerequisites
+ package: name={{ item }} state=latest
+ with_items:
+ # required by Ansible
+ - PyYAML
+ - docker
+ - google-compute-engine
+ - google-compute-engine-init
+ - google-config
+ - wget
+ - git
+ - net-tools
+ - bind-utils
+ - iptables-services
+ - bridge-utils
+ - bash-completion
+ - name: Clean yum metadata
+ command: yum clean all
+ args:
+ warn: no
+ when: ansible_os_family == "RedHat"
+
+- name: Commit image
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Terminate the image build instance
+ gce:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: absent
+ - name: Save the new image
+    command: gcloud --project "{{ openshift_gcp_project }}" compute images create "{{ openshift_gcp_base_image_name | default(openshift_gcp_base_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}" --source-disk "{{ openshift_gcp_prefix }}build-image-instance" --source-disk-zone "{{ openshift_gcp_zone }}" --family "{{ openshift_gcp_base_image }}"
+ - name: Remove the image instance disk
+ gce_pd:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ name: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: absent
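
A hypothetical invocation of the base-image build, with placeholder project/zone/prefix/keyfile values; `openshift_gcp_root_image` is the one variable the playbook itself requires, and `rhel-7` is only an example value.

```bash
ansible-playbook playbooks/gcp/openshift-cluster/build_base_image.yml \
  -e openshift_gcp_root_image=rhel-7 \
  -e openshift_gcp_project=my-project \
  -e openshift_gcp_zone=us-central1-a \
  -e openshift_gcp_prefix=mycluster- \
  -e openshift_gcp_iam_service_account_keyfile=/path/to/key.json
```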
diff --git a/playbooks/gcp/openshift-cluster/build_image.yml b/playbooks/gcp/openshift-cluster/build_image.yml
new file mode 100644
index 000000000..787de8ebc
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/build_image.yml
@@ -0,0 +1,106 @@
+---
+- name: Verify prerequisites for image build
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: Require openshift_gcp_base_image
+ fail:
+ msg: "A base image name or family is required for image building. Please ensure `openshift_gcp_base_image` is defined."
+ when: openshift_gcp_base_image is undefined
+
+- name: Launch image build instance
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: Set facts
+ set_fact:
+ openshift_node_bootstrap: True
+ openshift_master_unsupported_embedded_etcd: True
+
+ - name: Create the image instance disk
+ gce_pd:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ name: "{{ openshift_gcp_prefix }}build-image-instance"
+ disk_type: pd-ssd
+ image: "{{ openshift_gcp_base_image }}"
+ size_gb: 10
+ state: present
+
+ - name: Launch the image build instance
+ gce:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ machine_type: n1-standard-1
+ instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: present
+ tags:
+ - build-image-instance
+ disk_auto_delete: false
+ disks:
+ - "{{ openshift_gcp_prefix }}build-image-instance"
+ register: gce
+
+ - name: add host to nodes
+ add_host:
+ hostname: "{{ item.public_ip }}"
+ groupname: nodes
+ with_items: "{{ gce.instance_data }}"
+
+ - name: Wait for instance to respond to SSH
+ wait_for:
+ delay: 1
+ host: "{{ item.public_ip }}"
+ port: 22
+ state: started
+ timeout: 120
+ with_items: "{{ gce.instance_data }}"
+
+- hosts: nodes
+ tasks:
+ - name: Set facts
+ set_fact:
+ openshift_node_bootstrap: True
+
+# This is the part that installs all of the software and configs for the instance
+# to become a node.
+- import_playbook: ../../openshift-node/private/image_prep.yml
+
+# Add additional GCP-specific behavior
+- hosts: nodes
+ tasks:
+ - include_role:
+ name: openshift_gcp
+ tasks_from: node_cloud_config.yml
+ - include_role:
+ name: openshift_gcp
+ tasks_from: frequent_log_rotation.yml
+
+- name: Commit image
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Terminate the image build instance
+ gce:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: absent
+ - name: Save the new image
+    command: gcloud --project "{{ openshift_gcp_project }}" compute images create "{{ openshift_gcp_image_name | default(openshift_gcp_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}" --source-disk "{{ openshift_gcp_prefix }}build-image-instance" --source-disk-zone "{{ openshift_gcp_zone }}" --family "{{ openshift_gcp_image }}"
+ - name: Remove the image instance disk
+ gce_pd:
+ service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+ credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+ project_id: "{{ openshift_gcp_project }}"
+ zone: "{{ openshift_gcp_zone }}"
+ name: "{{ openshift_gcp_prefix }}build-image-instance"
+ state: absent
diff --git a/playbooks/gcp/openshift-cluster/deprovision.yml b/playbooks/gcp/openshift-cluster/deprovision.yml
new file mode 100644
index 000000000..589fddd2f
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/deprovision.yml
@@ -0,0 +1,10 @@
+# This playbook terminates a running cluster
+---
+- name: Terminate running cluster and remove all supporting resources in GCE
+ hosts: localhost
+ connection: local
+ tasks:
+ - include_role:
+ name: openshift_gcp
+ vars:
+ state: absent
diff --git a/playbooks/gcp/openshift-cluster/install.yml b/playbooks/gcp/openshift-cluster/install.yml
new file mode 100644
index 000000000..fb35b4348
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/install.yml
@@ -0,0 +1,33 @@
+# This playbook installs onto a provisioned cluster
+---
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: place all scale groups into Ansible groups
+ include_role:
+ name: openshift_gcp
+ tasks_from: setup_scale_group_facts.yml
+
+- name: run the init
+ import_playbook: ../../init/main.yml
+
+- name: configure the control plane
+ import_playbook: ../../common/private/control_plane.yml
+
+- name: ensure the masters are configured as nodes
+ import_playbook: ../../openshift-node/private/config.yml
+
+- name: run the GCP specific post steps
+ import_playbook: install_gcp.yml
+
+- name: install components
+ import_playbook: ../../common/private/components.yml
+
+- hosts: primary_master
+ gather_facts: no
+ tasks:
+ - name: Retrieve cluster configuration
+ fetch:
+ src: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
+ dest: "/tmp/"
+ flat: yes
diff --git a/playbooks/gcp/openshift-cluster/install_gcp.yml b/playbooks/gcp/openshift-cluster/install_gcp.yml
new file mode 100644
index 000000000..09db78971
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/install_gcp.yml
@@ -0,0 +1,21 @@
+---
+- hosts: masters
+ gather_facts: no
+ tasks:
+ - name: create master health check service
+ include_role:
+ name: openshift_gcp
+ tasks_from: configure_master_healthcheck.yml
+ - name: configure node bootstrapping
+ include_role:
+ name: openshift_gcp
+ tasks_from: configure_master_bootstrap.yml
+ when:
+ - openshift_master_bootstrap_enabled | default(False)
+ - name: configure node bootstrap autoapprover
+ include_role:
+ name: openshift_bootstrap_autoapprover
+ tasks_from: main
+ when:
+ - openshift_master_bootstrap_enabled | default(False)
+ - openshift_master_bootstrap_auto_approve | default(False) | bool
diff --git a/playbooks/gcp/openshift-cluster/inventory.yml b/playbooks/gcp/openshift-cluster/inventory.yml
new file mode 100644
index 000000000..96de6d6db
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/inventory.yml
@@ -0,0 +1,10 @@
+---
+- name: Set up the connection variables for retrieving inventory from GCE
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: materialize the inventory
+ include_role:
+ name: openshift_gcp
+ tasks_from: dynamic_inventory.yml
diff --git a/playbooks/gcp/openshift-cluster/launch.yml b/playbooks/gcp/openshift-cluster/launch.yml
new file mode 100644
index 000000000..02f00408a
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/launch.yml
@@ -0,0 +1,12 @@
+# This playbook launches a new cluster or converges it if already launched
+---
+- import_playbook: build_image.yml
+ when: openshift_gcp_build_image | default(False) | bool
+
+- import_playbook: provision.yml
+
+- hosts: localhost
+ tasks:
+ - meta: refresh_inventory
+
+- import_playbook: install.yml
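
End to end, a launch might then be driven as in this sketch; the project/prefix/zone values are placeholders, and `openshift_gcp_build_image` opts into the image build step guarded above.

```bash
ansible-playbook playbooks/gcp/openshift-cluster/launch.yml \
  -e openshift_gcp_project=my-project \
  -e openshift_gcp_prefix=mycluster- \
  -e openshift_gcp_zone=us-central1-a \
  -e openshift_gcp_build_image=true
```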
diff --git a/playbooks/gcp/provision.yml b/playbooks/gcp/openshift-cluster/provision.yml
index b6edf9961..293a195c9 100644
--- a/playbooks/gcp/provision.yml
+++ b/playbooks/gcp/openshift-cluster/provision.yml
@@ -3,11 +3,10 @@
hosts: localhost
connection: local
gather_facts: no
+ roles:
+ - openshift_gcp
tasks:
-
- - name: provision a GCP cluster in the specified project
+ - name: recalculate the dynamic inventory
import_role:
name: openshift_gcp
-
-- name: run the cluster deploy
- import_playbook: ../deploy_cluster.yml
+ tasks_from: dynamic_inventory.yml
diff --git a/playbooks/gcp/openshift-cluster/publish_image.yml b/playbooks/gcp/openshift-cluster/publish_image.yml
new file mode 100644
index 000000000..76fd49e9c
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/publish_image.yml
@@ -0,0 +1,9 @@
+---
+- name: Publish the most recent image
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - import_role:
+ name: openshift_gcp
+ tasks_from: publish_image.yml
diff --git a/playbooks/gcp/openshift-cluster/roles b/playbooks/gcp/openshift-cluster/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/gcp/openshift-cluster/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/init/base_packages.yml b/playbooks/init/base_packages.yml
index 15b3dd492..e1052fb6c 100644
--- a/playbooks/init/base_packages.yml
+++ b/playbooks/init/base_packages.yml
@@ -1,6 +1,8 @@
---
+# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+
- name: Install packages necessary for installer
- hosts: oo_all_hosts
+ hosts: "{{ l_scale_up_hosts | default('oo_all_hosts') }}"
any_errors_fatal: true
tasks:
- when:
diff --git a/playbooks/init/facts.yml b/playbooks/init/basic_facts.yml
index 8e4206948..06a4e7291 100644
--- a/playbooks/init/facts.yml
+++ b/playbooks/init/basic_facts.yml
@@ -4,15 +4,13 @@
any_errors_fatal: true
tasks:
-- name: Initialize host facts
- # l_upgrade_non_node_hosts is passed in via play during control-plane-only
- # upgrades; otherwise oo_all_hosts is used.
- hosts: "{{ l_upgrade_non_node_hosts | default('oo_all_hosts') }}"
+- name: Initialize basic host facts
+ # l_init_fact_hosts is passed in via play during control-plane-only
+ # upgrades and scale-up plays; otherwise oo_all_hosts is used.
+ hosts: "{{ l_init_fact_hosts | default('oo_all_hosts') }}"
+ roles:
+ - role: openshift_facts
tasks:
- - name: load openshift_facts module
- import_role:
- name: openshift_facts
-
# TODO: Should this role be refactored into health_checks??
- name: Run openshift_sanitize_inventory to set variables
import_role:
@@ -58,41 +56,6 @@
- l_atomic_docker_version.stdout | replace('"', '') is version_compare('1.12','>=')
msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
- - name: Gather Cluster facts
- openshift_facts:
- role: common
- local_facts:
- deployment_type: "{{ openshift_deployment_type }}"
- deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
- hostname: "{{ openshift_hostname | default(None) }}"
- ip: "{{ openshift_ip | default(None) }}"
- public_hostname: "{{ openshift_public_hostname | default(None) }}"
- public_ip: "{{ openshift_public_ip | default(None) }}"
- portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
- http_proxy: "{{ openshift_http_proxy | default(None) }}"
- https_proxy: "{{ openshift_https_proxy | default(None) }}"
- no_proxy: "{{ openshift_no_proxy | default(None) }}"
- generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
-
- - name: Set fact of no_proxy_internal_hostnames
- openshift_facts:
- role: common
- local_facts:
- no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
- - name: Initialize openshift.node.sdn_mtu
- openshift_facts:
- role: node
- local_facts:
- sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
-
- name: Initialize special first-master variables
hosts: oo_first_master
roles:
diff --git a/playbooks/init/cluster_facts.yml b/playbooks/init/cluster_facts.yml
new file mode 100644
index 000000000..636679e32
--- /dev/null
+++ b/playbooks/init/cluster_facts.yml
@@ -0,0 +1,42 @@
+---
+- name: Initialize cluster facts
+ # l_init_fact_hosts is passed in via play during control-plane-only
+ # upgrades and scale-up plays; otherwise oo_all_hosts is used.
+ hosts: "{{ l_init_fact_hosts | default('oo_all_hosts') }}"
+ roles:
+ - role: openshift_facts
+ tasks:
+ - name: Gather Cluster facts
+ openshift_facts:
+ role: common
+ local_facts:
+ deployment_type: "{{ openshift_deployment_type }}"
+ deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
+ hostname: "{{ openshift_hostname | default(None) }}"
+ ip: "{{ openshift_ip | default(None) }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ public_ip: "{{ openshift_public_ip | default(None) }}"
+ portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+ http_proxy: "{{ openshift_http_proxy | default(None) }}"
+ https_proxy: "{{ openshift_https_proxy | default(None) }}"
+ no_proxy: "{{ openshift_no_proxy | default(None) }}"
+ generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
+
+ - name: Set fact of no_proxy_internal_hostnames
+ openshift_facts:
+ role: common
+ local_facts:
+ no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+ - name: Initialize openshift.node.sdn_mtu
+ openshift_facts:
+ role: node
+ local_facts:
+ sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml
index 8a3f4682d..9886691e0 100644
--- a/playbooks/init/main.yml
+++ b/playbooks/init/main.yml
@@ -1,4 +1,7 @@
---
+# skip_verison and l_install_base_packages are passed in via prerequisites.yml.
+# skip_sanity_checks is passed in via openshift-node/private/image_prep.yml
+
- name: Initialization Checkpoint Start
hosts: all
gather_facts: false
@@ -15,7 +18,13 @@
- import_playbook: evaluate_groups.yml
-- import_playbook: facts.yml
+- import_playbook: basic_facts.yml
+
+# base_packages needs to be setup for openshift_facts.py to run correctly.
+- import_playbook: base_packages.yml
+ when: l_install_base_packages | default(False) | bool
+
+- import_playbook: cluster_facts.yml
- import_playbook: version.yml
when: not (skip_verison | default(False))
diff --git a/playbooks/init/repos.yml b/playbooks/init/repos.yml
index 667f38ddd..655a7e83a 100644
--- a/playbooks/init/repos.yml
+++ b/playbooks/init/repos.yml
@@ -1,6 +1,8 @@
---
+# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+
- name: Setup yum repositories for all hosts
- hosts: oo_all_hosts
+ hosts: "{{ l_scale_up_hosts | default('oo_all_hosts') }}"
gather_facts: no
tasks:
- name: subscribe instances to Red Hat Subscription Manager
diff --git a/playbooks/init/sanity_checks.yml b/playbooks/init/sanity_checks.yml
index 52bcf42c0..fbbb3f8fb 100644
--- a/playbooks/init/sanity_checks.yml
+++ b/playbooks/init/sanity_checks.yml
@@ -1,4 +1,5 @@
---
+# l_sanity_check_hosts may be passed in during scale-up plays
- name: Verify Requirements
hosts: oo_first_master
roles:
@@ -11,5 +12,5 @@
# Thus, sanity_checks cannot gather new information about any hosts.
- name: Run variable sanity checks
sanity_checks:
- check_hosts: "{{ groups['oo_all_hosts'] }}"
+ check_hosts: "{{ l_sanity_check_hosts | default(groups['oo_all_hosts']) }}"
run_once: True
diff --git a/playbooks/init/validate_hostnames.yml b/playbooks/init/validate_hostnames.yml
index 86e0b2416..b49f7dd08 100644
--- a/playbooks/init/validate_hostnames.yml
+++ b/playbooks/init/validate_hostnames.yml
@@ -25,7 +25,7 @@
when:
- lookupip.stdout != '127.0.0.1'
- lookupip.stdout not in ansible_all_ipv4_addresses
- - openshift_hostname_check | default(true)
+ - openshift_hostname_check | default(true) | bool
- name: Validate openshift_ip exists on node when defined
fail:
@@ -40,4 +40,4 @@
when:
- openshift_ip is defined
- openshift_ip not in ansible_all_ipv4_addresses
- - openshift_ip_check | default(true)
+ - openshift_ip_check | default(true) | bool
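
The added `| bool` casts matter because `-e` extra-vars arrive as strings, and the string "false" is truthy without the cast. Disabling the checks from the CLI now works as intended; a sketch:

```bash
# Sketch: skip the hostname/IP resolution checks on a deploy run.
ansible-playbook -i inventory playbooks/deploy_cluster.yml \
  -e openshift_hostname_check=false \
  -e openshift_ip_check=false
```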
diff --git a/playbooks/openshift-checks/adhoc.yml b/playbooks/openshift-checks/adhoc.yml
index 414090733..249222ae4 100644
--- a/playbooks/openshift-checks/adhoc.yml
+++ b/playbooks/openshift-checks/adhoc.yml
@@ -11,6 +11,7 @@
# usage. Running this play only in localhost speeds up execution.
hosts: localhost
connection: local
+ gather_facts: false
roles:
- openshift_health_checker
vars:
diff --git a/playbooks/openshift-etcd/scaleup.yml b/playbooks/openshift-etcd/scaleup.yml
index 7e9ab6834..656454fe3 100644
--- a/playbooks/openshift-etcd/scaleup.yml
+++ b/playbooks/openshift-etcd/scaleup.yml
@@ -1,4 +1,51 @@
---
+- import_playbook: ../init/evaluate_groups.yml
+
+- name: Ensure there are new_etcd
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: >
+ Detected no new_etcd in inventory. Please add hosts to the
+ new_etcd host group to add etcd hosts.
+ when:
+ - g_new_etcd_hosts | default([]) | length == 0
+
+ - fail:
+ msg: >
+      Detected a new_etcd host that is a member of new_masters or new_nodes. Please
+ run playbooks/openshift-master/scaleup.yml or
+ playbooks/openshift-node/scaleup.yml before running this play.
+ when: >
+ inventory_hostname in (groups['new_masters'] | default([]))
+ or inventory_hostname in (groups['new_nodes'] | default([]))
+
+# We only need to run this if etcd is being installed on a standalone host;
+# If etcd is part of master or node group, there's no need to
+# re-run prerequisites
+- import_playbook: ../prerequisites.yml
+ vars:
+ # We need to ensure container_runtime is only processed for containerized
+ # etcd hosts by setting l_build_container_groups_hosts and l_etcd_scale_up_hosts
+ l_build_container_groups_hosts: "oo_new_etcd_to_config"
+ l_etcd_scale_up_hosts: "oo_hosts_containerized_managed_true"
+ l_scale_up_hosts: "oo_new_etcd_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_new_etcd_to_config"
+ l_sanity_check_hosts: "{{ groups['oo_new_etcd_to_config'] | union(groups['oo_masters_to_config']) | union(groups['oo_etcd_to_config']) }}"
+ when:
+ - inventory_hostname not in groups['oo_masters']
+ - inventory_hostname not in groups['oo_nodes_to_config']
+
+# If this etcd host is part of a master or node, we don't need to run
+# prerequisites; we can just init facts as normal.
- import_playbook: ../init/main.yml
+ vars:
+ skip_verison: True
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_new_etcd_to_config"
+ when:
+ - inventory_hostname in groups['oo_masters']
+ - inventory_hostname in groups['oo_nodes_to_config']
- import_playbook: private/scaleup.yml
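
A typical standalone etcd scale-up would then add the host to `new_etcd` (which must also be wired into the inventory's existing `OSEv3:children` section) and run the play; the host name below is a placeholder.

```bash
cat >> inventory <<'EOF'
[new_etcd]
etcd4.example.com
EOF

ansible-playbook -i inventory playbooks/openshift-etcd/scaleup.yml
```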
diff --git a/playbooks/openshift-etcd/upgrade.yml b/playbooks/openshift-etcd/upgrade.yml
index ccc797527..77999d92c 100644
--- a/playbooks/openshift-etcd/upgrade.yml
+++ b/playbooks/openshift-etcd/upgrade.yml
@@ -1,4 +1,8 @@
---
-- import_playbook: ../init/evaluate_groups.yml
+- import_playbook: ../init/main.yml
+ vars:
+ skip_verison: True
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}"
- import_playbook: private/upgrade_main.yml
diff --git a/playbooks/openshift-grafana/config.yml b/playbooks/openshift-grafana/config.yml
new file mode 100644
index 000000000..c7814207c
--- /dev/null
+++ b/playbooks/openshift-grafana/config.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/config.yml
diff --git a/playbooks/openshift-grafana/private/config.yml b/playbooks/openshift-grafana/private/config.yml
new file mode 100644
index 000000000..ac753d63b
--- /dev/null
+++ b/playbooks/openshift-grafana/private/config.yml
@@ -0,0 +1,6 @@
+---
+- name: Deploy grafana server
+ hosts: masters
+ tasks:
+ - include_role:
+ name: openshift_grafana
diff --git a/playbooks/openshift-grafana/private/filter_plugins b/playbooks/openshift-grafana/private/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/openshift-grafana/private/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/openshift-grafana/private/lookup_plugins b/playbooks/openshift-grafana/private/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/openshift-grafana/private/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/openshift-grafana/private/roles b/playbooks/openshift-grafana/private/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/openshift-grafana/private/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/playbooks/openshift-loadbalancer/private/config.yml b/playbooks/openshift-loadbalancer/private/config.yml
index 54c8483c8..4a83dd955 100644
--- a/playbooks/openshift-loadbalancer/private/config.yml
+++ b/playbooks/openshift-loadbalancer/private/config.yml
@@ -24,7 +24,7 @@
openshift_use_nuage | default(false),
nuage_mon_rest_server_port | default(none)))
+ openshift_loadbalancer_additional_backends | default([]) }}"
- openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
+ openshift_image_tag: "{{ hostvars[groups.oo_masters_to_config.0].openshift_image_tag }}"
roles:
- role: openshift_loadbalancer
- role: tuned
diff --git a/playbooks/openshift-master/scaleup.yml b/playbooks/openshift-master/scaleup.yml
index 7d31340a2..09e205afc 100644
--- a/playbooks/openshift-master/scaleup.yml
+++ b/playbooks/openshift-master/scaleup.yml
@@ -1,22 +1,43 @@
---
- import_playbook: ../init/evaluate_groups.yml
-- name: Ensure there are new_masters or new_nodes
+- name: Ensure there are new_masters and new_nodes
hosts: localhost
connection: local
gather_facts: no
tasks:
- fail:
+ # new_masters must be part of new_nodes as well; otherwise if new_nodes
+ # is not present, oo_nodes_to_config will contain all existing nodes.
msg: >
- Detected no new_masters or no new_nodes in inventory. Please
- add hosts to the new_masters and new_nodes host groups to add
- masters.
- when:
- - g_new_master_hosts | default([]) | length == 0
- - g_new_node_hosts | default([]) | length == 0
+ Detected no new_masters and/or no new_nodes in inventory. New
+ masters must be part of both new_masters and new_nodes groups.
+ If you are adding just new_nodes, use the
+ playbooks/openshift-node/scaleup.yml play.
+ when: >
+ g_new_master_hosts | default([]) | length == 0
+ or g_new_node_hosts | default([]) | length == 0
-# Need a better way to do the above check for node without
-# running evaluate_groups and init/main.yml
-- import_playbook: ../init/main.yml
+- name: Ensure each new master is also a new node
+ hosts: oo_masters_to_config
+ connection: local
+ gather_facts: no
+ tasks:
+ - fail:
+    # new_masters must be part of new_nodes as well.
+ msg: >
+ Each host in new_masters must also appear in new_nodes
+ when: inventory_hostname not in groups['oo_nodes_to_config']
+
+- import_playbook: ../prerequisites.yml
+ vars:
+ l_scale_up_hosts: "oo_nodes_to_config:oo_masters_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_config"
+ l_sanity_check_hosts: "{{ groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) }}"
+
+- import_playbook: ../init/version.yml
+ vars:
+ l_openshift_version_set_hosts: "oo_masters_to_config:oo_nodes_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_masters_to_config:oo_nodes_to_config"
- import_playbook: private/scaleup.yml
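
Per the checks above, every new master must be listed in both groups; a sketch with a placeholder host:

```bash
cat >> inventory <<'EOF'
[new_masters]
master4.example.com

[new_nodes]
master4.example.com
EOF

ansible-playbook -i inventory playbooks/openshift-master/scaleup.yml
```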
diff --git a/playbooks/openshift-node/scaleup.yml b/playbooks/openshift-node/scaleup.yml
index cf13692ae..9cc7263b7 100644
--- a/playbooks/openshift-node/scaleup.yml
+++ b/playbooks/openshift-node/scaleup.yml
@@ -12,9 +12,27 @@
new_nodes host group to add nodes.
when:
- g_new_node_hosts | default([]) | length == 0
+ - fail:
+ msg: >
+      Please run playbooks/openshift-master/scaleup.yml if you need to
+      scale up both masters and nodes. This playbook is only for adding
+      new nodes, not new masters.
+ when:
+ - g_new_node_hosts | default([]) | length > 0
+ - g_new_master_hosts | default([]) | length > 0
+
+# if g_new_node_hosts is not empty, oo_nodes_to_config will be set to
+# g_new_node_hosts via evaluate_groups.yml
+
+- import_playbook: ../prerequisites.yml
+ vars:
+ l_scale_up_hosts: "oo_nodes_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_config"
+ l_sanity_check_hosts: "{{ groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) }}"
-# Need a better way to do the above check for node without
-# running evaluate_groups and init/main.yml
-- import_playbook: ../init/main.yml
+- import_playbook: ../init/version.yml
+ vars:
+ l_openshift_version_set_hosts: "oo_nodes_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_nodes_to_config"
- import_playbook: private/config.yml
diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md
index d361d6278..842bb34de 100644
--- a/playbooks/openstack/README.md
+++ b/playbooks/openstack/README.md
@@ -30,15 +30,17 @@ version 10) or newer. It must also satisfy these requirements:
- look at
the [Minimum Hardware Requirements page][hardware-requirements]
for production
-* The keypair for SSH must be available in openstack
-* `keystonerc` file that lets you talk to the openstack services
+* The keypair for SSH must be available in OpenStack
+* `keystonerc` file that lets you talk to the OpenStack services
* NOTE: only Keystone V2 is currently supported
+* A host with the supported version of [Ansible][ansible] installed; see the
+ [Setup section of the openshift-ansible README][openshift-ansible-setup]
+ for details on the requirements.
Optional:
* External Neutron network with a floating IP address pool
-
## Installation
There are four main parts to the installation:
@@ -68,12 +70,11 @@ First, you need to select where to run [Ansible][ansible] from (the
*Ansible host*). This can be the computer you read this guide on or an
OpenStack VM you'll create specifically for this purpose.
-We will use
-a
+This guide will use a
[Docker image that has all the dependencies installed][control-host-image] to
make things easier. If you don't want to use Docker, take a look at
the [Ansible host dependencies][ansible-dependencies] and make sure
-they're installed.
+they are installed.
Your *Ansible host* needs to have the following:
@@ -183,13 +184,16 @@ Then run the provision + install playbook -- this will create the OpenStack
resources:
```bash
-$ ansible-playbook --user openshift -i inventory \
- openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yaml \
- -e openshift_repos_enable_testing=true
+$ ansible-playbook --user openshift \
+ -i openshift-ansible/playbooks/openstack/inventory.py \
+ -i inventory \
+ openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yml
```
-Note, you may want to use the testing repo for development purposes only.
-Normally, `openshift_repos_enable_testing` should not be specified.
+In addition to *your* inventory with your OpenShift and OpenStack
+configuration, we are also supplying the [dynamic inventory][dynamic] from
+`openshift-ansible/playbooks/openstack/inventory.py`. It's a script that looks
+at the Nova servers and other resources that will be created and lets Ansible
+know about them.
If you're using multiple inventories, make sure you pass the path to
the right one to `-i`.
@@ -219,6 +223,7 @@ advanced configuration:
[ansible]: https://www.ansible.com/
[openshift-ansible]: https://github.com/openshift/openshift-ansible
+[openshift-ansible-setup]: https://github.com/openshift/openshift-ansible#setup
[devstack]: https://docs.openstack.org/devstack/
[tripleo]: http://tripleo.org/
[ansible-dependencies]: ./advanced-configuration.md#dependencies-for-localhost-ansible-controladmin-node
@@ -233,3 +238,4 @@ advanced configuration:
[loadbalancer]: ./advanced-configuration.md#multi-master-configuration
[external-dns]: ./advanced-configuration.md#dns-configuration-variables
[cinder-registry]: ./advanced-configuration.md#creating-and-using-a-cinder-volume-for-the-openshift-registry
+[dynamic]: http://docs.ansible.com/ansible/latest/intro_dynamic_inventory.html
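
For readers new to the [dynamic inventory][dynamic] mechanism referenced above: Ansible runs the script with `--list` and reads a JSON document of groups and host variables from stdout. A minimal sketch of that contract, not of what `inventory.py` actually returns (group and host names here are made up):

```python
#!/usr/bin/env python
# Minimal dynamic-inventory sketch: Ansible invokes this with --list
# and parses the JSON printed on stdout.
import json
import sys

def main():
    inventory = {
        # hypothetical group with one made-up host
        "masters": {"hosts": ["master-0.example.com"]},
        # _meta/hostvars avoids one --host call per host
        "_meta": {
            "hostvars": {
                "master-0.example.com": {"ansible_host": "10.0.0.5"}
            }
        },
    }
    if len(sys.argv) > 1 and sys.argv[1] == "--list":
        print(json.dumps(inventory))
    else:
        # --host <name> fallback; hostvars are already in _meta
        print(json.dumps({}))

if __name__ == "__main__":
    main()
```
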
diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md
index afa56d168..e8f4cfc32 100644
--- a/playbooks/openstack/advanced-configuration.md
+++ b/playbooks/openstack/advanced-configuration.md
@@ -1,9 +1,8 @@
## Dependencies for localhost (ansible control/admin node)
-* [Ansible 2.3](https://pypi.python.org/pypi/ansible)
-* [Ansible-galaxy](https://pypi.python.org/pypi/ansible-galaxy-local-deps)
-* [jinja2](http://jinja.pocoo.org/docs/2.9/)
-* [shade](https://pypi.python.org/pypi/shade)
+* [Ansible](https://pypi.python.org/pypi/ansible) version >= 2.4.0
+* [jinja2](http://jinja.pocoo.org/docs/2.9/) version >= 2.10
+* [shade](https://pypi.python.org/pypi/shade) version >= 1.26
* python-jmespath / [jmespath](https://pypi.python.org/pypi/jmespath)
* python-dns / [dnspython](https://pypi.python.org/pypi/dnspython)
* Become (sudo) is not required.
diff --git a/playbooks/openstack/sample-inventory/inventory.py b/playbooks/openstack/inventory.py
index 76e658eb7..76e658eb7 100755
--- a/playbooks/openstack/sample-inventory/inventory.py
+++ b/playbooks/openstack/inventory.py
diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml
index a38d7bff7..73c1926a0 100644
--- a/playbooks/openstack/openshift-cluster/provision.yml
+++ b/playbooks/openstack/openshift-cluster/provision.yml
@@ -26,8 +26,8 @@
- name: Gather facts for the new nodes
setup:
-- name: set common facts
- import_playbook: ../../init/facts.yml
+- import_playbook: ../../init/basic_facts.yml
+- import_playbook: ../../init/cluster_facts.yml
# TODO(shadower): consider splitting this up so people can stop here
diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
index a8663f946..1287b25f3 100644
--- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
@@ -43,7 +43,7 @@ openshift_hosted_registry_wait: True
# NOTE(shadower): the hostname check seems to always fail because the
# host's floating IP address doesn't match the address received from
# inside the host.
-openshift_override_hostname_check: true
+openshift_hostname_check: false
# For POCs or demo environments that are using smaller instances than
# the official recommended values for RAM and DISK, uncomment the line below.
diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml
index 7802f83d9..0b76ca862 100644
--- a/playbooks/prerequisites.yml
+++ b/playbooks/prerequisites.yml
@@ -1,18 +1,21 @@
---
+# l_scale_up_hosts may be passed in via various scaleup plays.
+
- import_playbook: init/main.yml
vars:
skip_verison: True
+ l_install_base_packages: True
- import_playbook: init/validate_hostnames.yml
when: not (skip_validate_hostnames | default(False))
- import_playbook: init/repos.yml
-- import_playbook: init/base_packages.yml
-
# This is required for container runtime for crio, only needs to run once.
- name: Configure os_firewall
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config
+ hosts: "{{ l_scale_up_hosts | default(l_default_firewall_hosts) }}"
+ vars:
+ l_default_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config"
roles:
- role: os_firewall
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
index ba2f7293b..f869b5fae 100644
--- a/roles/ansible_service_broker/tasks/install.yml
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -72,6 +72,15 @@
- apiGroups: ["image.openshift.io", ""]
resources: ["images"]
verbs: ["get", "list"]
+ - apiGroups: ["network.openshift.io"]
+ resources: ["clusternetworks", "netnamespaces"]
+ verbs: ["get"]
+ - apiGroups: ["network.openshift.io"]
+ resources: ["netnamespaces"]
+ verbs: ["update"]
+ - apiGroups: ["networking.k8s.io"]
+ resources: ["networkpolicies"]
+ verbs: ["create", "delete"]
- name: Create asb-access cluster role
oc_clusterrole:
@@ -366,6 +375,11 @@
secret:
secretName: etcd-auth-secret
+- name: set auth name and type facts if needed
+ set_fact:
+ ansible_service_broker_registry_auth_type: "secret"
+ ansible_service_broker_registry_auth_name: "asb-registry-auth"
+ when: ansible_service_broker_registry_user != "" and ansible_service_broker_registry_password != ""
# TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following:
- name: Create config map for ansible-service-broker
@@ -393,6 +407,8 @@
org: {{ ansible_service_broker_registry_organization }}
tag: {{ ansible_service_broker_registry_tag }}
white_list: {{ ansible_service_broker_registry_whitelist | to_yaml }}
+ auth_type: "{{ ansible_service_broker_registry_auth_type | default("") }}"
+ auth_name: "{{ ansible_service_broker_registry_auth_name | default("") }}"
- type: local_openshift
name: localregistry
namespaces: ['openshift']
@@ -438,6 +454,7 @@
data: "{{ ansible_service_broker_registry_user }}"
- path: password
data: "{{ ansible_service_broker_registry_password }}"
+ when: ansible_service_broker_registry_user != "" and ansible_service_broker_registry_password != ""
- name: Create the Broker resource in the catalog
oc_obj:
diff --git a/roles/calico_master/tasks/main.yml b/roles/calico_master/tasks/main.yml
index 05415a4d6..834ebba64 100644
--- a/roles/calico_master/tasks/main.yml
+++ b/roles/calico_master/tasks/main.yml
@@ -23,7 +23,7 @@
-f {{ mktemp.stdout }}/calico-policy-controller.yml
--config={{ openshift.common.config_base }}/master/admin.kubeconfig
register: calico_create_output
- failed_when: ('already exists' not in calico_create_output.stderr) and ('created' not in calico_create_output.stdout)
+ failed_when: "('already exists' not in calico_create_output.stderr) and ('created' not in calico_create_output.stdout) and calico_create_output.rc != 0"
changed_when: ('created' in calico_create_output.stdout)
- name: Calico Master | Delete temp directory
diff --git a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml
index d0e37e2f4..8203d15f5 100644
--- a/roles/container_runtime/defaults/main.yml
+++ b/roles/container_runtime/defaults/main.yml
@@ -101,45 +101,34 @@ l_crt_crio_image_tag_dict:
openshift-enterprise: "{{ l_openshift_image_tag }}"
origin: "{{ openshift_crio_image_tag | default(openshift_crio_image_tag_default) }}"
-l_crt_crio_image_prepend_dict:
- openshift-enterprise: "registry.access.redhat.com/openshift3"
- origin: "docker.io/gscrivano"
-
l_crt_crio_image_dict:
- Fedora:
- crio_image_name: "cri-o-fedora"
- crio_image_tag: "latest"
- CentOS:
- crio_image_name: "cri-o-centos"
- crio_image_tag: "latest"
- RedHat:
- crio_image_name: "cri-o"
- crio_image_tag: "{{ openshift_crio_image_tag | default(l_crt_crio_image_tag_dict[openshift_deployment_type]) }}"
-
-l_crio_image_prepend: "{{ l_crt_crio_image_prepend_dict[openshift_deployment_type] }}"
-l_crio_image_name: "{{ l_crt_crio_image_dict[ansible_distribution]['crio_image_name'] }}"
-l_crio_image_tag: "{{ l_crt_crio_image_dict[ansible_distribution] }}"
-
-l_crio_image_default: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}"
+ Fedora: "registry.fedoraproject.org/latest/cri-o"
+ CentOS: "registry.centos.org/projectatomic/cri-o"
+ RedHat: "registry.access.redhat.com/openshift3/cri-o"
+
+l_crio_image_name: "{{ l_crt_crio_image_dict[ansible_distribution] }}"
+l_crio_image_tag: "{{ l_crt_crio_image_tag_dict[openshift_deployment_type] }}"
+
+l_crio_image_default: "{{ l_crio_image_name }}:{{ l_crio_image_tag }}"
l_crio_image: "{{ openshift_crio_systemcontainer_image_override | default(l_crio_image_default) }}"
# ----------------------- #
# systemcontainers_docker #
# ----------------------- #
-l_crt_docker_image_prepend_dict:
- Fedora: "registry.fedoraproject.org/latest"
- Centos: "docker.io/gscrivano"
- RedHat: "registry.access.redhat.com/openshift3"
+l_crt_docker_image_dict:
+ Fedora: "registry.fedoraproject.org/latest/docker"
+ Centos: "registry.centos.org/projectatomic/docker"
+ RedHat: "registry.access.redhat.com/openshift3/container-engine"
openshift_docker_image_tag_default: "latest"
l_crt_docker_image_tag_dict:
openshift-enterprise: "{{ l_openshift_image_tag }}"
origin: "{{ openshift_docker_image_tag | default(openshift_docker_image_tag_default) }}"
-l_docker_image_prepend: "{{ l_crt_docker_image_prepend_dict[ansible_distribution] }}"
+l_docker_image_prepend: "{{ l_crt_docker_image_dict[ansible_distribution] }}"
l_docker_image_tag: "{{ l_crt_docker_image_tag_dict[openshift_deployment_type] }}"
-l_docker_image_default: "{{ l_docker_image_prepend }}/{{ openshift_docker_service_name }}:{{ l_docker_image_tag }}"
+l_docker_image_default: "{{ l_docker_image_prepend }}:{{ l_docker_image_tag }}"
l_docker_image: "{{ openshift_docker_systemcontainer_image_override | default(l_docker_image_default) }}"
l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
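
The refactor above collapses the old prepend/name/tag triple into a single registry path keyed by distribution, with the tag still chosen by deployment type. A sketch of the resulting lookup (dicts copied from the defaults above; the enterprise tag shown is an example value):

```python
# Image resolution as wired in defaults/main.yml after this change.
crio_image_by_distro = {
    "Fedora": "registry.fedoraproject.org/latest/cri-o",
    "CentOS": "registry.centos.org/projectatomic/cri-o",
    "RedHat": "registry.access.redhat.com/openshift3/cri-o",
}
crio_tag_by_deployment = {
    "openshift-enterprise": "v3.9.0",  # example l_openshift_image_tag
    "origin": "latest",                # example default tag
}

def crio_image(distro, deployment_type, override=None):
    # override plays the role of openshift_crio_systemcontainer_image_override
    if override:
        return override
    return "{}:{}".format(crio_image_by_distro[distro],
                          crio_tag_by_deployment[deployment_type])

print(crio_image("CentOS", "origin"))
# -> registry.centos.org/projectatomic/cri-o:latest
```
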
diff --git a/roles/container_runtime/tasks/package_docker.yml b/roles/container_runtime/tasks/package_docker.yml
index d6e7e7fed..ed9a2709b 100644
--- a/roles/container_runtime/tasks/package_docker.yml
+++ b/roles/container_runtime/tasks/package_docker.yml
@@ -1,6 +1,17 @@
---
- include_tasks: common/pre.yml
+# Some services may run as system containers even though docker itself is
+# still installed via rpm.
+- include_tasks: common/atomic_proxy.yml
+ when:
+ - >
+ (openshift_use_system_containers | default(False)) | bool
+ or (openshift_use_etcd_system_container | default(False)) | bool
+ or (openshift_use_openvswitch_system_container | default(False)) | bool
+ or (openshift_use_node_system_container | default(False)) | bool
+ or (openshift_use_master_system_container | default(False)) | bool
+
- name: Get current installed Docker version
command: "{{ repoquery_installed }} --qf '%{version}' docker"
when: not openshift_is_atomic | bool
diff --git a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
index 881a8c270..cab835e20 100644
--- a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
+++ b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
@@ -1,7 +1,7 @@
---
- name: Install etcd for etcdctl
package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
- when: not openshift_is_atomic | bool
+ when: not openshift_is_containerized | bool
register: result
until: result is succeeded
diff --git a/roles/flannel/meta/main.yml b/roles/flannel/meta/main.yml
index 7634b8192..38d2f748b 100644
--- a/roles/flannel/meta/main.yml
+++ b/roles/flannel/meta/main.yml
@@ -14,3 +14,4 @@ galaxy_info:
- system
dependencies:
- role: lib_utils
+- role: openshift_facts
diff --git a/roles/kuryr/tasks/node.yaml b/roles/kuryr/tasks/node.yaml
index 08f2d5adc..41d0ead20 100644
--- a/roles/kuryr/tasks/node.yaml
+++ b/roles/kuryr/tasks/node.yaml
@@ -40,7 +40,7 @@
regexp: '^OPTIONS="?(.*?)"?$'
backrefs: yes
backup: yes
- line: 'OPTIONS="\1 --disable dns,proxy,plugins"'
+ line: 'OPTIONS="\1 --disable proxy"'
- name: force node restart to disable the proxy
service:
diff --git a/roles/kuryr/templates/cni-daemonset.yaml.j2 b/roles/kuryr/templates/cni-daemonset.yaml.j2
index 39348ae90..09f4c7dfe 100644
--- a/roles/kuryr/templates/cni-daemonset.yaml.j2
+++ b/roles/kuryr/templates/cni-daemonset.yaml.j2
@@ -26,6 +26,13 @@ spec:
image: kuryr/cni:latest
imagePullPolicy: IfNotPresent
command: [ "cni_ds_init" ]
+ env:
+ - name: CNI_DAEMON
+ value: "True"
+ - name: KUBERNETES_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
securityContext:
privileged: true
volumeMounts:
@@ -38,6 +45,10 @@ spec:
subPath: kuryr-cni.conf
- name: etc
mountPath: /etc
+ - name: proc
+ mountPath: /host_proc
+ - name: openvswitch
+ mountPath: /var/run/openvswitch
volumes:
- name: bin
hostPath:
@@ -50,4 +61,10 @@ spec:
name: kuryr-config
- name: etc
hostPath:
- path: /etc
\ No newline at end of file
+ path: /etc
+ - name: proc
+ hostPath:
+ path: /proc
+ - name: openvswitch
+ hostPath:
+ path: /var/run/openvswitch
diff --git a/roles/kuryr/templates/configmap.yaml.j2 b/roles/kuryr/templates/configmap.yaml.j2
index 96c215f00..4bf1dbddf 100644
--- a/roles/kuryr/templates/configmap.yaml.j2
+++ b/roles/kuryr/templates/configmap.yaml.j2
@@ -16,17 +16,17 @@ data:
# Directory for Kuryr vif binding executables. (string value)
#bindir = /usr/libexec/kuryr
+ # Neutron subnetpool name will be prefixed by this. (string value)
+ #subnetpool_name_prefix = kuryrPool
+
+ # baremetal or nested-containers are the supported values. (string value)
+ #deployment_type = baremetal
+
# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
# Note: This option can be changed without restarting.
#debug = false
- # DEPRECATED: If set to false, the logging level will be set to WARNING instead
- # of the default INFO level. (boolean value)
- # This option is deprecated for removal.
- # Its value may be silently ignored in the future.
- #verbose = true
-
# The name of a logging configuration file. This file is appended to any
# existing logging configuration files. For details about logging configuration
# files, see the Python logging module documentation. Note that when logging
@@ -46,7 +46,7 @@ data:
# logging will go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
- #log_file = /var/log/kuryr/kuryr-controller.log
+ #log_file = <None>
# (Optional) The base directory used for relative log_file paths. This option
# is ignored if log_config_append is set. (string value)
@@ -65,13 +65,19 @@ data:
# is set. (boolean value)
#use_syslog = false
+ # Enable journald for logging. If running in a systemd environment you may wish
+ # to enable journal support. Doing so will use the journal native protocol
+ # which includes structured metadata in addition to log messages. This option is
+ # ignored if log_config_append is set. (boolean value)
+ #use_journal = false
+
# Syslog facility to receive log lines. This option is ignored if
# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
# Log output to standard error. This option is ignored if log_config_append is
# set. (boolean value)
- #use_stderr = true
+ #use_stderr = false
# Format string to use for log messages with context. (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
@@ -93,7 +99,7 @@ data:
# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set. (list value)
- #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+ #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
@@ -106,15 +112,86 @@ data:
# value)
#instance_uuid_format = "[instance: %(uuid)s] "
+ # Interval, number of seconds, of log rate limiting. (integer value)
+ #rate_limit_interval = 0
+
+ # Maximum number of logged messages per rate_limit_interval. (integer value)
+ #rate_limit_burst = 0
+
+ # Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG
+ # or empty string. Logs with level greater or equal to rate_limit_except_level
+ # are not filtered. An empty string means that all levels are filtered. (string
+ # value)
+ #rate_limit_except_level = CRITICAL
+
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
[binding]
+ # Configuration options for container interface binding.
- driver = kuryr.lib.binding.drivers.vlan
+ #
+ # From kuryr_kubernetes
+ #
+
+ # The name prefix of the veth endpoint put inside the container. (string value)
+ #veth_dst_prefix = eth
+
+ # Driver to use for binding and unbinding ports. (string value)
+ # Deprecated group/name - [binding]/driver
+ #default_driver = kuryr.lib.binding.drivers.veth
+
+ # Drivers to use for binding and unbinding ports. (list value)
+ #enabled_drivers = kuryr.lib.binding.drivers.veth
+
+ # Specifies the name of the Nova instance interface to link the virtual devices
+ # to (only applicable to some binding drivers). (string value)
link_iface = eth0
+ driver = kuryr.lib.binding.drivers.vlan
+
+
+ [cni_daemon]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Enable CNI Daemon configuration. (boolean value)
+ daemon_enabled = true
+
+ # Bind address for CNI daemon HTTP server. It is recommended to allow only local
+ # connections. (string value)
+ bind_address = 127.0.0.1:50036
+
+ # Maximum number of processes that will be spawned to process requests from CNI
+ # driver. (integer value)
+ #worker_num = 30
+
+ # Time (in seconds) the CNI daemon will wait for VIF annotation to appear in
+ # pod metadata before failing the CNI request. (integer value)
+ #vif_annotation_timeout = 120
+
+ # Kuryr uses pyroute2 library to manipulate networking interfaces. When
+ # processing a high number of Kuryr requests in parallel, it may take kernel
+ # more time to process all networking stack changes. This option allows to tune
+ # internal pyroute2 timeout. (integer value)
+ #pyroute2_timeout = 30
+
+ # Set to True when you are running kuryr-daemon inside a Docker container on
+ # a Kubernetes host, e.g. as a DaemonSet on the Kubernetes cluster Kuryr is
+ # supposed to provide networking for. This mainly means that kuryr-daemon will
+ # look for network namespaces in $netns_proc_dir instead of /proc. (boolean value)
+ docker_mode = true
+
+ # When docker_mode is set to True, this config option should be set to where
+ # host's /proc directory is mounted. Please note that mounting it is necessary
+ # to allow Kuryr-Kubernetes to move host interfaces between host network
+ # namespaces, which is essential for Kuryr to work. (string value)
+ netns_proc_dir = /host_proc
+
+
[kubernetes]
#
@@ -164,11 +241,6 @@ data:
# The driver that manages VIFs pools for Kubernetes Pods (string value)
vif_pool_driver = {{ kuryr_openstack_enable_pools | default(False) | ternary('nested', 'noop') }}
- [vif_pool]
- ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }}
- ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }}
- ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }}
- ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }}
[neutron]
# Configuration options for OpenStack Neutron
@@ -232,13 +304,55 @@ data:
external_svc_subnet = {{ kuryr_openstack_external_svc_subnet_id }}
[pod_vif_nested]
+
worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+
+
+ [pool_manager]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Absolute path to socket file that will be used for communication with the
+ # Pool Manager daemon (string value)
+ #sock_file = /run/kuryr/kuryr_manage.sock
+
+
+ [vif_pool]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Set a maximum number of ports per pool. 0 to disable (integer value)
+ ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }}
+
+ # Set a target minimum size of the pool of ports (integer value)
+ ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }}
+
+ # Number of ports to be created in a bulk request (integer value)
+ ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }}
+
+ # Minimum interval (in seconds) between pool updates (integer value)
+ ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }}
+
kuryr-cni.conf: |+
[DEFAULT]
#
# From kuryr_kubernetes
#
+
+ # Directory for Kuryr vif binding executables. (string value)
+ #bindir = /usr/libexec/kuryr
+
+ # Neutron subnetpool name will be prefixed by this. (string value)
+ #subnetpool_name_prefix = kuryrPool
+
+ # baremetal or nested-containers are the supported values. (string value)
+ #deployment_type = baremetal
+
# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
# Note: This option can be changed without restarting.
@@ -263,7 +377,7 @@ data:
# logging will go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
- #log_file = /var/log/kuryr/cni.log
+ #log_file = <None>
# (Optional) The base directory used for relative log_file paths. This option
# is ignored if log_config_append is set. (string value)
@@ -282,6 +396,12 @@ data:
# is set. (boolean value)
#use_syslog = false
+ # Enable journald for logging. If running in a systemd environment you may wish
+ # to enable journal support. Doing so will use the journal native protocol
+ # which includes structured metadata in addition to log messages. This option is
+ # ignored if log_config_append is set. (boolean value)
+ #use_journal = false
+
# Syslog facility to receive log lines. This option is ignored if
# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
@@ -310,7 +430,7 @@ data:
# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set. (list value)
- #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+ #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
@@ -323,14 +443,85 @@ data:
# value)
#instance_uuid_format = "[instance: %(uuid)s] "
+ # Interval, number of seconds, of log rate limiting. (integer value)
+ #rate_limit_interval = 0
+
+ # Maximum number of logged messages per rate_limit_interval. (integer value)
+ #rate_limit_burst = 0
+
+ # Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG
+ # or empty string. Logs with level greater or equal to rate_limit_except_level
+ # are not filtered. An empty string means that all levels are filtered. (string
+ # value)
+ #rate_limit_except_level = CRITICAL
+
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
[binding]
+ # Configuration options for container interface binding.
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # The name prefix of the veth endpoint put inside the container. (string value)
+ #veth_dst_prefix = eth
+
+ # Driver to use for binding and unbinding ports. (string value)
+ # Deprecated group/name - [binding]/driver
+ #default_driver = kuryr.lib.binding.drivers.veth
+
+ # Drivers to use for binding and unbinding ports. (list value)
+ #enabled_drivers = kuryr.lib.binding.drivers.veth
+
+ # Specifies the name of the Nova instance interface to link the virtual devices
+ # to (only applicable to some binding drivers). (string value)
+ link_iface = eth0
driver = kuryr.lib.binding.drivers.vlan
- link_iface = {{ kuryr_cni_link_interface }}
+
+
+ [cni_daemon]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Enable CNI Daemon configuration. (boolean value)
+ daemon_enabled = true
+
+ # Bind address for CNI daemon HTTP server. It is recommended to allow only local
+ # connections. (string value)
+ bind_address = 127.0.0.1:50036
+
+ # Maximum number of processes that will be spawned to process requests from CNI
+ # driver. (integer value)
+ #worker_num = 30
+
+ # Time (in seconds) the CNI daemon will wait for VIF annotation to appear in
+ # pod metadata before failing the CNI request. (integer value)
+ #vif_annotation_timeout = 120
+
+ # Kuryr uses pyroute2 library to manipulate networking interfaces. When
+ # processing a high number of Kuryr requests in parallel, it may take kernel
+ # more time to process all networking stack changes. This option allows to tune
+ # internal pyroute2 timeout. (integer value)
+ #pyroute2_timeout = 30
+
+ # Set to True when you are running kuryr-daemon inside a Docker container on
+ # a Kubernetes host, e.g. as a DaemonSet on the Kubernetes cluster Kuryr is
+ # supposed to provide networking for. This mainly means that kuryr-daemon will
+ # look for network namespaces in $netns_proc_dir instead of /proc. (boolean value)
+ docker_mode = true
+
+ # When docker_mode is set to True, this config option should be set to where
+ # host's /proc directory is mounted. Please note that mounting it is necessary
+ # to allow Kuryr-Kubernetes to move host interfaces between host network
+ # namespaces, which is essential for Kuryr to work. (string value)
+ netns_proc_dir = /host_proc
+
[kubernetes]
@@ -341,12 +532,136 @@ data:
# The root URL of the Kubernetes API (string value)
api_root = {{ openshift.master.api_url }}
- # The token to talk to the k8s API
- token_file = /etc/kuryr/token
+ # Absolute path to client cert to connect to HTTPS K8S_API (string value)
+ # ssl_client_crt_file = /etc/kuryr/controller.crt
+
+ # Absolute path client key file to connect to HTTPS K8S_API (string value)
+ # ssl_client_key_file = /etc/kuryr/controller.key
# Absolute path to ca cert file to connect to HTTPS K8S_API (string value)
- ssl_ca_crt_file = /etc/kuryr/ca.crt
+ ssl_ca_crt_file = /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+
+ # The token to talk to the k8s API
+ token_file = /var/run/secrets/kubernetes.io/serviceaccount/token
# HTTPS K8S_API server identity verification (boolean value)
# TODO (apuimedo): Make configurable
ssl_verify_server_crt = True
+
+ # The driver to determine OpenStack project for pod ports (string value)
+ pod_project_driver = default
+
+ # The driver to determine OpenStack project for services (string value)
+ service_project_driver = default
+
+ # The driver to determine Neutron subnets for pod ports (string value)
+ pod_subnets_driver = default
+
+ # The driver to determine Neutron subnets for services (string value)
+ service_subnets_driver = default
+
+ # The driver to determine Neutron security groups for pods (string value)
+ pod_security_groups_driver = default
+
+ # The driver to determine Neutron security groups for services (string value)
+ service_security_groups_driver = default
+
+ # The driver that provides VIFs for Kubernetes Pods. (string value)
+ pod_vif_driver = nested-vlan
+
+ # The driver that manages VIFs pools for Kubernetes Pods (string value)
+ vif_pool_driver = {{ kuryr_openstack_enable_pools | default(False) | ternary('nested', 'noop') }}
+
+ [neutron]
+ # Configuration options for OpenStack Neutron
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Authentication URL (string value)
+ auth_url = {{ kuryr_openstack_auth_url }}
+
+ # Authentication type to load (string value)
+ # Deprecated group/name - [neutron]/auth_plugin
+ auth_type = password
+
+ # Domain ID to scope to (string value)
+ user_domain_name = {{ kuryr_openstack_user_domain_name }}
+
+ # User's password (string value)
+ password = {{ kuryr_openstack_password }}
+
+ # Domain name containing project (string value)
+ project_domain_name = {{ kuryr_openstack_project_domain_name }}
+
+ # Project ID to scope to (string value)
+ # Deprecated group/name - [neutron]/tenant-id
+ project_id = {{ kuryr_openstack_project_id }}
+
+ # Token (string value)
+ #token = <None>
+
+ # Trust ID (string value)
+ #trust_id = <None>
+
+ # User's domain id (string value)
+ #user_domain_id = <None>
+
+ # User id (string value)
+ #user_id = <None>
+
+ # Username (string value)
+ # Deprecated group/name - [neutron]/user-name
+ username = {{ kuryr_openstack_username }}
+
+ # Whether a plugging operation is failed if the port to plug does not become
+ # active (boolean value)
+ #vif_plugging_is_fatal = false
+
+ # Seconds to wait for port to become active (integer value)
+ #vif_plugging_timeout = 0
+
+ [neutron_defaults]
+
+ pod_security_groups = {{ kuryr_openstack_pod_sg_id }}
+ pod_subnet = {{ kuryr_openstack_pod_subnet_id }}
+ service_subnet = {{ kuryr_openstack_service_subnet_id }}
+ project = {{ kuryr_openstack_pod_project_id }}
+ # TODO (apuimedo): Remove the duplicated line just after this one once the
+ # RDO packaging contains the upstream patch
+ worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+
+ [pod_vif_nested]
+
+ worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+
+
+ [pool_manager]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Absolute path to socket file that will be used for communication with the
+ # Pool Manager daemon (string value)
+ #sock_file = /run/kuryr/kuryr_manage.sock
+
+
+ [vif_pool]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Set a maximum number of ports per pool. 0 to disable (integer value)
+ ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }}
+
+ # Set a target minimum size of the pool of ports (integer value)
+ ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }}
+
+ # Number of ports to be created in a bulk request (integer value)
+ ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }}
+
+ # Minimum interval (in seconds) between pool updates (integer value)
+ ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }}
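
The rendered ConfigMap entries are ordinary oslo.config-style INI, so the new `[cni_daemon]` settings can be smoke-tested with the stdlib parser; a sketch, assuming the rendered output has been saved locally as `kuryr.conf` (oslo.config's own parser is more lenient, so treat this only as a quick check):

```python
import configparser

# interpolation=None: the sample config's comments contain '%' format strings
parser = configparser.ConfigParser(interpolation=None)
parser.read("kuryr.conf")  # hypothetical local copy of the rendered config

# Settings introduced by this change (see [cni_daemon] above).
daemon_enabled = parser.getboolean("cni_daemon", "daemon_enabled")
bind_address = parser.get("cni_daemon", "bind_address")
docker_mode = parser.getboolean("cni_daemon", "docker_mode")
netns_proc_dir = parser.get("cni_daemon", "netns_proc_dir")

print(daemon_enabled, bind_address, docker_mode, netns_proc_dir)
# expected: True 127.0.0.1:50036 True /host_proc
```
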
diff --git a/roles/lib_utils/filter_plugins/oo_filters.py b/roles/lib_utils/filter_plugins/oo_filters.py
index fc14b5633..ef996fefe 100644
--- a/roles/lib_utils/filter_plugins/oo_filters.py
+++ b/roles/lib_utils/filter_plugins/oo_filters.py
@@ -4,6 +4,7 @@
"""
Custom filters for use in openshift-ansible
"""
+import json
import os
import pdb
import random
@@ -21,13 +22,10 @@ import yaml
from ansible import errors
from ansible.parsing.yaml.dumper import AnsibleDumper
-# ansible.compat.six goes away with Ansible 2.4
-try:
- from ansible.compat.six import string_types, u
- from ansible.compat.six.moves.urllib.parse import urlparse
-except ImportError:
- from ansible.module_utils.six import string_types, u
- from ansible.module_utils.six.moves.urllib.parse import urlparse
+# pylint: disable=import-error,no-name-in-module
+from ansible.module_utils.six import string_types, u
+# pylint: disable=import-error,no-name-in-module
+from ansible.module_utils.six.moves.urllib.parse import urlparse
HAS_OPENSSL = False
try:
@@ -589,6 +587,18 @@ that result to this filter plugin.
return secret_name
+def lib_utils_oo_l_of_d_to_csv(input_list):
+ """Map a list of dictionaries, input_list, into a csv string
+ of json values.
+
+ Example input:
+ [{'var1': 'val1', 'var2': 'val2'}, {'var1': 'val3', 'var2': 'val4'}]
+ Example output:
+ u'{"var1": "val1", "var2": "val2"},{"var1": "val3", "var2": "val4"}'
+ """
+ return ','.join(json.dumps(x) for x in input_list)
+
+
def map_from_pairs(source, delim="="):
''' Returns a dict given the source and delim delimited '''
if source == '':
@@ -626,5 +636,6 @@ class FilterModule(object):
"lib_utils_oo_contains_rule": lib_utils_oo_contains_rule,
"lib_utils_oo_selector_to_string_list": lib_utils_oo_selector_to_string_list,
"lib_utils_oo_filter_sa_secrets": lib_utils_oo_filter_sa_secrets,
+ "lib_utils_oo_l_of_d_to_csv": lib_utils_oo_l_of_d_to_csv,
"map_from_pairs": map_from_pairs
}
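
Since the new filter is a plain function over a list of dicts, it is easy to exercise outside Jinja; a quick sketch reproducing the docstring's example:

```python
import json

def lib_utils_oo_l_of_d_to_csv(input_list):
    """Map a list of dictionaries into a csv string of json values."""
    return ','.join(json.dumps(x) for x in input_list)

rows = [{'var1': 'val1', 'var2': 'val2'}, {'var1': 'val3', 'var2': 'val4'}]
print(lib_utils_oo_l_of_d_to_csv(rows))
# {"var1": "val1", "var2": "val2"},{"var1": "val3", "var2": "val4"}
```
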
diff --git a/roles/lib_utils/filter_plugins/openshift_master.py b/roles/lib_utils/filter_plugins/openshift_master.py
index ff15f693b..e67b19c28 100644
--- a/roles/lib_utils/filter_plugins/openshift_master.py
+++ b/roles/lib_utils/filter_plugins/openshift_master.py
@@ -10,11 +10,7 @@ from ansible import errors
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.filter.core import to_bool as ansible_bool
-# ansible.compat.six goes away with Ansible 2.4
-try:
- from ansible.compat.six import string_types, u
-except ImportError:
- from ansible.module_utils.six import string_types, u
+from ansible.module_utils.six import string_types, u
import yaml
diff --git a/roles/lib_utils/library/docker_creds.py b/roles/lib_utils/library/docker_creds.py
index d4674845e..b94c0b779 100644
--- a/roles/lib_utils/library/docker_creds.py
+++ b/roles/lib_utils/library/docker_creds.py
@@ -135,7 +135,7 @@ def update_config(docker_config, registry, username, password):
docker_config['auths'][registry] = {}
# base64 encode our username:password string
- encoded_data = base64.b64encode('{}:{}'.format(username, password))
+ encoded_data = base64.b64encode('{}:{}'.format(username, password).encode()).decode()
# check if the same value is already present for idempotency.
if 'auth' in docker_config['auths'][registry]:
@@ -151,7 +151,7 @@ def write_config(module, docker_config, dest):
conf_file_path = os.path.join(dest, 'config.json')
try:
with open(conf_file_path, 'w') as conf_file:
- json.dump(docker_config, conf_file, indent=8)
+ json.dump(docker_config, conf_file, indent=8)  # docker_config is a plain dict; the auth value is decoded at encode time
except IOError as ioerror:
result = {'failed': True,
'changed': False,
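
The `.encode()` added in `update_config` is needed because Python 3's `base64.b64encode` takes and returns bytes, while the `auth` value stored in `config.json` must be text, hence decoding the result before it lands in the dict (see the edit above; `docker_config` itself is a dict and has no `decode()`). A small round-trip sketch with placeholder credentials:

```python
import base64
import json

username, password = "user", "s3cret"  # placeholders

# b64encode needs bytes in and hands bytes back, so decode before
# the value goes into a JSON document.
auth = base64.b64encode("{}:{}".format(username, password).encode()).decode()

docker_config = {"auths": {"registry.example.com": {"auth": auth}}}
print(json.dumps(docker_config, indent=8))
```
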
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 8c8227b5e..a729e8dbd 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -98,12 +98,20 @@ openshift_aws_elb_dict:
proxy_protocol: True
openshift_aws_node_group_config_master_volumes:
+- device_name: /dev/sda1
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: False
- device_name: /dev/sdb
volume_size: 100
device_type: gp2
delete_on_termination: False
openshift_aws_node_group_config_node_volumes:
+- device_name: /dev/sda1
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: True
- device_name: /dev/sdb
volume_size: 100
device_type: gp2
@@ -293,3 +301,7 @@ openshift_aws_node_user_data: ''
openshift_aws_node_config_namespace: openshift-node
openshift_aws_masters_groups: masters,etcd,nodes
+
+# By default, don't delete things like the shared IAM instance
+# profile and uploaded ssh keys
+openshift_aws_enable_uninstall_shared_objects: False
diff --git a/roles/openshift_aws/tasks/accept_nodes.yml b/roles/openshift_aws/tasks/accept_nodes.yml
index c2a2cea30..db30fe5c9 100644
--- a/roles/openshift_aws/tasks/accept_nodes.yml
+++ b/roles/openshift_aws/tasks/accept_nodes.yml
@@ -1,4 +1,6 @@
---
+- include_tasks: setup_master_group.yml
+
- name: fetch masters
ec2_instance_facts:
region: "{{ openshift_aws_region | default('us-east-1') }}"
@@ -36,4 +38,4 @@
nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}"
timeout: 60
register: nodeout
- delegate_to: "{{ mastersout.instances[0].public_ip_address }}"
+ delegate_to: "{{ groups.masters.0 }}"
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
index 786a2e4cf..2b5f317d8 100644
--- a/roles/openshift_aws/tasks/provision.yml
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -1,23 +1,6 @@
---
-- when: openshift_aws_create_iam_cert | bool
- name: create the iam_cert for elb certificate
- include_tasks: iam_cert.yml
-
-- when: openshift_aws_create_s3 | bool
- name: create s3 bucket for registry
- include_tasks: s3.yml
-
- include_tasks: vpc_and_subnet_id.yml
-- name: create elbs
- include_tasks: elb.yml
- with_dict: "{{ openshift_aws_elb_dict }}"
- vars:
- l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}"
- l_openshift_aws_elb_name_dict: "{{ openshift_aws_elb_name_dict }}"
- loop_control:
- loop_var: l_elb_dict_item
-
- name: include scale group creation for master
include_tasks: build_node_group.yml
with_items: "{{ openshift_aws_master_group }}"
diff --git a/roles/openshift_aws/tasks/provision_elb.yml b/roles/openshift_aws/tasks/provision_elb.yml
new file mode 100644
index 000000000..a52f63bd5
--- /dev/null
+++ b/roles/openshift_aws/tasks/provision_elb.yml
@@ -0,0 +1,15 @@
+---
+- when: openshift_aws_create_iam_cert | bool
+ name: create the iam_cert for elb certificate
+ include_tasks: iam_cert.yml
+
+- include_tasks: vpc_and_subnet_id.yml
+
+- name: create elbs
+ include_tasks: elb.yml
+ with_dict: "{{ openshift_aws_elb_dict }}"
+ vars:
+ l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}"
+ l_openshift_aws_elb_name_dict: "{{ openshift_aws_elb_name_dict }}"
+ loop_control:
+ loop_var: l_elb_dict_item
diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml
index d82f18574..9105b5b4c 100644
--- a/roles/openshift_aws/tasks/provision_nodes.yml
+++ b/roles/openshift_aws/tasks/provision_nodes.yml
@@ -2,25 +2,12 @@
# Get bootstrap config token
# bootstrap should be created on first master
# need to fetch it and shove it into cloud data
-- name: fetch master instances
- ec2_instance_facts:
- region: "{{ openshift_aws_region }}"
- filters:
- "tag:clusterid": "{{ openshift_aws_clusterid }}"
- "tag:host-type": master
- instance-state-name: running
- register: instancesout
- retries: 20
- delay: 3
- until:
- - "'instances' in instancesout"
- - instancesout.instances|length > 0
+- include_tasks: setup_master_group.yml
- name: slurp down the bootstrap.kubeconfig
slurp:
src: /etc/origin/master/bootstrap.kubeconfig
- delegate_to: "{{ instancesout.instances[0].public_ip_address }}"
- remote_user: root
+ delegate_to: "{{ groups.masters.0 }}"
register: bootstrap
- name: set_fact for kubeconfig token
diff --git a/roles/openshift_aws/tasks/uninstall_security_group.yml b/roles/openshift_aws/tasks/uninstall_security_group.yml
new file mode 100644
index 000000000..55d40e8ec
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_security_group.yml
@@ -0,0 +1,14 @@
+---
+- name: delete the node group sgs
+ oo_ec2_group:
+ state: absent
+ name: "{{ item.value.name}}"
+ region: "{{ openshift_aws_region }}"
+ with_dict: "{{ openshift_aws_node_security_groups }}"
+
+- name: delete the k8s sgs for the node group
+ oo_ec2_group:
+ state: absent
+ name: "{{ item.value.name }}_k8s"
+ region: "{{ openshift_aws_region }}"
+ with_dict: "{{ openshift_aws_node_security_groups }}"
diff --git a/roles/openshift_aws/tasks/uninstall_ssh_keys.yml b/roles/openshift_aws/tasks/uninstall_ssh_keys.yml
new file mode 100644
index 000000000..27e42da53
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_ssh_keys.yml
@@ -0,0 +1,9 @@
+---
+- name: Remove the public keys for the user(s)
+ ec2_key:
+ state: absent
+ name: "{{ item.key_name }}"
+ region: "{{ openshift_aws_region }}"
+ with_items: "{{ openshift_aws_users }}"
+ no_log: True
+ when: openshift_aws_enable_uninstall_shared_objects | bool
diff --git a/roles/openshift_aws/tasks/uninstall_vpc.yml b/roles/openshift_aws/tasks/uninstall_vpc.yml
new file mode 100644
index 000000000..ecf39f694
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_vpc.yml
@@ -0,0 +1,36 @@
+---
+- name: Fetch the VPC for the vpc.id
+ ec2_vpc_net_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_clusterid }}"
+ register: vpcout
+- debug:
+ var: vpcout
+ verbosity: 1
+
+- when: vpcout.vpcs | length > 0
+ block:
+ - name: delete the vpc igw
+ ec2_vpc_igw:
+ state: absent
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ register: igw
+
+ - name: delete the vpc subnets
+ ec2_vpc_subnet:
+ state: absent
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ cidr: "{{ item.cidr }}"
+ az: "{{ item.az }}"
+ with_items: "{{ openshift_aws_vpc.subnets[openshift_aws_region] }}"
+
+ - name: Delete AWS VPC
+ ec2_vpc_net:
+ state: absent
+ region: "{{ openshift_aws_region }}"
+ name: "{{ openshift_aws_clusterid }}"
+ cidr_block: "{{ openshift_aws_vpc.cidr }}"
+ register: vpc
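
The block tears resources down in dependency order: internet gateway first, then subnets, then the VPC itself. A sketch of the equivalent teardown in boto3 (an assumption for illustration; the role itself drives the `ec2_vpc_*` Ansible modules, not boto3):

```python
import boto3  # assumed available; not a dependency of the role

def uninstall_vpc(region, clusterid):
    ec2 = boto3.client("ec2", region_name=region)
    vpcs = ec2.describe_vpcs(
        Filters=[{"Name": "tag:Name", "Values": [clusterid]}])["Vpcs"]
    if not vpcs:
        return  # mirrors the `when: vpcout.vpcs | length > 0` guard
    vpc_id = vpcs[0]["VpcId"]
    # Same order as the tasks above: igw, then subnets, then the VPC.
    for igw in ec2.describe_internet_gateways(
            Filters=[{"Name": "attachment.vpc-id",
                      "Values": [vpc_id]}])["InternetGateways"]:
        ec2.detach_internet_gateway(
            InternetGatewayId=igw["InternetGatewayId"], VpcId=vpc_id)
        ec2.delete_internet_gateway(
            InternetGatewayId=igw["InternetGatewayId"])
    for subnet in ec2.describe_subnets(
            Filters=[{"Name": "vpc-id", "Values": [vpc_id]}])["Subnets"]:
        ec2.delete_subnet(SubnetId=subnet["SubnetId"])
    ec2.delete_vpc(VpcId=vpc_id)
```
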
diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-policy.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-policy.yaml
new file mode 100644
index 000000000..90ee40943
--- /dev/null
+++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-policy.yaml
@@ -0,0 +1,10 @@
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: bootstrap-autoapprover
+roleRef:
+ kind: ClusterRole
+ name: system:node-bootstrap-autoapprover
+subjects:
+- kind: User
+ name: system:serviceaccount:openshift-infra:bootstrap-autoapprover
diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-role.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-role.yaml
new file mode 100644
index 000000000..d8143d047
--- /dev/null
+++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-role.yaml
@@ -0,0 +1,21 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: system:node-bootstrap-autoapprover
+rules:
+- apiGroups:
+ - certificates.k8s.io
+ resources:
+ - certificatesigningrequests
+ verbs:
+ - delete
+ - get
+ - list
+ - watch
+- apiGroups:
+ - certificates.k8s.io
+ resources:
+ - certificatesigningrequests/approval
+ verbs:
+ - create
+ - update
diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-serviceaccount.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-serviceaccount.yaml
new file mode 100644
index 000000000..e22ce6f34
--- /dev/null
+++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller-serviceaccount.yaml
@@ -0,0 +1,5 @@
+kind: ServiceAccount
+apiVersion: v1
+metadata:
+ name: bootstrap-autoapprover
+ namespace: openshift-infra
diff --git a/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml
new file mode 100644
index 000000000..dbcedb407
--- /dev/null
+++ b/roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml
@@ -0,0 +1,68 @@
+kind: StatefulSet
+apiVersion: apps/v1beta1
+metadata:
+ name: bootstrap-autoapprover
+ namespace: openshift-infra
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app: bootstrap-autoapprover
+ spec:
+ serviceAccountName: bootstrap-autoapprover
+ terminationGracePeriodSeconds: 1
+ containers:
+ - name: signer
+ image: openshift/node:v3.7.0-rc.0
+ command:
+ - /bin/bash
+ - -c
+ args:
+ - |
+ #!/bin/bash
+ set -o errexit
+ set -o nounset
+ set -o pipefail
+
+ unset KUBECONFIG
+ cat <<SCRIPT > /tmp/signer
+ #!/bin/bash
+ #
+ # It will approve any CSR that is not approved yet, and delete any CSR that expired more than 60 seconds
+ # ago.
+ #
+
+ set -o errexit
+ set -o nounset
+ set -o pipefail
+
+ name=\${1}
+ condition=\${2}
+ certificate=\${3}
+ username=\${4}
+
+ # auto approve
+ if [[ -z "\${condition}" && ("\${username}" == "system:serviceaccount:openshift-infra:node-bootstrapper" || "\${username}" == "system:node:"* ) ]]; then
+ oc adm certificate approve "\${name}"
+ exit 0
+ fi
+
+ # check certificate age
+ if [[ -n "\${certificate}" ]]; then
+ text="\$( echo "\${certificate}" | base64 -d - )"
+ if ! echo "\${text}" | openssl x509 -noout; then
+ echo "error: Unable to parse certificate" 2>&1
+ exit 1
+ fi
+ if ! echo "\${text}" | openssl x509 -checkend -60 > /dev/null; then
+ echo "Certificate is expired, deleting"
+ oc delete csr "\${name}"
+ fi
+ exit 0
+ fi
+ SCRIPT
+ chmod u+x /tmp/signer
+
+ exec oc observe csr --maximum-errors=1 --resync-period=10m -a '{.status.conditions[*].type}' -a '{.status.certificate}' -a '{.spec.username}' -- /tmp/signer
diff --git a/roles/openshift_bootstrap_autoapprover/tasks/main.yml b/roles/openshift_bootstrap_autoapprover/tasks/main.yml
new file mode 100644
index 000000000..88e9d08e7
--- /dev/null
+++ b/roles/openshift_bootstrap_autoapprover/tasks/main.yml
@@ -0,0 +1,28 @@
+---
+- name: Copy auto-approver config to host
+ run_once: true
+ copy:
+ src: "{{ item }}"
+ dest: /tmp/openshift-approver/
+ owner: root
+ mode: 0400
+ with_fileglob:
+ - "*.yaml"
+
+- name: Set auto-approver nodeSelector
+ run_once: true
+ yedit:
+ src: "/tmp/openshift-approver/openshift-bootstrap-controller.yaml"
+ key: spec.template.spec.nodeSelector
+ value: "{{ openshift_master_bootstrap_auto_approver_node_selector | default({}) }}"
+ value_type: list
+
+- name: Create auto-approver on cluster
+ run_once: true
+ command: oc apply -f /tmp/openshift-approver/
+
+- name: Remove auto-approver config
+ run_once: true
+ file:
+ path: /tmp/openshift-approver/
+ state: absent
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index b94cd9fba..9c8534c74 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -19,7 +19,8 @@
- name: Reload generated facts
openshift_facts:
- when: hostvars[openshift_ca_host].install_result is changed
+ when:
+ - hostvars[openshift_ca_host].install_result | default({'changed':false}) is changed
- name: Create openshift_ca_config_dir if it does not exist
file:
diff --git a/roles/openshift_cloud_provider/tasks/gce.yml b/roles/openshift_cloud_provider/tasks/gce.yml
index ee4048911..395bd304c 100644
--- a/roles/openshift_cloud_provider/tasks/gce.yml
+++ b/roles/openshift_cloud_provider/tasks/gce.yml
@@ -13,5 +13,11 @@
ini_file:
dest: "{{ openshift.common.config_base }}/cloudprovider/gce.conf"
section: Global
- option: multizone
- value: "true"
+ option: "{{ item.key }}"
+ value: "{{ item.value }}"
+ with_items:
+ - { key: 'project-id', value: '{{ openshift_gcp_project }}' }
+ - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' }
+ - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' }
+ - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' }
+ - { key: 'multizone', value: 'false' }
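
The loop now writes a full `Global` section rather than a single `multizone` entry; the result is plain INI. A sketch of producing the same `gce.conf` with the stdlib, with placeholder values standing in for the `openshift_gcp_*` variables:

```python
import configparser

conf = configparser.ConfigParser()
conf["Global"] = {
    "project-id": "my-project",      # openshift_gcp_project (placeholder)
    "network-name": "my-network",    # openshift_gcp_network_name
    "node-tags": "demoocp",          # openshift_gcp_prefix + "ocp"
    "node-instance-prefix": "demo",  # openshift_gcp_prefix
    "multizone": "false",
}
with open("gce.conf", "w") as f:
    conf.write(f)
```
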
diff --git a/roles/openshift_examples/meta/main.yml b/roles/openshift_examples/meta/main.yml
index 1a34c85fc..9f46a4683 100644
--- a/roles/openshift_examples/meta/main.yml
+++ b/roles/openshift_examples/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_utils
+- role: openshift_facts
diff --git a/roles/openshift_excluder/tasks/verify_excluder.yml b/roles/openshift_excluder/tasks/verify_excluder.yml
index 4f5277fa2..22a3fcd3b 100644
--- a/roles/openshift_excluder/tasks/verify_excluder.yml
+++ b/roles/openshift_excluder/tasks/verify_excluder.yml
@@ -3,7 +3,7 @@
# - excluder
- name: Get available excluder version
repoquery:
- name: "{{ excluder }}"
+ name: "{{ excluder }}{{ '-' ~ r_openshift_excluder_upgrade_target.split('.')[0:2] | join('.') ~ '*' if r_openshift_excluder_upgrade_target is defined else '' }}"
ignore_excluders: true
register: repoquery_out
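
The extended `name` narrows the repoquery to the excluder build matching the upgrade target's major.minor, e.g. `atomic-openshift-excluder-3.9*`. A sketch of the same string construction (package name and target are example values):

```python
def excluder_query(excluder, upgrade_target=None):
    # Mirrors the Jinja: append '-<major>.<minor>*' when a target is set.
    if upgrade_target is None:
        return excluder
    major_minor = ".".join(upgrade_target.split(".")[0:2])
    return "{}-{}*".format(excluder, major_minor)

print(excluder_query("atomic-openshift-excluder", "3.9.14"))
# atomic-openshift-excluder-3.9*
```
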
diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml
index 5ae863871..b38ebdfb4 100644
--- a/roles/openshift_expand_partition/tasks/main.yml
+++ b/roles/openshift_expand_partition/tasks/main.yml
@@ -8,7 +8,7 @@
- name: Determine if growpart is installed
command: "rpm -q cloud-utils-growpart"
register: has_growpart
- failed_when: has_growpart.cr != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout
+ failed_when: has_growpart.rc != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout
changed_when: false
when: openshift_is_containerized | bool
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index d7c358a2f..26f0525e9 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1465,6 +1465,11 @@ class OpenShiftFacts(object):
if metadata:
metadata['project']['attributes'].pop('sshKeys', None)
metadata['instance'].pop('serviceAccounts', None)
+ elif bios_vendor == 'Amazon EC2':
+ # Adds support for Amazon EC2 C5 instance types
+ provider = 'aws'
+ metadata_url = 'http://169.254.169.254/latest/meta-data/'
+ metadata = get_provider_metadata(metadata_url)
elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
provider = 'aws'
metadata_url = 'http://169.254.169.254/latest/meta-data/'
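
Nitro-based instance types such as C5 no longer present the Xen virt signatures, so the DMI BIOS vendor string is the remaining reliable signal. A sketch of the same detection outside the fact module (reads the vendor string from sysfs; the metadata URL is only reachable from inside an instance):

```python
import urllib.error
import urllib.request

def detect_aws():
    # Same signal the fact module now checks: the DMI BIOS vendor.
    with open("/sys/class/dmi/id/bios_vendor") as f:
        bios_vendor = f.read().strip()
    if bios_vendor != "Amazon EC2":
        return None
    url = "http://169.254.169.254/latest/meta-data/instance-type"
    try:
        with urllib.request.urlopen(url, timeout=2) as resp:
            return resp.read().decode()
    except urllib.error.URLError:
        return None

print(detect_aws())
```
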
diff --git a/roles/openshift_gcp/files/bootstrap-script.sh b/roles/openshift_gcp/files/bootstrap-script.sh
new file mode 100644
index 000000000..0c3f1999b
--- /dev/null
+++ b/roles/openshift_gcp/files/bootstrap-script.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+#
+# This script is a startup script for bootstrapping a GCP node
+# from a config stored in the project metadata. It loops until
+# it finds the script and then starts the origin-node service.
+# TODO: generalize
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+if [[ "$( curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/bootstrap" -H "Metadata-Flavor: Google" )" != "true" ]]; then
+ echo "info: Bootstrap is not enabled for this instance, skipping" 1>&2
+ exit 0
+fi
+
+if ! id=$( curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-id" -H "Metadata-Flavor: Google" ); then
+ echo "error: Unable to get cluster-id for instance from cluster metadata" 1>&2
+ exit 1
+fi
+
+if ! node_group=$( curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/node-group" -H "Metadata-Flavor: Google" ); then
+ echo "error: Unable to get node-group for instance from cluster metadata" 1>&2
+ exit 1
+fi
+
+if ! config=$( curl -f "http://metadata.google.internal/computeMetadata/v1/instance/attributes/bootstrap-config" -H "Metadata-Flavor: Google" 2>/dev/null ); then
+ while true; do
+ if config=$( curl -f "http://metadata.google.internal/computeMetadata/v1/project/attributes/${id}-bootstrap-config" -H "Metadata-Flavor: Google" 2>/dev/null ); then
+ break
+ fi
+ echo "info: waiting for ${id}-bootstrap-config to become available in cluster metadata ..." 1>&2
+ sleep 5
+ done
+fi
+
+echo "Got bootstrap config from metadata"
+mkdir -p /etc/origin/node
+echo -n "${config}" > /etc/origin/node/bootstrap.kubeconfig
+echo "BOOTSTRAP_CONFIG_NAME=node-config-${node_group}" >> /etc/sysconfig/origin-node
+systemctl enable origin-node
+systemctl start origin-node
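
The script's core is a poll-until-present loop: prefer the instance-level `bootstrap-config` attribute, otherwise retry the project-level `<cluster-id>-bootstrap-config` key every five seconds. A sketch of the same loop in Python, with the endpoint and key names taken from the script:

```python
import time
import urllib.error
import urllib.request

BASE = "http://metadata.google.internal/computeMetadata/v1"
HEADERS = {"Metadata-Flavor": "Google"}

def get(path):
    req = urllib.request.Request(BASE + path, headers=HEADERS)
    try:
        with urllib.request.urlopen(req, timeout=5) as resp:
            return resp.read().decode()
    except urllib.error.URLError:
        return None

def fetch_bootstrap_config(cluster_id):
    # Instance-level attribute wins if present.
    config = get("/instance/attributes/bootstrap-config")
    while config is None:
        config = get("/project/attributes/%s-bootstrap-config" % cluster_id)
        if config is None:
            time.sleep(5)  # same back-off as the shell script
    return config
```
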
diff --git a/roles/openshift_gcp/files/openshift-bootstrap-update.service b/roles/openshift_gcp/files/openshift-bootstrap-update.service
new file mode 100644
index 000000000..c65b1b34e
--- /dev/null
+++ b/roles/openshift_gcp/files/openshift-bootstrap-update.service
@@ -0,0 +1,7 @@
+[Unit]
+Description=Update the OpenShift node bootstrap configuration
+
+[Service]
+Type=oneshot
+ExecStart=/usr/bin/openshift-bootstrap-update
+User=root
diff --git a/roles/openshift_gcp/files/openshift-bootstrap-update.timer b/roles/openshift_gcp/files/openshift-bootstrap-update.timer
new file mode 100644
index 000000000..1a517b33e
--- /dev/null
+++ b/roles/openshift_gcp/files/openshift-bootstrap-update.timer
@@ -0,0 +1,10 @@
+[Unit]
+Description=Update the OpenShift node bootstrap credentials hourly
+
+[Timer]
+OnBootSec=30s
+OnCalendar=hourly
+Persistent=true
+
+[Install]
+WantedBy=timers.target
\ No newline at end of file
diff --git a/roles/openshift_gcp_image_prep/files/partition.conf b/roles/openshift_gcp/files/partition.conf
index b87e5e0b6..76e65ab9c 100644
--- a/roles/openshift_gcp_image_prep/files/partition.conf
+++ b/roles/openshift_gcp/files/partition.conf
@@ -1,3 +1,3 @@
[Service]
ExecStartPost=-/usr/bin/growpart /dev/sda 1
-ExecStartPost=-/sbin/xfs_growfs /
+ExecStartPost=-/sbin/xfs_growfs /
\ No newline at end of file
diff --git a/roles/openshift_gcp/meta/main.yml b/roles/openshift_gcp/meta/main.yml
new file mode 100644
index 000000000..5e428f8de
--- /dev/null
+++ b/roles/openshift_gcp/meta/main.yml
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+ author: Clayton Coleman
+ description:
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.8
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- role: lib_utils
+- role: lib_openshift
diff --git a/roles/openshift_gcp/tasks/add_custom_repositories.yml b/roles/openshift_gcp/tasks/add_custom_repositories.yml
new file mode 100644
index 000000000..04718f78e
--- /dev/null
+++ b/roles/openshift_gcp/tasks/add_custom_repositories.yml
@@ -0,0 +1,20 @@
+---
+- name: Copy custom repository client certs
+ copy:
+ src: "{{ files_dir }}/{{ item.1.sslclientcert }}"
+ dest: /var/lib/yum/custom_secret_{{ item.0 }}_cert
+ when: item.1.sslclientcert | default(false)
+ with_indexed_items: "{{ provision_custom_repositories }}"
+- name: Copy custom repository client keys
+ copy:
+ src: "{{ files_dir }}/{{ item.1.sslclientkey }}"
+ dest: /var/lib/yum/custom_secret_{{ item.0 }}_key
+ when: item.1.sslclientkey | default(false)
+ with_indexed_items: "{{ provision_custom_repositories }}"
+
+- name: Create any custom repos that are defined
+ template:
+ src: yum_repo.j2
+ dest: /etc/yum.repos.d/provision_custom_repositories.repo
+ when: provision_custom_repositories | length > 0
+ notify: refresh cache
diff --git a/roles/openshift_gcp_image_prep/tasks/main.yaml b/roles/openshift_gcp/tasks/configure_gcp_base_image.yml
index fee5ab618..2c6e2790a 100644
--- a/roles/openshift_gcp_image_prep/tasks/main.yaml
+++ b/roles/openshift_gcp/tasks/configure_gcp_base_image.yml
@@ -1,18 +1,10 @@
----
 # GCE images ship fstab entries with xfs AND barrier=1, but the barrier option is only valid for extfs.
+---
- name: Remove barrier=1 from XFS fstab entries
- lineinfile:
- path: /etc/fstab
- regexp: '^(.+)xfs(.+?),?barrier=1,?(.*?)$'
- line: '\1xfs\2 \4'
- backrefs: yes
+ command: sed -i -e 's/xfs\(.*\)barrier=1/xfs\1/g; s/, / /g' /etc/fstab
- name: Ensure the root filesystem has XFS group quota turned on
- lineinfile:
- path: /boot/grub2/grub.cfg
- regexp: '^(.*)linux16 (.*)$'
- line: '\1linux16 \2 rootflags=gquota'
- backrefs: yes
+ command: sed -i -e 's/linux16 \(.*\)$/linux16 \1 rootflags=gquota/g' /boot/grub2/grub.cfg
- name: Ensure the root partition grows on startup
copy: src=partition.conf dest=/etc/systemd/system/google-instance-setup.service.d/
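A worked example of the fstab rewrite above, using a placeholder entry, shows the barrier option being stripped:

    echo 'UUID=abc / xfs defaults,barrier=1 0 0' \
        | sed -e 's/xfs\(.*\)barrier=1/xfs\1/g; s/, / /g'
    # output: UUID=abc / xfs defaults 0 0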
diff --git a/roles/openshift_gcp/tasks/configure_master_bootstrap.yml b/roles/openshift_gcp/tasks/configure_master_bootstrap.yml
new file mode 100644
index 000000000..591cb593c
--- /dev/null
+++ b/roles/openshift_gcp/tasks/configure_master_bootstrap.yml
@@ -0,0 +1,36 @@
+#
+# These tasks configure the instance to periodically publish the cluster's latest
+# bootstrap kubeconfig to the project metadata, keeping the project metadata in
+# sync with the cluster's configuration. We then invoke a CSR approve on any
+# nodes that are waiting to join the cluster.
+#
+---
+- name: Copy bootstrap update timer unit
+ copy:
+ src: openshift-bootstrap-update.timer
+ dest: /etc/systemd/system/openshift-bootstrap-update.timer
+ owner: root
+ group: root
+ mode: 0664
+
+- name: Copy bootstrap update service unit
+ copy:
+ src: openshift-bootstrap-update.service
+ dest: /etc/systemd/system/openshift-bootstrap-update.service
+ owner: root
+ group: root
+ mode: 0664
+
+- name: Create bootstrap update script
+ template: src=openshift-bootstrap-update.j2 dest=/usr/bin/openshift-bootstrap-update mode=u+rx
+
+- name: Start bootstrap update timer
+ systemd:
+ name: "openshift-bootstrap-update.timer"
+ state: started
+
+- name: Bootstrap all nodes that were identified with bootstrap metadata
+ run_once: true
+ oc_adm_csr:
+ nodes: "{{ groups['all'] | map('extract', hostvars) | selectattr('gce_metadata.bootstrap', 'match', 'true') | map(attribute='gce_name') | list }}"
+ timeout: 60
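The final task relies on this repository's oc_adm_csr module to approve certificate signing requests from the bootstrapped nodes. The manual equivalent, as a rough sketch run from a master:

    oc get csr
    # approve everything still pending (use with care on shared clusters)
    oc get csr --no-headers | awk '/Pending/ {print $1}' \
        | xargs --no-run-if-empty oc adm certificate approve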
diff --git a/roles/openshift_gcp/tasks/configure_master_healthcheck.yml b/roles/openshift_gcp/tasks/configure_master_healthcheck.yml
new file mode 100644
index 000000000..aa9655977
--- /dev/null
+++ b/roles/openshift_gcp/tasks/configure_master_healthcheck.yml
@@ -0,0 +1,19 @@
+---
+- name: refresh yum cache
+ command: yum clean all
+ args:
+ warn: no
+ when: ansible_os_family == "RedHat"
+
+- name: install haproxy
+ package: name=haproxy state=present
+ register: result
+ until: '"failed" not in result'
+ retries: 10
+ delay: 10
+
+- name: configure haproxy
+ template: src=master_healthcheck.j2 dest=/etc/haproxy/haproxy.cfg
+
+- name: start and enable haproxy service
+ service: name=haproxy state=started enabled=yes
diff --git a/roles/openshift_gcp/tasks/dynamic_inventory.yml b/roles/openshift_gcp/tasks/dynamic_inventory.yml
new file mode 100644
index 000000000..1637da945
--- /dev/null
+++ b/roles/openshift_gcp/tasks/dynamic_inventory.yml
@@ -0,0 +1,5 @@
+---
+- name: Extract PEM from service account file
+ copy: content="{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).private_key }}" dest=/tmp/gce.pem mode=0600
+- name: Templatize environment script
+ template: src=inventory.j2.sh dest=/tmp/inventory.sh mode=u+rx
diff --git a/roles/openshift_gcp/tasks/frequent_log_rotation.yml b/roles/openshift_gcp/tasks/frequent_log_rotation.yml
new file mode 100644
index 000000000..0b4b27f84
--- /dev/null
+++ b/roles/openshift_gcp/tasks/frequent_log_rotation.yml
@@ -0,0 +1,18 @@
+---
+- name: Rotate logs daily
+ replace:
+ dest: /etc/logrotate.conf
+ regexp: '^weekly|monthly|yearly$'
+ replace: daily
+- name: Rotate at a smaller size of log
+ lineinfile:
+ dest: /etc/logrotate.conf
+ state: present
+ regexp: '^size'
+ line: size 10M
+- name: Limit total size of log files
+ lineinfile:
+ dest: /etc/logrotate.conf
+ state: present
+ regexp: '^maxsize'
+ line: maxsize 20M
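A quick way to confirm the tightened settings took effect, since logrotate has a debug mode that reports its plan without rotating anything:

    grep -E '^(daily|size|maxsize)' /etc/logrotate.conf
    logrotate -d /etc/logrotate.conf   # dry run: prints what would rotate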
diff --git a/roles/openshift_gcp/tasks/main.yaml b/roles/openshift_gcp/tasks/main.yml
index ad205ba33..fb147bc78 100644
--- a/roles/openshift_gcp/tasks/main.yaml
+++ b/roles/openshift_gcp/tasks/main.yml
@@ -17,7 +17,7 @@
- name: Provision GCP DNS domain
command: /tmp/openshift_gcp_provision_dns.sh
args:
- chdir: "{{ playbook_dir }}/files"
+ chdir: "{{ files_dir }}"
register: dns_provision
when:
- state | default('present') == 'present'
@@ -33,7 +33,7 @@
- name: Provision GCP resources
command: /tmp/openshift_gcp_provision.sh
args:
- chdir: "{{ playbook_dir }}/files"
+ chdir: "{{ files_dir }}"
when:
- state | default('present') == 'present'
diff --git a/roles/openshift_gcp/tasks/node_cloud_config.yml b/roles/openshift_gcp/tasks/node_cloud_config.yml
new file mode 100644
index 000000000..4e982f497
--- /dev/null
+++ b/roles/openshift_gcp/tasks/node_cloud_config.yml
@@ -0,0 +1,12 @@
+---
+- name: ensure the /etc/origin folder exists
+ file: name=/etc/origin state=directory
+
+- name: configure gce cloud config options
+ ini_file: dest=/etc/origin/cloudprovider/gce.conf section=Global option={{ item.key }} value={{ item.value }} state=present create=yes
+ with_items:
+ - { key: 'project-id', value: '{{ openshift_gcp_project }}' }
+ - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' }
+ - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' }
+ - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' }
+ - { key: 'multizone', value: 'false' }
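With placeholder values for the project, network, and prefix, the rendered /etc/origin/cloudprovider/gce.conf would look roughly like this:

    cat /etc/origin/cloudprovider/gce.conf
    # [Global]
    # project-id = my-project
    # network-name = default
    # node-tags = myprefixocp
    # node-instance-prefix = myprefix
    # multizone = false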
diff --git a/roles/openshift_gcp/tasks/publish_image.yml b/roles/openshift_gcp/tasks/publish_image.yml
new file mode 100644
index 000000000..db8a7ca69
--- /dev/null
+++ b/roles/openshift_gcp/tasks/publish_image.yml
@@ -0,0 +1,32 @@
+---
+- name: Require openshift_gcp_image
+ fail:
+ msg: "A source image name or family is required for image publishing. Please ensure `openshift_gcp_image` is defined."
+ when: openshift_gcp_image is undefined
+
+- name: Require openshift_gcp_target_image
+ fail:
+ msg: "A target image name or family is required for image publishing. Please ensure `openshift_gcp_target_image` is defined."
+ when: openshift_gcp_target_image is undefined
+
+- block:
+ - name: Retrieve images in the {{ openshift_gcp_target_image }} family
+ command: >
+ gcloud --project "{{ openshift_gcp_project }}" compute images list
+ "--filter=family={{ openshift_gcp_target_image }}"
+ --format=json --sort-by ~creationTimestamp
+ register: images
+ - name: Prune oldest images
+ command: >
+ gcloud --project "{{ openshift_gcp_project }}" compute images delete "{{ item['name'] }}"
+ with_items: "{{ (images.stdout | default('[]') | from_json )[( openshift_gcp_keep_images | int ):] }}"
+ when: openshift_gcp_keep_images is defined
+
+- name: Copy the latest image in the family {{ openshift_gcp_image }} to {{ openshift_gcp_target_image }}
+ command: >
+ gcloud --project "{{ openshift_gcp_target_project | default(openshift_gcp_project) }}"
+ beta compute images create
+ "{{ openshift_gcp_target_image_name | default(openshift_gcp_target_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}"
+ --family "{{ openshift_gcp_target_image }}"
+ --source-image-family "{{ openshift_gcp_image }}"
+ --source-image-project "{{ openshift_gcp_project }}"
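Because the image list is sorted by ~creationTimestamp (newest first), the slice [openshift_gcp_keep_images:] selects everything beyond the N newest for deletion. The same pruning by hand, with placeholder project and family names:

    gcloud --project my-project compute images list \
        '--filter=family=my-target-family' --format=json --sort-by ~creationTimestamp
    # keep the newest, delete an older one by name, e.g.:
    gcloud --project my-project compute images delete my-target-family-20180101-000000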
diff --git a/roles/openshift_gcp/tasks/setup_scale_group_facts.yml b/roles/openshift_gcp/tasks/setup_scale_group_facts.yml
new file mode 100644
index 000000000..0fda43123
--- /dev/null
+++ b/roles/openshift_gcp/tasks/setup_scale_group_facts.yml
@@ -0,0 +1,44 @@
+---
+- name: Add masters to requisite groups
+ add_host:
+ name: "{{ hostvars[item].gce_name }}"
+ groups: masters, etcd
+ with_items: "{{ groups['tag_ocp-master'] }}"
+
+- name: Add a master to the primary masters group
+ add_host:
+ name: "{{ hostvars[item].gce_name }}"
+ groups: primary_master
+ with_items: "{{ groups['tag_ocp-master'].0 }}"
+
+- name: Add non-bootstrapping master node instances to node group
+ add_host:
+ name: "{{ hostvars[item].gce_name }}"
+ groups: nodes
+ openshift_node_labels:
+ role: infra
+ with_items: "{{ groups['tag_ocp-master'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}"
+
+- name: Add infra node instances to node group
+ add_host:
+ name: "{{ hostvars[item].gce_name }}"
+ groups: nodes
+ openshift_node_labels:
+ role: infra
+ with_items: "{{ groups['tag_ocp-infra-node'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}"
+
+- name: Add node instances to node group
+ add_host:
+ name: "{{ hostvars[item].gce_name }}"
+ groups: nodes
+ openshift_node_labels:
+ role: app
+ with_items: "{{ groups['tag_ocp-node'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}"
+
+- name: Add bootstrap node instances
+ add_host:
+ name: "{{ hostvars[item].gce_name }}"
+ groups: bootstrap_nodes
+ openshift_node_bootstrap: True
+ with_items: "{{ groups['tag_ocp-node'] | default([]) | intersect(groups['tag_ocp-bootstrap'] | default([])) }}"
+ when: not (openshift_node_bootstrap | default(False))
diff --git a/roles/openshift_gcp/templates/inventory.j2.sh b/roles/openshift_gcp/templates/inventory.j2.sh
new file mode 100644
index 000000000..dcaffb578
--- /dev/null
+++ b/roles/openshift_gcp/templates/inventory.j2.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+export GCE_PROJECT="{{ openshift_gcp_project }}"
+export GCE_ZONE="{{ openshift_gcp_zone }}"
+export GCE_EMAIL="{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+export GCE_PEM_FILE_PATH="/tmp/gce.pem"
+export INVENTORY_IP_TYPE="{{ inventory_ip_type }}"
+export GCE_TAGGED_INSTANCES="{{ openshift_gcp_prefix }}ocp"
\ No newline at end of file
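The rendered script is meant to be sourced before running a GCE dynamic inventory; the exported names match the environment variables the contrib gce.py inventory script (shipped with Ansible at the time) reads. A hypothetical invocation:

    . /tmp/inventory.sh   # exports GCE_PROJECT, GCE_EMAIL, GCE_PEM_FILE_PATH, ...
    ./gce.py --list       # contrib inventory script consuming those variables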
diff --git a/roles/openshift_gcp/templates/master_healthcheck.j2 b/roles/openshift_gcp/templates/master_healthcheck.j2
new file mode 100644
index 000000000..189e578c5
--- /dev/null
+++ b/roles/openshift_gcp/templates/master_healthcheck.j2
@@ -0,0 +1,68 @@
+#---------------------------------------------------------------------
+# Example configuration for a possible web application. See the
+# full configuration options online.
+#
+# http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
+#
+#---------------------------------------------------------------------
+
+#---------------------------------------------------------------------
+# Global settings
+#---------------------------------------------------------------------
+global
+ # to have these messages end up in /var/log/haproxy.log you will
+ # need to:
+ #
+ # 1) configure syslog to accept network log events. This is done
+ # by adding the '-r' option to the SYSLOGD_OPTIONS in
+ # /etc/sysconfig/syslog
+ #
+ # 2) configure local2 events to go to the /var/log/haproxy.log
+ # file. A line like the following can be added to
+ # /etc/sysconfig/syslog
+ #
+ # local2.* /var/log/haproxy.log
+ #
+ log 127.0.0.1 local2
+
+ chroot /var/lib/haproxy
+ pidfile /var/run/haproxy.pid
+ maxconn 4000
+ user haproxy
+ group haproxy
+ daemon
+
+ # turn on stats unix socket
+ stats socket /var/lib/haproxy/stats
+
+#---------------------------------------------------------------------
+# common defaults that all the 'listen' and 'backend' sections will
+# use if not designated in their block
+#---------------------------------------------------------------------
+defaults
+ mode http
+ log global
+ option httplog
+ option dontlognull
+ option http-server-close
+ option forwardfor except 127.0.0.0/8
+ option redispatch
+ retries 3
+ timeout http-request 10s
+ timeout queue 1m
+ timeout connect 10s
+ timeout client 1m
+ timeout server 1m
+ timeout http-keep-alive 10s
+ timeout check 10s
+ maxconn 3000
+
+#---------------------------------------------------------------------
+# main frontend which proxies to the backends
+#---------------------------------------------------------------------
+frontend http-proxy *:8080
+ acl url_healthz path_beg -i /healthz
+ use_backend ocp if url_healthz
+
+backend ocp
+ server ocp localhost:{{ internal_console_port }} ssl verify none
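The frontend listens on port 8080 and forwards /healthz to the master API over TLS, so a plain-HTTP probe from the instance should answer once haproxy is up (port per the template above):

    curl -s http://localhost:8080/healthz
    # expected response: ok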
diff --git a/roles/openshift_gcp/templates/openshift-bootstrap-update.j2 b/roles/openshift_gcp/templates/openshift-bootstrap-update.j2
new file mode 100644
index 000000000..5b0563724
--- /dev/null
+++ b/roles/openshift_gcp/templates/openshift-bootstrap-update.j2
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+set -euo pipefail
+
+oc serviceaccounts create-kubeconfig -n openshift-infra node-bootstrapper > /root/bootstrap.kubeconfig
+gcloud compute project-info --project '{{ openshift_gcp_project }}' add-metadata --metadata-from-file '{{ openshift_gcp_prefix + openshift_gcp_clusterid | default("default") }}-bootstrap-config=/root/bootstrap.kubeconfig'
+rm -f /root/bootstrap.kubeconfig
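Once the timer has fired, the refreshed kubeconfig should be visible in the project metadata; a quick way to confirm on the master (a sketch, the exact key name depends on the prefix and cluster id):

    journalctl -u openshift-bootstrap-update.service --no-pager | tail
    gcloud compute project-info describe --format=json | grep -- '-bootstrap-config'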
diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh
index 4d150bc74..794985322 100644
--- a/roles/openshift_gcp/templates/provision.j2.sh
+++ b/roles/openshift_gcp/templates/provision.j2.sh
@@ -9,15 +9,26 @@ if [[ -n "{{ openshift_gcp_ssh_private_key }}" ]]; then
ssh-add "{{ openshift_gcp_ssh_private_key }}" || true
fi
- # Check if the ~/.ssh/google_compute_engine.pub key is in the project metadata, and if not, add it there
- pub_key=$(cut -d ' ' -f 2 < "{{ openshift_gcp_ssh_private_key }}.pub")
+ # Check if the public key is in the project metadata, and if not, add it there
+ if [ -f "{{ openshift_gcp_ssh_private_key }}.pub" ]; then
+ pub_file="{{ openshift_gcp_ssh_private_key }}.pub"
+ pub_key=$(cut -d ' ' -f 2 < "{{ openshift_gcp_ssh_private_key }}.pub")
+ else
+ keyfile="${HOME}/.ssh/google_compute_engine"
+ pub_file="${keyfile}.pub"
+ mkdir -p "${HOME}/.ssh"
+ cp "{{ openshift_gcp_ssh_private_key }}" "${keyfile}"
+ chmod 0600 "${keyfile}"
+ ssh-keygen -y -f "${keyfile}" > "${pub_file}"
+ pub_key=$(cut -d ' ' -f 2 < "${pub_file}")
+ fi
key_tmp_file='/tmp/ocp-gce-keys'
if ! gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q "$pub_key"; then
if gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q ssh-rsa; then
gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file"
fi
echo -n 'cloud-user:' >> "$key_tmp_file"
- cat "{{ openshift_gcp_ssh_private_key }}.pub" >> "$key_tmp_file"
+ cat "${pub_file}" >> "$key_tmp_file"
gcloud --project "{{ openshift_gcp_project }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}"
rm -f "$key_tmp_file"
fi
diff --git a/roles/openshift_gcp/templates/yum_repo.j2 b/roles/openshift_gcp/templates/yum_repo.j2
new file mode 100644
index 000000000..77919ea75
--- /dev/null
+++ b/roles/openshift_gcp/templates/yum_repo.j2
@@ -0,0 +1,20 @@
+{% for repo in provision_custom_repositories %}
+[{{ repo.id | default(repo.name) }}]
+name={{ repo.name | default(repo.id) }}
+baseurl={{ repo.baseurl }}
+{% set enable_repo = repo.enabled | default(1) %}
+enabled={{ 1 if ( enable_repo == 1 or enable_repo == True ) else 0 }}
+{% set enable_gpg_check = repo.gpgcheck | default(1) %}
+gpgcheck={{ 1 if ( enable_gpg_check == 1 or enable_gpg_check == True ) else 0 }}
+{% if 'sslclientcert' in repo %}
+sslclientcert={{ "/var/lib/yum/custom_secret_" + (loop.index-1)|string + "_cert" if repo.sslclientcert }}
+{% endif %}
+{% if 'sslclientkey' in repo %}
+sslclientkey={{ "/var/lib/yum/custom_secret_" + (loop.index-1)|string + "_key" if repo.sslclientkey }}
+{% endif %}
+{% for key, value in repo.iteritems() %}
+{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck', 'sslclientkey', 'sslclientcert'] and value is defined %}
+{{ key }}={{ value }}
+{% endif %}
+{% endfor %}
+{% endfor %}
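Given a hypothetical first entry such as {id: myrepo, name: My Repo, baseurl: https://example.com/repo, sslclientcert: cert.pem}, the template renders approximately:

    cat /etc/yum.repos.d/provision_custom_repositories.repo
    # [myrepo]
    # name=My Repo
    # baseurl=https://example.com/repo
    # enabled=1
    # gpgcheck=1
    # sslclientcert=/var/lib/yum/custom_secret_0_cert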
diff --git a/roles/openshift_grafana/defaults/main.yml b/roles/openshift_grafana/defaults/main.yml
new file mode 100644
index 000000000..7fd7a085d
--- /dev/null
+++ b/roles/openshift_grafana/defaults/main.yml
@@ -0,0 +1,12 @@
+---
+gf_body_tmp:
+ name: grafana_name
+ type: prometheus
+ typeLogoUrl: ''
+ access: proxy
+ url: prometheus_url
+ basicAuth: false
+ withCredentials: false
+ jsonData:
+ tlsSkipVerify: true
+ token: satoken
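gf_body_tmp is the skeleton of a Grafana datasource definition; the role presumably fills in the name, url, and token before submitting it. A hedged sketch against Grafana's standard datasource API (the host and payload values are placeholders; the token field above belongs to the patched Prometheus datasource, not stock Grafana):

    curl -k -X POST https://grafana-ocp.example.com/api/datasources \
        -H 'Content-Type: application/json' \
        -d '{"name": "prometheus-ocp", "type": "prometheus", "access": "proxy",
             "url": "https://prometheus.example.com", "basicAuth": false,
             "jsonData": {"tlsSkipVerify": true}}'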
diff --git a/roles/openshift_grafana/files/grafana-ocp-oauth.yml b/roles/openshift_grafana/files/grafana-ocp-oauth.yml
new file mode 100644
index 000000000..82fa89004
--- /dev/null
+++ b/roles/openshift_grafana/files/grafana-ocp-oauth.yml
@@ -0,0 +1,661 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: grafana-ocp
+ annotations:
+ "openshift.io/display-name": Grafana ocp
+ description: |
+ Grafana server with patched Prometheus datasource.
+ iconClass: icon-cogs
+ tags: "metrics,monitoring,grafana,prometheus"
+parameters:
+- description: The location of the grafana image
+ name: IMAGE_GF
+ value: mrsiano/grafana-ocp:latest
+- description: The location of the proxy image
+ name: IMAGE_PROXY
+ value: openshift/oauth-proxy:v1.0.0
+- description: External URL for the grafana route
+ name: ROUTE_URL
+ value: ""
+- description: The namespace to instantiate grafana under. Defaults to 'grafana'.
+ name: NAMESPACE
+ value: grafana
+- description: The session secret for the proxy
+ name: SESSION_SECRET
+ generate: expression
+ from: "[a-zA-Z0-9]{43}"
+objects:
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ annotations:
+ serviceaccounts.openshift.io/oauth-redirectreference.primary: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"grafana-ocp"}}'
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ name: gf-cluster-reader
+ roleRef:
+ name: cluster-reader
+ subjects:
+ - kind: ServiceAccount
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+- apiVersion: route.openshift.io/v1
+ kind: Route
+ metadata:
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ spec:
+ host: "${ROUTE_URL}"
+ to:
+ name: grafana-ocp
+ tls:
+ termination: Reencrypt
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: grafana-ocp
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/scheme: https
+ service.alpha.openshift.io/serving-cert-secret-name: gf-tls
+ namespace: "${NAMESPACE}"
+ labels:
+ metrics-infra: grafana-ocp
+ name: grafana-ocp
+ spec:
+ ports:
+ - name: grafana-ocp
+ port: 443
+ protocol: TCP
+ targetPort: 8443
+ selector:
+ app: grafana-ocp
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: gf-proxy
+ namespace: "${NAMESPACE}"
+ stringData:
+ session_secret: "${SESSION_SECRET}="
+# Deploy Grafana behind an oauth proxy
+- apiVersion: extensions/v1beta1
+ kind: Deployment
+ metadata:
+ labels:
+ app: grafana-ocp
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: grafana-ocp
+ template:
+ metadata:
+ labels:
+ app: grafana-ocp
+ name: grafana-ocp-app
+ spec:
+ serviceAccountName: grafana-ocp
+ containers:
+ - name: oauth-proxy
+ image: ${IMAGE_PROXY}
+ imagePullPolicy: IfNotPresent
+ ports:
+ - containerPort: 8443
+ name: web
+ args:
+ - -https-address=:8443
+ - -http-address=
+ - -email-domain=*
+ - -client-id=system:serviceaccount:${NAMESPACE}:grafana-ocp
+ - -upstream=http://localhost:3000
+ - -provider=openshift
+# - '-openshift-delegate-urls={"/api/datasources": {"resource": "namespace", "verb": "get", "resourceName": "grafana-ocp", "namespace": "${NAMESPACE}"}}'
+ - '-openshift-sar={"namespace": "${NAMESPACE}", "verb": "list", "resource": "services"}'
+ - -tls-cert=/etc/tls/private/tls.crt
+ - -tls-key=/etc/tls/private/tls.key
+ - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
+ - -cookie-secret-file=/etc/proxy/secrets/session_secret
+ - -skip-auth-regex=^/metrics,/api/datasources,/api/dashboards
+ volumeMounts:
+ - mountPath: /etc/tls/private
+ name: gf-tls
+ - mountPath: /etc/proxy/secrets
+ name: secrets
+
+ - name: grafana-ocp
+ image: ${IMAGE_GF}
+ ports:
+ - name: grafana-http
+ containerPort: 3000
+ volumeMounts:
+ - mountPath: "/root/go/src/github.com/grafana/grafana/data"
+ name: gf-data
+ - mountPath: "/root/go/src/github.com/grafana/grafana/conf"
+ name: gfconfig
+ - mountPath: /etc/tls/private
+ name: gf-tls
+ - mountPath: /etc/proxy/secrets
+ name: secrets
+ command:
+ - "./bin/grafana-server"
+
+ volumes:
+ - name: gfconfig
+ configMap:
+ name: gf-config
+ - name: secrets
+ secret:
+ secretName: gf-proxy
+ - name: gf-tls
+ secret:
+ secretName: gf-tls
+ - emptyDir: {}
+ name: gf-data
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: gf-config
+ namespace: "${NAMESPACE}"
+ data:
+ defaults.ini: |-
+ ##################### Grafana Configuration Defaults #####################
+ #
+ # Do not modify this file in grafana installs
+ #
+
+ # possible values : production, development
+ app_mode = production
+
+ # instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
+ instance_name = ${HOSTNAME}
+
+ #################################### Paths ###############################
+ [paths]
+ # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
+ #
+ data = data
+ #
+ # Directory where grafana can store logs
+ #
+ logs = data/log
+ #
+ # Directory where grafana will automatically scan and look for plugins
+ #
+ plugins = data/plugins
+
+ #################################### Server ##############################
+ [server]
+ # Protocol (http, https, socket)
+ protocol = http
+
+ # The ip address to bind to, empty will bind to all interfaces
+ http_addr =
+
+ # The http port to use
+ http_port = 3000
+
+ # The public facing domain name used to access grafana from a browser
+ domain = localhost
+
+ # Redirect to correct domain if host header does not match domain
+ # Prevents DNS rebinding attacks
+ enforce_domain = false
+
+ # The full public facing url
+ root_url = %(protocol)s://%(domain)s:%(http_port)s/
+
+ # Log web requests
+ router_logging = false
+
+      # the path to static files, relative to the working directory
+ static_root_path = public
+
+ # enable gzip
+ enable_gzip = false
+
+ # https certs & key file
+ cert_file = /etc/tls/private/tls.crt
+ cert_key = /etc/tls/private/tls.key
+
+ # Unix socket path
+ socket = /tmp/grafana.sock
+
+ #################################### Database ############################
+ [database]
+ # You can configure the database connection by specifying type, host, name, user and password
+      # as separate properties or as one string using the url property.
+
+ # Either "mysql", "postgres" or "sqlite3", it's your choice
+ type = sqlite3
+ host = 127.0.0.1:3306
+ name = grafana
+ user = root
+ # If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
+ password =
+ # Use either URL or the previous fields to configure the database
+ # Example: mysql://user:secret@host:port/database
+ url =
+
+ # Max idle conn setting default is 2
+ max_idle_conn = 2
+
+ # Max conn setting default is 0 (mean not set)
+ max_open_conn =
+
+ # For "postgres", use either "disable", "require" or "verify-full"
+ # For "mysql", use either "true", "false", or "skip-verify".
+ ssl_mode = disable
+
+ ca_cert_path =
+ client_key_path =
+ client_cert_path =
+ server_cert_name =
+
+ # For "sqlite3" only, path relative to data_path setting
+ path = grafana.db
+
+ #################################### Session #############################
+ [session]
+ # Either "memory", "file", "redis", "mysql", "postgres", "memcache", default is "file"
+ provider = file
+
+ # Provider config options
+      #  memory: does not have any config yet
+ # file: session dir path, is relative to grafana data_path
+ # redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana`
+ # postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable
+ # mysql: go-sql-driver/mysql dsn config string, examples:
+ # `user:password@tcp(127.0.0.1:3306)/database_name`
+ # `user:password@unix(/var/run/mysqld/mysqld.sock)/database_name`
+ # memcache: 127.0.0.1:11211
+
+
+ provider_config = sessions
+
+ # Session cookie name
+ cookie_name = grafana_sess
+
+ # If you use session in https only, default is false
+ cookie_secure = false
+
+ # Session life time, default is 86400
+ session_life_time = 86400
+ gc_interval_time = 86400
+
+ #################################### Data proxy ###########################
+ [dataproxy]
+
+ # This enables data proxy logging, default is false
+ logging = false
+
+ #################################### Analytics ###########################
+ [analytics]
+ # Server reporting, sends usage counters to stats.grafana.org every 24 hours.
+ # No ip addresses are being tracked, only simple counters to track
+ # running instances, dashboard and error counts. It is very helpful to us.
+ # Change this option to false to disable reporting.
+ reporting_enabled = true
+
+ # Set to false to disable all checks to https://grafana.com
+ # for new versions (grafana itself and plugins), check is used
+ # in some UI views to notify that grafana or plugin update exists
+ # This option does not cause any auto updates, nor send any information
+ # only a GET request to https://grafana.com to get latest versions
+ check_for_updates = true
+
+ # Google Analytics universal tracking code, only enabled if you specify an id here
+ google_analytics_ua_id =
+
+ # Google Tag Manager ID, only enabled if you specify an id here
+ google_tag_manager_id =
+
+ #################################### Security ############################
+ [security]
+ # default admin user, created on startup
+ admin_user = admin
+
+ # default admin password, can be changed before first start of grafana, or in profile settings
+ admin_password = admin
+
+ # used for signing
+ secret_key = SW2YcwTIb9zpOOhoPsMm
+
+ # Auto-login remember days
+ login_remember_days = 7
+ cookie_username = grafana_user
+ cookie_remember_name = grafana_remember
+
+ # disable gravatar profile images
+ disable_gravatar = false
+
+ # data source proxy whitelist (ip_or_domain:port separated by spaces)
+ data_source_proxy_whitelist =
+
+ [snapshots]
+ # snapshot sharing options
+ external_enabled = true
+ external_snapshot_url = https://snapshots-origin.raintank.io
+ external_snapshot_name = Publish to snapshot.raintank.io
+
+ # remove expired snapshot
+ snapshot_remove_expired = true
+
+ # remove snapshots after 90 days
+ snapshot_TTL_days = 90
+
+ #################################### Users ####################################
+ [users]
+ # disable user signup / registration
+ allow_sign_up = true
+
+ # Allow non admin users to create organizations
+ allow_org_create = true
+
+ # Set to true to automatically assign new users to the default organization (id 1)
+ auto_assign_org = true
+
+ # Default role new users will be automatically assigned (if auto_assign_org above is set to true)
+ auto_assign_org_role = Admin
+
+ # Require email validation before sign up completes
+ verify_email_enabled = false
+
+ # Background text for the user field on the login page
+ login_hint = email or username
+
+ # Default UI theme ("dark" or "light")
+ default_theme = dark
+
+ # External user management
+ external_manage_link_url =
+ external_manage_link_name =
+ external_manage_info =
+
+ [auth]
+ # Set to true to disable (hide) the login form, useful if you use OAuth
+ disable_login_form = true
+
+ # Set to true to disable the signout link in the side menu. useful if you use auth.proxy
+ disable_signout_menu = true
+
+ #################################### Anonymous Auth ######################
+ [auth.anonymous]
+ # enable anonymous access
+ enabled = true
+
+ # specify organization name that should be used for unauthenticated users
+ org_name = Main Org.
+
+ # specify role for unauthenticated users
+ org_role = Admin
+
+ #################################### Github Auth #########################
+ [auth.github]
+ enabled = false
+ allow_sign_up = true
+ client_id = some_id
+ client_secret = some_secret
+ scopes = user:email
+ auth_url = https://github.com/login/oauth/authorize
+ token_url = https://github.com/login/oauth/access_token
+ api_url = https://api.github.com/user
+ team_ids =
+ allowed_organizations =
+
+ #################################### Google Auth #########################
+ [auth.google]
+ enabled = false
+ allow_sign_up = true
+ client_id = some_client_id
+ client_secret = some_client_secret
+ scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email
+ auth_url = https://accounts.google.com/o/oauth2/auth
+ token_url = https://accounts.google.com/o/oauth2/token
+ api_url = https://www.googleapis.com/oauth2/v1/userinfo
+ allowed_domains =
+ hosted_domain =
+
+ #################################### Grafana.com Auth ####################
+ # legacy key names (so they work in env variables)
+ [auth.grafananet]
+ enabled = false
+ allow_sign_up = true
+ client_id = some_id
+ client_secret = some_secret
+ scopes = user:email
+ allowed_organizations =
+
+ [auth.grafana_com]
+ enabled = false
+ allow_sign_up = true
+ client_id = some_id
+ client_secret = some_secret
+ scopes = user:email
+ allowed_organizations =
+
+ #################################### Generic OAuth #######################
+ [auth.generic_oauth]
+ name = OAuth
+ enabled = false
+ allow_sign_up = true
+ client_id = some_id
+ client_secret = some_secret
+ scopes = user:email
+ auth_url =
+ token_url =
+ api_url =
+ team_ids =
+ allowed_organizations =
+
+ #################################### Basic Auth ##########################
+ [auth.basic]
+ enabled = false
+
+ #################################### Auth Proxy ##########################
+ [auth.proxy]
+ enabled = true
+ header_name = X-WEBAUTH-USER
+ header_property = username
+ auto_sign_up = true
+ ldap_sync_ttl = 60
+ whitelist =
+
+ #################################### Auth LDAP ###########################
+ [auth.ldap]
+ enabled = false
+ config_file = /etc/grafana/ldap.toml
+ allow_sign_up = true
+
+ #################################### SMTP / Emailing #####################
+ [smtp]
+ enabled = false
+ host = localhost:25
+ user =
+      # If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
+ password =
+ cert_file =
+ key_file =
+ skip_verify = false
+ from_address = admin@grafana.localhost
+ from_name = Grafana
+ ehlo_identity =
+
+ [emails]
+ welcome_email_on_sign_up = false
+ templates_pattern = emails/*.html
+
+ #################################### Logging ##########################
+ [log]
+ # Either "console", "file", "syslog". Default is console and file
+ # Use space to separate multiple modes, e.g. "console file"
+ mode = console file
+
+ # Either "debug", "info", "warn", "error", "critical", default is "info"
+ level = error
+
+ # optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug
+ filters =
+
+ # For "console" mode only
+ [log.console]
+ level =
+
+ # log line format, valid options are text, console and json
+ format = console
+
+ # For "file" mode only
+ [log.file]
+ level =
+
+ # log line format, valid options are text, console and json
+ format = text
+
+      # This enables automated log rotation (switches on the following options), default is true
+ log_rotate = true
+
+ # Max line number of single file, default is 1000000
+ max_lines = 1000000
+
+ # Max size shift of single file, default is 28 means 1 << 28, 256MB
+ max_size_shift = 28
+
+ # Segment log daily, default is true
+ daily_rotate = true
+
+ # Expired days of log file(delete after max days), default is 7
+ max_days = 7
+
+ [log.syslog]
+ level =
+
+ # log line format, valid options are text, console and json
+ format = text
+
+ # Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
+ network =
+ address =
+
+ # Syslog facility. user, daemon and local0 through local7 are valid.
+ facility =
+
+ # Syslog tag. By default, the process' argv[0] is used.
+ tag =
+
+
+ #################################### AMQP Event Publisher ################
+ [event_publisher]
+ enabled = false
+ rabbitmq_url = amqp://localhost/
+ exchange = grafana_events
+
+ #################################### Dashboard JSON files ################
+ [dashboards.json]
+ enabled = false
+ path = /var/lib/grafana/dashboards
+
+ #################################### Usage Quotas ########################
+ [quota]
+ enabled = false
+
+ #### set quotas to -1 to make unlimited. ####
+ # limit number of users per Org.
+ org_user = 10
+
+ # limit number of dashboards per Org.
+ org_dashboard = 100
+
+ # limit number of data_sources per Org.
+ org_data_source = 10
+
+ # limit number of api_keys per Org.
+ org_api_key = 10
+
+ # limit number of orgs a user can create.
+ user_org = 10
+
+ # Global limit of users.
+ global_user = -1
+
+ # global limit of orgs.
+ global_org = -1
+
+ # global limit of dashboards
+ global_dashboard = -1
+
+ # global limit of api_keys
+ global_api_key = -1
+
+ # global limit on number of logged in users.
+ global_session = -1
+
+ #################################### Alerting ############################
+ [alerting]
+ # Disable alerting engine & UI features
+ enabled = true
+ # Makes it possible to turn off alert rule execution but alerting UI is visible
+ execute_alerts = true
+
+ #################################### Internal Grafana Metrics ############
+ # Metrics available at HTTP API Url /api/metrics
+ [metrics]
+ enabled = true
+ interval_seconds = 10
+
+ # Send internal Grafana metrics to graphite
+ [metrics.graphite]
+ # Enable by setting the address setting (ex localhost:2003)
+ address =
+ prefix = prod.grafana.%(instance_name)s.
+
+ [grafana_net]
+ url = https://grafana.com
+
+ [grafana_com]
+ url = https://grafana.com
+
+ #################################### Distributed tracing ############
+ [tracing.jaeger]
+ # jaeger destination (ex localhost:6831)
+ address =
+      # tag that will always be included when creating new spans. ex (tag1:value1,tag2:value2)
+ always_included_tag =
+ # Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
+ sampler_type = const
+ # jaeger samplerconfig param
+ # for "const" sampler, 0 or 1 for always false/true respectively
+ # for "probabilistic" sampler, a probability between 0 and 1
+ # for "rateLimiting" sampler, the number of spans per second
+ # for "remote" sampler, param is the same as for "probabilistic"
+ # and indicates the initial sampling rate before the actual one
+ # is received from the mothership
+ sampler_param = 1
+
+ #################################### External Image Storage ##############
+ [external_image_storage]
+ # You can choose between (s3, webdav, gcs)
+ provider =
+
+ [external_image_storage.s3]
+ bucket_url =
+ bucket =
+ region =
+ path =
+ access_key =
+ secret_key =
+
+ [external_image_storage.webdav]
+ url =
+ username =
+ password =
+ public_url =
+
+ [external_image_storage.gcs]
+ key_file =
+ bucket =
diff --git a/roles/openshift_grafana/files/grafana-ocp.yml b/roles/openshift_grafana/files/grafana-ocp.yml
new file mode 100644
index 000000000..bc7b4b286
--- /dev/null
+++ b/roles/openshift_grafana/files/grafana-ocp.yml
@@ -0,0 +1,76 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: grafana-ocp
+ annotations:
+ "openshift.io/display-name": Grafana ocp
+ description: |
+ Grafana server with patched Prometheus datasource.
+ iconClass: icon-cogs
+ tags: "metrics,monitoring,grafana,prometheus"
+parameters:
+- description: External URL for the grafana route
+ name: ROUTE_URL
+ value: ""
+- description: The namespace to instantiate grafana under. Defaults to 'grafana'.
+ name: NAMESPACE
+ value: grafana
+objects:
+- apiVersion: route.openshift.io/v1
+ kind: Route
+ metadata:
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ spec:
+ host: "${ROUTE_URL}"
+ to:
+ name: grafana-ocp
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ labels:
+ metrics-infra: grafana-ocp
+ name: grafana-ocp
+ spec:
+ selector:
+ name: grafana-ocp
+ ports:
+ - port: 8082
+ protocol: TCP
+ targetPort: grafana-http
+- apiVersion: v1
+ kind: ReplicationController
+ metadata:
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ labels:
+ metrics-infra: grafana-ocp
+ name: grafana-ocp
+ spec:
+ selector:
+ name: grafana-ocp
+ replicas: 1
+ template:
+ version: v1
+ metadata:
+ labels:
+ metrics-infra: grafana-ocp
+ name: grafana-ocp
+ spec:
+ volumes:
+ - name: data
+ emptyDir: {}
+ containers:
+ - image: "mrsiano/grafana-ocp:latest"
+ name: grafana-ocp
+ ports:
+ - name: grafana-http
+ containerPort: 3000
+ volumeMounts:
+ - name: data
+ mountPath: "/root/go/src/github.com/grafana/grafana/data"
+ command:
+ - "./bin/grafana-server"
diff --git a/roles/openshift_grafana/files/openshift-cluster-monitoring.json b/roles/openshift_grafana/files/openshift-cluster-monitoring.json
new file mode 100644
index 000000000..f59ca997f
--- /dev/null
+++ b/roles/openshift_grafana/files/openshift-cluster-monitoring.json
@@ -0,0 +1,5138 @@
+{
+ "dashboard": {
+ "description": "Monitors Openshift cluster using Prometheus. Shows overall cluster CPU / Memory / Filesystem usage as well as individual pod, containers, systemd services statistics. Uses cAdvisor metrics only.",
+ "editable": true,
+ "gnetId": 315,
+ "graphTooltip": 0,
+ "hideControls": false,
+ "id": null,
+ "links": [],
+ "rows": [
+ {
+ "collapse": false,
+ "height": "200px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "height": "200px",
+ "id": 32,
+ "legend": {
+ "alignAsTable": false,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": false,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate (container_network_receive_bytes_total{kubernetes_io_hostname=~\"^$Node$\"}[2m]))",
+ "format": "time_series",
+ "instant": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "Received",
+ "metric": "network",
+ "refId": "A",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{kubernetes_io_hostname=~\"^$Node$\"}[2m]))",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "Sent",
+ "metric": "network",
+ "refId": "B",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Network I/O pressure",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "transparent": false,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Network I/O pressure",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "editable": true,
+ "error": false,
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "180px",
+ "id": 4,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) / sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"}) * 100",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Cluster memory usage",
+ "transparent": false,
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "180px",
+ "id": 6,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) / sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"}) * 100",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Cluster CPU usage ",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "180px",
+ "id": 7,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/mapper/docker_.*\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) / sum (container_fs_limit_bytes{device=~\"^/dev/mapper/docker_.*\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) * 100",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "metric": "",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Cluster filesystem usage",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "bytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 9,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "20%",
+ "prefix": "",
+ "prefixFontSize": "20%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Used",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "bytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 10,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Total",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 11,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": " cores",
+ "postfixFontSize": "30%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m]))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Used",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 12,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": " cores",
+ "postfixFontSize": "30%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Total",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "bytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 13,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/mapper/docker_.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Used",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "bytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 14,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_fs_limit_bytes{device=~\"^/dev/mapper/docker_.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Total",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Total usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 33,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) / sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"}) ",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "overall cpu usage",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Cluster CPU Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 3,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "height": "",
+ "id": 17,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (pod_name) * 100",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{ pod_name }}",
+ "metric": "container_cpu",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Pods CPU usage ",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "transparent": false,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "label": "% Usage",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Pods CPU usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 3,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "height": "",
+ "id": 24,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": null,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{image!=\"\",name=~\"^k8s_.*\",container_name!=\"POD\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (container_name, pod_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "pod: {{ pod_name }} | {{ container_name }}",
+ "metric": "container_cpu",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Containers Cores Usage",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": "cores",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Containers CPU usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 3,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "height": "",
+ "id": 23,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ id }}",
+ "metric": "container_cpu",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "System services CPU usage ",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": "cores",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "System services CPU usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 411,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 3,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 34,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (irate (container_memory_usage_bytes{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ id }}",
+ "metric": "container_cpu",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "All processes Memory usage ",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": "cores",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "All processes CPU usage",
+ "titleSize": "h6"
+ },
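Note that container_memory_usage_bytes is a gauge, so it is read directly; irate() is defined for counters, not gauges. If a rate-of-change view of a gauge is actually wanted, deriv() is the purpose-built function (a sketch, same label assumptions as the panel above):

    deriv(container_memory_usage_bytes{id!="/",kubernetes_io_hostname=~"^$Node$"}[2m])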
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 25,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}) by (pod_name)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ pod_name }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "A",
+ "step": 10
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Pods memory usage",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Pods memory usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 26,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (container_memory_rss{systemd_service_name=\"\",kubernetes_io_hostname=~\"^$Node$\"}) by (systemd_service_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ systemd_service_name }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "B",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "System services memory usage",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "System services memory usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 27,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{image!=\"\",name=~\"^k8s_.*\",container_name!=\"POD\",kubernetes_io_hostname=~\"^$Node$\"}) by (container_name, pod_name)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "pod: {{ pod_name }} | {{ container_name }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "A",
+ "step": 10
+ },
+ {
+ "expr": "sum (container_memory_working_set_bytes{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}) by (kubernetes_io_hostname, name, image)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "B",
+ "step": 10
+ },
+ {
+ "expr": "sum (container_memory_working_set_bytes{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}) by (kubernetes_io_hostname, rkt_container_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "C",
+ "step": 10
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Containers memory usage",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Containers memory usage",
+ "titleSize": "h6"
+ },
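container_memory_working_set_bytes (used above) is the OOM-relevant figure: total usage minus inactive page cache, which is why these panels prefer it over container_memory_usage_bytes. An RSS-only variant, assuming the same k8s_ naming convention cAdvisor applies to Docker-managed containers (a sketch):

    sum (container_memory_rss{image!="",name=~"^k8s_.*",container_name!="POD",kubernetes_io_hostname=~"^$Node$"}) by (container_name, pod_name)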
+ {
+ "collapse": true,
+ "height": "500px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 28,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) by (id)",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ id }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "All processes memory usage",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "All processes memory usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 30,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate (container_network_receive_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (container_name, pod_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "-> pod: {{ pod_name }} | {{ container_name }}",
+ "metric": "network",
+ "refId": "B",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (container_name, pod_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "<- pod: {{ pod_name }} | {{ container_name }}",
+ "metric": "network",
+ "refId": "D",
+ "step": 1
+ },
+ {
+ "expr": "sum (irate (container_network_receive_bytes_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, name, image)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "-> docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})",
+ "metric": "network",
+ "refId": "A",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, name, image)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "<- docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})",
+ "metric": "network",
+ "refId": "C",
+ "step": 1
+ },
+ {
+ "expr": "sum (irate (container_network_transmit_bytes_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, rkt_container_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "-> rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}",
+ "metric": "network",
+ "refId": "E",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, rkt_container_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "<- rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}",
+ "metric": "network",
+ "refId": "F",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Containers network I/O ",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Containers network I/O",
+ "titleSize": "h6"
+ },
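The paired targets above use a sign convention instead of a second axis: receive rates are plotted as-is and transmit rates are negated, so inbound traffic rises above the zero line and outbound falls below it on the same Bps scale. The trick condensed to one pod-level pair of targets (a sketch; note that as a single combined expression the subtraction would instead yield net throughput):

    sum (irate (container_network_receive_bytes_total{pod_name!=""}[2m])) by (pod_name)
    - sum (irate (container_network_transmit_bytes_total{pod_name!=""}[2m])) by (pod_name)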
+ {
+ "collapse": true,
+ "height": 277,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 16,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate (container_network_receive_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (pod_name)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "-> {{ pod_name }}",
+ "metric": "network",
+ "refId": "A",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (pod_name)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "<- {{ pod_name }}",
+ "metric": "network",
+ "refId": "B",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Pods network I/O ",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Pods network I/O",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "500px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 29,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate (container_network_receive_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)",
+ "format": "time_series",
+ "instant": true,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "-> {{ id }}",
+ "metric": "network",
+ "refId": "A",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "<- {{ id }}",
+ "metric": "network",
+ "refId": "B",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "All processes network I/O ",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "All processes network I/O",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 35,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(openshift_build_total) by (phase,reason)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ phase }} | {{ reason }}",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_build_total",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 54,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "count(openshift_build_active_time_seconds{phase=\"running\"} offset 10m)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the number of builds that have been running for more than 10 minutes (600 seconds).",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 55,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "count(openshift_build_active_time_seconds{phase=\"pending\"} offset 10m)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the number of build that have been waiting at least 10 minutes (600 seconds) to start.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 56,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(openshift_build_total{phase=\"Failed\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the number of failed builds, regardless of the failure reason.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 57,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "openshift_build_total{phase=\"Failed\",reason=\"FetchSourceFailed\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{ instance }}",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the number of failed builds because of problems retrieving source from the associated Git repository.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": true,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 58,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": false,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(openshift_build_total{phase=\"Complete\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the number of successfully completed builds.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 0,
+ "id": 59,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "openshift_build_total{phase=\"Failed\"} offset 5m",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ reason }}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the failed builds totals, per failure reason, from 5 minutes ago.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "OpenShift Builds",
+ "titleSize": "h6"
+ },
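The two 10-minute panels lean on the offset modifier: {phase="running"} offset 10m evaluates the selector as of ten minutes ago, so any build series present then has been in that phase for at least that long, assuming it has not since completed and restarted. A tighter sketch intersects past and present so only builds running both then and now are counted:

    count(
      openshift_build_active_time_seconds{phase="running"}
      and openshift_build_active_time_seconds{phase="running"} offset 10m
    )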
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 36,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(openshift_sdn_pod_setup_latency_sum)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_sdn_pod_setup_latency_sum",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 41,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(openshift_sdn_pod_teardown_latency{quantile=\"0.9\"}) by (instance)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_sdn_pod_teardown_latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 50,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "topk(10, (sum by (pod_name) (irate(container_network_receive_bytes_total{pod_name!=\"\"}[5m]))))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{ pod_name }}",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Top 10 pods doing the most receive network traffic",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "decbytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 37,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "openshift_sdn_pod_ips",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ instance }} | {{ role }}",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_sdn_pod_ips",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 39,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "garbage_collector_monitoring_route:openshift:io_v1_rate_limiter_use",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "garbage_collector_monitoring_route:openshift:io_v1_rate_limiter_use",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 42,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "openshift_sdn_arp_cache_entries",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ role }} | {{ instance }}",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_sdn_arp_cache_entries",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 40,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "openshift_sdn_arp_cache_entries",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_sdn_arp_cache_entries",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "OpenShift SDN",
+ "titleSize": "h6"
+ },
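One caveat on the top-10 panel: in a range query, topk(10, ...) is re-evaluated at every step, so more than ten distinct pods can appear over the graphed window as the leaders change. For symmetry, the transmit-side twin of that query (a sketch):

    topk(10, sum by (pod_name) (irate(container_network_transmit_bytes_total{pod_name!=""}[5m])))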
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 44,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "irate(kubelet_pleg_relist_latency_microseconds{kubernetes_io_hostname=~\"$Node\",quantile=\"0.9\"}[2m])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ role }} | {{ instance }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "kubelet_pleg_relist",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "µs",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 51,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "irate(kubelet_docker_operations_latency_microseconds{quantile=\"0.9\"}[2m])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ operation_type }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "kubelet_docker_operations_latency_microseconds{quantile=\"0.9\"}",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "µs",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 52,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "kubelet_docker_operations_timeout",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ operation_type }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns a running count (not a rate) of docker operations that have timed out since the kubelet was started.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 53,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "kubelet_docker_operations_errors",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{ operation_type }}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns a running count (not a rate) of docker operations that have failed since the kubelet was started.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Kubelet",
+ "titleSize": "h6"
+ },
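The last two kubelet panels plot raw counters, which only ever grow; a rate() form makes new failures stand out instead (a sketch using standard PromQL):

    sum by (operation_type) (rate(kubelet_docker_operations_errors[5m]))

Also note the latency panels: the quantile="0.9" series of these summary metrics are gauges, so wrapping them in irate() graphs how fast the quantile is moving rather than the latency itself; reading the quantile directly is the usual choice.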
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 46,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "irate(scrape_samples_scraped[2m])",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{ kubernetes_name }} | {{ instance }} ",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "scrape_samples_scraped",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 68,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum without (cpu) (irate(container_cpu_usage_seconds_total{container_name=\"prometheus\"}[5m])))",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU per instance of Prometheus container.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Prometheus",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 48,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum without (instance,type,client,contentType) (irate(apiserver_request_count{verb!~\"GET|LIST|WATCH\"}[2m]))) > 0",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ resource }} || {{ verb }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Number of mutating API requests being made to the control plane.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 49,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum without (instance,type,client,contentType) (irate(apiserver_request_count{verb=~\"GET|LIST|WATCH\"}[2m]))) > 0",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ resource }} || {{ pod }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Number of non-mutating API requests being made to the control plane.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 74,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "endpoint_queue_latency",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": " quantile {{ quantile }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "endpoint_queue_latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "ms",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "API Server",
+ "titleSize": "h6"
+ },
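Both API-server panels share one shape: drop the high-cardinality labels with sum without (...), differentiate the request counter, and filter with > 0 so idle resource/verb pairs stay out of the legend. The mutating set is selected by excluding the read verbs; a near-equivalent sketch selects the write verbs explicitly (verb names per Kubernetes apiserver conventions):

    sort_desc(sum without (instance,type,client,contentType)
      (irate(apiserver_request_count{verb=~"POST|PUT|PATCH|DELETE"}[2m]))) > 0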
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 61,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "etcd_disk_wal_fsync_duration_seconds_count",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 10
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "etcd_disk_wal_fsync_duration_seconds_count",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "etcd",
+ "titleSize": "h6"
+ },
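etcd_disk_wal_fsync_duration_seconds_count is only the sample count of a histogram: it says how many WAL fsyncs happened, not how long they took. The latency itself is usually derived from the bucket series (a sketch using standard Prometheus histogram handling):

    histogram_quantile(0.99, sum by (le) (rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])))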
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 62,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(changes(container_start_time_seconds[10m]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "The number of containers that start or restart over the last ten minutes.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Changes in your cluster",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 63,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(machine_cpu_cores)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Total number of cores in the cluster.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 64,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(sort_desc(irate(container_cpu_usage_seconds_total{id=\"/\"}[5m])))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Total number of consumed cores.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 65,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum by (kubernetes_io_hostname,type) (irate(container_cpu_usage_seconds_total{id=\"/\"}[5m])))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU consumed per node in the cluster.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 66,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum by (cpu,id,pod_name,container_name) (irate(container_cpu_usage_seconds_total{role=\"infra\"}[5m])))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU consumption per system service or container on the infrastructure nodes.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 67,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum by (namespace) (irate(container_cpu_usage_seconds_total[5m])))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU consumed per namespace on the cluster.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 47,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(irate(container_cpu_usage_seconds_total{id=\"/\"}[3m])) / sum(machine_cpu_cores)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Percentage of total cluster CPU in use",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+            "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 69,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(container_memory_rss) / sum(machine_memory_bytes)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Percentage of total cluster memory in use",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+            "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 70,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum by (kubernetes_io_hostname) (irate(container_cpu_usage_seconds_total{id=~\"/system.slice/(docker|etcd).service\"}[5m]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+        "title": "Aggregate CPU usage (cores) of etcd+docker, per node",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "System and container CPU",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 71,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+ {
+ "title": "Kubernetes Storage Metrics via Prometheus",
+ "type": "absolute",
+ "url": "https://docs.google.com/document/d/1Fh0T60T_y888LsRwC51CQHO75b2IZ3A34ZQS71s_F0g"
+ }
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "volumes_queue_latency",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "volumes_queue_latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 72,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+ {
+ "title": "Kubernetes Storage Metrics via Prometheus",
+ "type": "absolute",
+ "url": "https://docs.google.com/document/d/1Fh0T60T_y888LsRwC51CQHO75b2IZ3A34ZQS71s_F0g"
+ }
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "irate(cloudprovider_gce_api_request_duration_seconds_count[2m])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ request }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+        "title": "cloudprovider_gce_api_request_duration_seconds_count",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 73,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+ {
+ "title": "Kubernetes Storage Metrics via Prometheus",
+ "type": "absolute",
+ "url": "https://docs.google.com/document/d/1Fh0T60T_y888LsRwC51CQHO75b2IZ3A34ZQS71s_F0g"
+ }
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate(storage_operation_duration_seconds_sum{kubernetes_io_hostname=~\"$Node\"}[2m])) by (operation_name,kubernetes_io_hostname)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ operation_name }} || {{ kubernetes_io_hostname }}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "storage_operation_duration_seconds_sum",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "OpenShift Volumes",
+ "titleSize": "h6"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [
+ "kubernetes",
+ "openshift"
+ ],
+ "templating": {
+ "list": [
+ {
+ "allValue": ".*",
+ "current": {},
+ "datasource": "${DS_PR}",
+ "hide": 0,
+ "includeAll": true,
+ "label": null,
+ "multi": false,
+ "name": "Node",
+ "options": [],
+ "query": "label_values(kubernetes_io_hostname)",
+ "refresh": 1,
+ "regex": "",
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-30m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+      "1s",
+      "5s",
+      "20s",
+      "2m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "openshift cluster monitoring",
+ "version": 6
+ }
+}
diff --git a/roles/openshift_grafana/meta/main.yml b/roles/openshift_grafana/meta/main.yml
new file mode 100644
index 000000000..8dea6f197
--- /dev/null
+++ b/roles/openshift_grafana/meta/main.yml
@@ -0,0 +1,13 @@
+---
+galaxy_info:
+ author: Eldad Marciano
+  description: Set up the Grafana pod
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.3
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - metrics
diff --git a/roles/openshift_grafana/tasks/gf-permissions.yml b/roles/openshift_grafana/tasks/gf-permissions.yml
new file mode 100644
index 000000000..9d3c741ee
--- /dev/null
+++ b/roles/openshift_grafana/tasks/gf-permissions.yml
@@ -0,0 +1,12 @@
+---
+- name: Create gf user on htpasswd
+  # -b takes the password from the command line instead of prompting;
+  # gf_htpasswd_pass is an assumed inventory variable, not part of the original role
+  command: htpasswd -b -c /etc/origin/master/htpasswd gfadmin "{{ gf_htpasswd_pass | default('gfadmin') }}"
+
+- name: Make sure master config uses HTPasswdPasswordIdentityProvider
+  command: "sed -i -e 's|AllowAllPasswordIdentityProvider|HTPasswdPasswordIdentityProvider\\n      file: /etc/origin/master/htpasswd|' /etc/origin/master/master-config.yaml"
+
+- name: Grant permission for gfuser
+ command: oc adm policy add-cluster-role-to-user cluster-reader gfadmin
+
+- name: Restart master api
+ command: systemctl restart atomic-openshift-master-api.service
diff --git a/roles/openshift_grafana/tasks/main.yml b/roles/openshift_grafana/tasks/main.yml
new file mode 100644
index 000000000..6a06d40a9
--- /dev/null
+++ b/roles/openshift_grafana/tasks/main.yml
@@ -0,0 +1,122 @@
+---
+- name: Create grafana namespace
+ oc_project:
+ state: present
+ name: grafana
+
+- name: Configure Grafana Permissions
+  include_tasks: gf-permissions.yml
+  when: gf_oauth | default(false) | bool
+
+# TODO: we should grab this yaml file from openshift/origin
+- name: Select grafana template
+  set_fact:
+    cl_file: "{{ '/tmp/grafana-ocp-oauth.yaml' if gf_oauth | default(false) | bool else '/tmp/grafana-ocp.yaml' }}"
+
+- name: Templatize grafana yaml
+  template: src=grafana-ocp.yaml dest=/tmp/grafana-ocp.yaml
+  when: not gf_oauth | default(false) | bool
+
+- name: Templatize grafana yaml
+  template: src=grafana-ocp-oauth.yaml dest=/tmp/grafana-ocp-oauth.yaml
+  when: gf_oauth | default(false) | bool
+
+- name: Process the grafana file
+  oc_process:
+    namespace: grafana
+    template_name: "{{ cl_file }}"
+    create: True
+
+- name: Wait for grafana to be running
+  command: oc rollout status deployment/grafana-ocp -n grafana
+
+- name: oc adm policy add-role-to-user view -z grafana-ocp -n {{ gf_prometheus_namespace }}
+ oc_adm_policy_user:
+ user: grafana-ocp
+ resource_kind: cluster-role
+ resource_name: view
+ state: present
+ role_namespace: "{{ gf_prometheus_namespace }}"
+
+- name: Get grafana route
+  oc_obj:
+    kind: route
+    name: grafana
+    namespace: grafana
+  register: grafana_route_out
+
+- name: Get prometheus route
+  oc_obj:
+    kind: route
+    name: prometheus
+    namespace: "{{ gf_prometheus_namespace }}"
+  register: prometheus_route_out
+
+# oc_obj returns the fetched object under results.results; pull out the hostnames
+- name: Extract route hostnames
+  set_fact:
+    grafana_route: "{{ grafana_route_out.results.results[0].spec.host }}"
+    prometheus_route: "{{ prometheus_route_out.results.results[0].spec.host }}"
+
+- name: Get the prometheus SA
+ oc_serviceaccount_secret:
+ state: list
+ service_account: prometheus
+ namespace: "{{ gf_prometheus_namespace }}"
+ register: sa
+
+- name: Get the management SA bearer token
+ set_fact:
+ management_token: "{{ sa.results | oo_filter_sa_secrets }}"
+
+- name: Ensure the SA bearer token value is read
+ oc_secret:
+ state: list
+ name: "{{ management_token }}"
+ namespace: "{{ gf_prometheus_namespace }}"
+ no_log: True
+ register: sa_secret
+
+- name: Get the SA bearer token for prometheus
+ set_fact:
+ token: "{{ sa_secret.results.encoded.token }}"
+
+- name: Convert to json
+  set_fact:
+    ds_json: "{{ gf_body_tmp | to_json }}"
+
+- name: Set protocol type
+  set_fact:
+    protocol: "{{ 'https' if gf_oauth | default(false) | bool else 'http' }}"
+
+- name: Add gf datasource
+  uri:
+    url: "{{ protocol }}://{{ grafana_route }}/api/datasources"
+    user: admin
+    password: admin
+    method: POST
+    body: "{{ ds_json | regex_replace('grafana_name', gf_datasource_name) | regex_replace('prometheus_url', 'https://' ~ prometheus_route) | regex_replace('satoken', token) }}"
+    headers:
+      Content-Type: "application/json"
+  register: add_ds
+
+- name: Regex setup ds name
+  replace:
+    path: "{{ role_path }}/files/openshift-cluster-monitoring.json"
+    regexp: '\$\{DS_PR\}'
+    replace: '{{ gf_datasource_name }}'
+    backup: yes
+
+- name: Add new dashboard
+  uri:
+    url: "{{ protocol }}://{{ grafana_route }}/api/dashboards/db"
+    user: admin
+    password: admin
+    method: POST
+    body: "{{ lookup('file', 'openshift-cluster-monitoring.json') }}"
+    headers:
+      Content-Type: "application/json"
+  register: add_dashboard
+
+- name: Regex json tear down
+  replace:
+    path: "{{ role_path }}/files/openshift-cluster-monitoring.json"
+    regexp: '{{ gf_datasource_name }}'
+    replace: '${DS_PR}'
+    backup: yes
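Taken together, the datasource tasks boil down to one authenticated POST against Grafana's /api/datasources endpoint with the templated JSON body. A rough sketch in requests terms — the hostnames, datasource name, and self-signed-cert handling are illustrative placeholders, not values from this patch:

    import json
    import requests  # illustration only; the role itself uses the Ansible uri module

    grafana = "https://grafana.example.com"        # grafana route host (placeholder)
    prometheus = "https://prometheus.example.com"  # prometheus route host (placeholder)

    body = json.dumps({
        "name": "openshift-metrics",  # stands in for gf_datasource_name
        "type": "prometheus",
        "url": prometheus,
        "access": "proxy",
    })
    resp = requests.post(
        grafana + "/api/datasources",
        auth=("admin", "admin"),  # Grafana's out-of-the-box credentials, as in the task
        headers={"Content-Type": "application/json"},
        data=body,
        verify=False,  # routes often terminate with self-signed certs in test setups
    )
    resp.raise_for_status()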
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index 83e551b5d..b9c41d1b4 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -5,6 +5,7 @@ Health checks for OpenShift clusters.
import json
import operator
import os
+import re
import time
import collections
@@ -309,28 +310,38 @@ class OpenShiftCheck(object):
name_list = name_list.split(',')
return [name.strip() for name in name_list if name.strip()]
- @staticmethod
- def get_major_minor_version(openshift_image_tag):
+ def get_major_minor_version(self, openshift_image_tag=None):
"""Parse and return the deployed version of OpenShift as a tuple."""
- if openshift_image_tag and openshift_image_tag[0] == 'v':
- openshift_image_tag = openshift_image_tag[1:]
- # map major release versions across releases
- # to a common major version
- openshift_major_release_version = {
- "1": "3",
- }
+ version = openshift_image_tag or self.get_var("openshift_image_tag")
+ components = [int(component) for component in re.findall(r'\d+', version)]
- components = openshift_image_tag.split(".")
- if not components or len(components) < 2:
+ if len(components) < 2:
msg = "An invalid version of OpenShift was found for this host: {}"
- raise OpenShiftCheckException(msg.format(openshift_image_tag))
+ raise OpenShiftCheckException(msg.format(version))
+
+ # map major release version across releases to OCP major version
+ components[0] = {1: 3}.get(components[0], components[0])
+
+ return tuple(int(x) for x in components[:2])
+
+ def get_required_version(self, name, version_map):
+ """Return the correct required version(s) for the current (or nearest) OpenShift version."""
+ openshift_version = self.get_major_minor_version()
+
+ earliest = min(version_map)
+ latest = max(version_map)
+ if openshift_version < earliest:
+ return version_map[earliest]
+ if openshift_version > latest:
+ return version_map[latest]
- if components[0] in openshift_major_release_version:
- components[0] = openshift_major_release_version[components[0]]
+ required_version = version_map.get(openshift_version)
+ if not required_version:
+ msg = "There is no recommended version of {} for the current version of OpenShift ({})"
+ raise OpenShiftCheckException(msg.format(name, ".".join(str(comp) for comp in openshift_version)))
- components = tuple(int(x) for x in components[:2])
- return components
+ return required_version
def find_ansible_mount(self, path):
"""Return the mount point for path from ansible_mounts."""
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 7afb8f730..d298fbab2 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -40,7 +40,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# to look for images available remotely without waiting to pull them.
dependencies = ["python-docker-py", "skopeo"]
# command for checking if remote registries have an image, without docker pull
- skopeo_command = "timeout 10 skopeo inspect --tls-verify={tls} {creds} docker://{registry}/{image}"
+ skopeo_command = "{proxyvars} timeout 10 skopeo inspect --tls-verify={tls} {creds} docker://{registry}/{image}"
skopeo_example_command = "skopeo inspect [--tls-verify=false] [--creds=<user>:<pass>] docker://<registry>/<image>"
def __init__(self, *args, **kwargs):
@@ -56,7 +56,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# ordered list of registries (according to inventory vars) that docker will try for unscoped images
regs = self.ensure_list("openshift_docker_additional_registries")
# currently one of these registries is added whether the user wants it or not.
- deployment_type = self.get_var("openshift_deployment_type")
+ deployment_type = self.get_var("openshift_deployment_type", default="")
if deployment_type == "origin" and "docker.io" not in regs:
regs.append("docker.io")
elif deployment_type == 'openshift-enterprise' and "registry.access.redhat.com" not in regs:
@@ -76,11 +76,20 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
if oreg_auth_user != '' and oreg_auth_password != '':
oreg_auth_user = self.template_var(oreg_auth_user)
oreg_auth_password = self.template_var(oreg_auth_password)
- self.skopeo_command_creds = "--creds={}:{}".format(quote(oreg_auth_user), quote(oreg_auth_password))
+ self.skopeo_command_creds = quote("--creds={}:{}".format(oreg_auth_user, oreg_auth_password))
# record whether we could reach a registry or not (and remember results)
self.reachable_registries = {}
+ # take note of any proxy settings needed
+ proxies = []
+ for var in ['http_proxy', 'https_proxy', 'no_proxy']:
+ # ansible vars are openshift_http_proxy, openshift_https_proxy, openshift_no_proxy
+ value = self.get_var("openshift_" + var, default=None)
+ if value:
+ proxies.append(var.upper() + "=" + quote(self.template_var(value)))
+ self.skopeo_proxy_vars = " ".join(proxies)
+
def is_active(self):
"""Skip hosts with unsupported deployment types."""
deployment_type = self.get_var("openshift_deployment_type")
@@ -249,11 +258,18 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
if not self.reachable_registries[registry]:
continue # do not keep trying unreachable registries
- args = dict(registry=registry, image=image)
- args["tls"] = "false" if registry in self.registries["insecure"] else "true"
- args["creds"] = self.skopeo_command_creds if registry == self.registries["oreg"] else ""
+ args = dict(
+ proxyvars=self.skopeo_proxy_vars,
+ tls="false" if registry in self.registries["insecure"] else "true",
+ creds=self.skopeo_command_creds if registry == self.registries["oreg"] else "",
+ registry=quote(registry),
+ image=quote(image),
+ )
- result = self.execute_module_with_retries("command", {"_raw_params": self.skopeo_command.format(**args)})
+ result = self.execute_module_with_retries("command", {
+ "_uses_shell": True,
+ "_raw_params": self.skopeo_command.format(**args),
+ })
if result.get("rc", 0) == 0 and not result.get("failed"):
return True
if result.get("rc") == 124: # RC 124 == timed out; mark unreachable
@@ -263,6 +279,10 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
def connect_to_registry(self, registry):
"""Use ansible wait_for module to test connectivity from host to registry. Returns bool."""
+ if self.skopeo_proxy_vars != "":
+ # assume we can't connect directly; just waive the test
+ return True
+
# test a simple TCP connection
host, _, port = registry.partition(":")
port = port or 443
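The net effect of the proxy change is that the skopeo invocation becomes a shell command with environment assignments prepended; roughly like the following, where the proxy URL, registry, and image are made-up examples:

    from shlex import quote  # Python 3; the check imports a six.moves equivalent

    skopeo_command = ("{proxyvars} timeout 10 skopeo inspect "
                      "--tls-verify={tls} {creds} docker://{registry}/{image}")

    proxyvars = " ".join(
        name.upper() + "=" + quote(value)
        for name, value in [("http_proxy", "http://proxy.example.com:3128")]
    )
    cmd = skopeo_command.format(
        proxyvars=proxyvars,
        tls="true",
        creds="",  # --creds=... is inserted only for the configured oreg registry
        registry=quote("registry.example.com"),
        image=quote("openshift3/ose:v3.9"),
    )
    # -> HTTP_PROXY=http://proxy.example.com:3128 timeout 10 skopeo inspect
    #    --tls-verify=true  docker://registry.example.com/openshift3/ose:v3.9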
diff --git a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
index 986a01f38..7f8c6ebdc 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
@@ -170,7 +170,7 @@ class Elasticsearch(LoggingCheck):
"""
errors = []
for pod_name in pods_by_name.keys():
- df_cmd = 'exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
+ df_cmd = '-c elasticsearch exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
disk_output = self.exec_oc(df_cmd, [], save_as_name='get_pv_diskspace.json')
lines = disk_output.splitlines()
# expecting one header looking like 'IUse% Use%' and one body line
diff --git a/roles/openshift_health_checker/openshift_checks/logging/kibana.py b/roles/openshift_health_checker/openshift_checks/logging/kibana.py
index 3b1cf8baa..16ec3a7f6 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/kibana.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/kibana.py
@@ -5,12 +5,11 @@ Module for performing checks on a Kibana logging deployment
import json
import ssl
-try:
- from urllib2 import HTTPError, URLError
- import urllib2
-except ImportError:
- from urllib.error import HTTPError, URLError
- import urllib.request as urllib2
+# pylint can't find the package when it's installed in virtualenv
+# pylint: disable=import-error,no-name-in-module
+from ansible.module_utils.six.moves.urllib import request
+# pylint: disable=import-error,no-name-in-module
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from openshift_checks.logging.logging import LoggingCheck, OpenShiftCheckException
@@ -65,7 +64,7 @@ class Kibana(LoggingCheck):
# Verify that the url is returning a valid response
try:
# We only care if the url connects and responds
- return_code = urllib2.urlopen(url, context=ctx).getcode()
+ return_code = request.urlopen(url, context=ctx).getcode()
except HTTPError as httperr:
return httperr.reason
except URLError as urlerr:
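With the six.moves import the same code path serves Python 2 and 3; stripped of the class, the URL probe reads roughly like this (assumes Ansible is importable; the URL is a placeholder):

    # pylint: disable=import-error,no-name-in-module
    from ansible.module_utils.six.moves.urllib import request
    from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError

    def probe_kibana(url, ctx=None):
        """Return the HTTP status code, or the error reason on failure."""
        try:
            return request.urlopen(url, context=ctx).getcode()
        except HTTPError as httperr:
            return httperr.reason
        except URLError as urlerr:
            return str(urlerr)

    # probe_kibana("https://kibana.example.com")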
diff --git a/roles/openshift_health_checker/openshift_checks/ovs_version.py b/roles/openshift_health_checker/openshift_checks/ovs_version.py
index 0cad19842..58a2692bd 100644
--- a/roles/openshift_health_checker/openshift_checks/ovs_version.py
+++ b/roles/openshift_health_checker/openshift_checks/ovs_version.py
@@ -3,7 +3,7 @@ Ansible module for determining if an installed version of Open vSwitch is incomp
currently installed version of OpenShift.
"""
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException
+from openshift_checks import OpenShiftCheck
from openshift_checks.mixins import NotContainerizedMixin
@@ -16,10 +16,12 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
tags = ["health"]
openshift_to_ovs_version = {
- "3.7": ["2.6", "2.7", "2.8"],
- "3.6": ["2.6", "2.7", "2.8"],
- "3.5": ["2.6", "2.7"],
- "3.4": "2.4",
+ (3, 4): "2.4",
+ (3, 5): ["2.6", "2.7"],
+ (3, 6): ["2.6", "2.7", "2.8"],
+ (3, 7): ["2.6", "2.7", "2.8"],
+ (3, 8): ["2.6", "2.7", "2.8"],
+ (3, 9): ["2.6", "2.7", "2.8"],
}
def is_active(self):
@@ -40,16 +42,5 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
return self.execute_module("rpm_version", args)
def get_required_ovs_version(self):
- """Return the correct Open vSwitch version for the current OpenShift version"""
- openshift_version_tuple = self.get_major_minor_version(self.get_var("openshift_image_tag"))
-
- if openshift_version_tuple < (3, 5):
- return self.openshift_to_ovs_version["3.4"]
-
- openshift_version = ".".join(str(x) for x in openshift_version_tuple)
- ovs_version = self.openshift_to_ovs_version.get(openshift_version)
- if ovs_version:
- return self.openshift_to_ovs_version[openshift_version]
-
- msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
- raise OpenShiftCheckException(msg.format(openshift_version))
+ """Return the correct Open vSwitch version(s) for the current OpenShift version."""
+ return self.get_required_version("Open vSwitch", self.openshift_to_ovs_version)
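Because the map is now keyed by (major, minor) tuples and delegated to the shared helper, out-of-range releases clamp to the nearest entry instead of raising; a quick sketch of that behavior using the map above:

    openshift_to_ovs_version = {
        (3, 4): "2.4",
        (3, 5): ["2.6", "2.7"],
        (3, 6): ["2.6", "2.7", "2.8"],
        (3, 7): ["2.6", "2.7", "2.8"],
        (3, 8): ["2.6", "2.7", "2.8"],
        (3, 9): ["2.6", "2.7", "2.8"],
    }

    def required_ovs(version):
        # same clamping rule as get_required_version; works here because the
        # map has no gaps between its earliest and latest keys
        earliest, latest = min(openshift_to_ovs_version), max(openshift_to_ovs_version)
        return openshift_to_ovs_version[min(max(version, earliest), latest)]

    assert required_ovs((3, 3)) == "2.4"                   # older than any entry
    assert required_ovs((3, 10)) == ["2.6", "2.7", "2.8"]  # clamps to latest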
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index f3a628e28..28aee8b35 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -1,8 +1,6 @@
"""Check that available RPM packages match the required versions."""
-import re
-
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException
+from openshift_checks import OpenShiftCheck
from openshift_checks.mixins import NotContainerizedMixin
@@ -18,6 +16,8 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
(3, 5): ["2.6", "2.7"],
(3, 6): ["2.6", "2.7", "2.8"],
(3, 7): ["2.6", "2.7", "2.8"],
+ (3, 8): ["2.6", "2.7", "2.8"],
+ (3, 9): ["2.6", "2.7", "2.8"],
}
openshift_to_docker_version = {
@@ -27,11 +27,9 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
(3, 4): "1.12",
(3, 5): "1.12",
(3, 6): "1.12",
- }
-
- # map major OpenShift release versions across releases to a common major version
- map_major_release_version = {
- 1: 3,
+ (3, 7): "1.12",
+ (3, 8): "1.12",
+ (3, 9): ["1.12", "1.13"],
}
def is_active(self):
@@ -83,48 +81,8 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
def get_required_ovs_version(self):
"""Return the correct Open vSwitch version(s) for the current OpenShift version."""
- openshift_version = self.get_openshift_version_tuple()
-
- earliest = min(self.openshift_to_ovs_version)
- latest = max(self.openshift_to_ovs_version)
- if openshift_version < earliest:
- return self.openshift_to_ovs_version[earliest]
- if openshift_version > latest:
- return self.openshift_to_ovs_version[latest]
-
- ovs_version = self.openshift_to_ovs_version.get(openshift_version)
- if not ovs_version:
- msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
- raise OpenShiftCheckException(msg.format(".".join(str(comp) for comp in openshift_version)))
-
- return ovs_version
+ return self.get_required_version("Open vSwitch", self.openshift_to_ovs_version)
def get_required_docker_version(self):
"""Return the correct Docker version(s) for the current OpenShift version."""
- openshift_version = self.get_openshift_version_tuple()
-
- earliest = min(self.openshift_to_docker_version)
- latest = max(self.openshift_to_docker_version)
- if openshift_version < earliest:
- return self.openshift_to_docker_version[earliest]
- if openshift_version > latest:
- return self.openshift_to_docker_version[latest]
-
- docker_version = self.openshift_to_docker_version.get(openshift_version)
- if not docker_version:
- msg = "There is no recommended version of Docker for the current version of OpenShift: {}"
- raise OpenShiftCheckException(msg.format(".".join(str(comp) for comp in openshift_version)))
-
- return docker_version
-
- def get_openshift_version_tuple(self):
- """Return received image tag as a normalized (X, Y) minor version tuple."""
- version = self.get_var("openshift_image_tag")
- comps = [int(component) for component in re.findall(r'\d+', version)]
-
- if len(comps) < 2:
- msg = "An invalid version of OpenShift was found for this host: {}"
- raise OpenShiftCheckException(msg.format(version))
-
- comps[0] = self.map_major_release_version.get(comps[0], comps[0])
- return tuple(comps[0:2])
+ return self.get_required_version("Docker", self.openshift_to_docker_version)
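One wrinkle worth noting: a map entry may be a single version string or a list of acceptable versions (3.9 allows Docker 1.12 or 1.13), so consumers normalize before comparing; a sketch:

    openshift_to_docker_version = {
        (3, 8): "1.12",
        (3, 9): ["1.12", "1.13"],
    }

    def allowed_docker_versions(openshift_version):
        required = openshift_to_docker_version[openshift_version]
        # normalize: a bare string means exactly one acceptable version
        return required if isinstance(required, list) else [required]

    assert allowed_docker_versions((3, 8)) == ["1.12"]
    assert allowed_docker_versions((3, 9)) == ["1.12", "1.13"]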
diff --git a/roles/openshift_health_checker/test/kibana_test.py b/roles/openshift_health_checker/test/kibana_test.py
index 04a5e89c4..750d4b9e9 100644
--- a/roles/openshift_health_checker/test/kibana_test.py
+++ b/roles/openshift_health_checker/test/kibana_test.py
@@ -1,12 +1,10 @@
import pytest
import json
-try:
- import urllib2
- from urllib2 import HTTPError, URLError
-except ImportError:
- from urllib.error import HTTPError, URLError
- import urllib.request as urllib2
+# pylint can't find the package when it's installed in virtualenv
+from ansible.module_utils.six.moves.urllib import request # pylint: disable=import-error
+# pylint: disable=import-error
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from openshift_checks.logging.kibana import Kibana, OpenShiftCheckException
@@ -202,7 +200,7 @@ def test_verify_url_external_failure(lib_result, expect, monkeypatch):
if type(lib_result) is int:
return _http_return(lib_result)
raise lib_result
- monkeypatch.setattr(urllib2, 'urlopen', urlopen)
+ monkeypatch.setattr(request, 'urlopen', urlopen)
check = Kibana()
check._get_kibana_url = lambda: 'url'
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
index 0238f49d5..80c7a0541 100644
--- a/roles/openshift_health_checker/test/ovs_version_test.py
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -1,26 +1,7 @@
import pytest
-from openshift_checks.ovs_version import OvsVersion, OpenShiftCheckException
-
-
-def test_openshift_version_not_supported():
- def execute_module(*_):
- return {}
-
- openshift_release = '111.7.0'
-
- task_vars = dict(
- openshift=dict(common=dict()),
- openshift_release=openshift_release,
- openshift_image_tag='v' + openshift_release,
- openshift_deployment_type='origin',
- openshift_service_type='origin'
- )
-
- with pytest.raises(OpenShiftCheckException) as excinfo:
- OvsVersion(execute_module, task_vars).run()
-
- assert "no recommended version of Open vSwitch" in str(excinfo.value)
+from openshift_checks.ovs_version import OvsVersion
+from openshift_checks import OpenShiftCheckException
def test_invalid_openshift_release_format():
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index d2916f617..868b4bd12 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -1,6 +1,7 @@
import pytest
-from openshift_checks.package_version import PackageVersion, OpenShiftCheckException
+from openshift_checks.package_version import PackageVersion
+from openshift_checks import OpenShiftCheckException
def task_vars_for(openshift_release, deployment_type):
@@ -18,7 +19,7 @@ def task_vars_for(openshift_release, deployment_type):
def test_openshift_version_not_supported():
check = PackageVersion(None, task_vars_for("1.2.3", 'origin'))
- check.get_openshift_version_tuple = lambda: (3, 4, 1) # won't be in the dict
+ check.get_major_minor_version = lambda: (3, 4, 1) # won't be in the dict
with pytest.raises(OpenShiftCheckException) as excinfo:
check.get_required_ovs_version()
diff --git a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
index 77f020357..fef945d51 100644
--- a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
+++ b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
@@ -1,4 +1,10 @@
---
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-hosted-ansible-XXXXXX
+ register: mktempHosted
+ changed_when: False
+ check_mode: no
+
- name: Generate GlusterFS registry endpoints
template:
src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-endpoints.yml.j2"
@@ -14,3 +20,10 @@
with_items:
- "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"
- "{{ mktempHosted.stdout }}/glusterfs-registry-endpoints.yml"
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktempHosted.stdout }}"
+ state: absent
+ changed_when: False
+ check_mode: no
diff --git a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
index cc3159a32..0786e2d2f 100644
--- a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
@@ -102,7 +102,7 @@ objects:
parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "openshift3/"
+ value: "registry.access.redhat.com/openshift3/"
- description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"'
name: IMAGE_BASENAME
value: "registry-console"
diff --git a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
index 9f2e6125d..ccea54aaf 100644
--- a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
@@ -102,7 +102,7 @@ objects:
parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "openshift3/"
+ value: "registry.access.redhat.com/openshift3/"
- description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"'
name: IMAGE_BASENAME
value: "registry-console"
diff --git a/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml
index f04ce06d3..15ad4e9af 100644
--- a/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml
@@ -102,7 +102,7 @@ objects:
parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "openshift3/"
+ value: "registry.access.redhat.com/openshift3/"
- description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"'
name: IMAGE_BASENAME
value: "registry-console"
diff --git a/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml
index c178cf432..7acefa0f0 100644
--- a/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml
@@ -102,7 +102,7 @@ objects:
parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "openshift3/"
+ value: "registry.access.redhat.com/openshift3/"
- description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"'
name: IMAGE_BASENAME
value: "registry-console"
diff --git a/roles/openshift_logging/tasks/annotate_ops_projects.yaml b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
index 4a2ee64f0..6fdba6580 100644
--- a/roles/openshift_logging/tasks/annotate_ops_projects.yaml
+++ b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
@@ -12,6 +12,7 @@
separator: '#'
content:
metadata#annotations#openshift.io/logging.ui.hostname: "{{ openshift_logging_kibana_ops_hostname }}"
+ metadata#annotations#openshift.io/logging.data.prefix: ".operations"
with_items: "{{ __logging_ops_projects.stdout.split(' ') }}"
loop_control:
loop_var: project
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index fbc3e3fd1..ced7397b5 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -131,13 +131,13 @@
when:
not openshift_logging_install_eventrouter | default(false) | bool
-# Update asset config in openshift-web-console namespace
-- name: Remove Kibana route information from web console asset config
+# Update console config in openshift-web-console namespace
+- name: Remove Kibana route information from the web console config
include_role:
name: openshift_web_console
- tasks_from: update_asset_config.yml
+ tasks_from: update_console_config.yml
vars:
- asset_config_edits:
- - key: loggingPublicURL
+ console_config_edits:
+ - key: clusterInfo#loggingPublicURL
value: ""
when: openshift_web_console_install | default(true) | bool
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index 0d7f8c056..a40449bf6 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -19,7 +19,7 @@
command: >
{{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
--key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt
- --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test
+ --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test --overwrite=false
check_mode: no
when:
- not ca_key_file.stat.exists
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index ebd2d747b..3afd8680f 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -87,7 +87,7 @@
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
- openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}"
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}"
openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if outer_item.0.storageGroups | default([]) | length == 0 else outer_item.0.storageGroups }}"
_es_containers: "{{ outer_item.0.containers}}"
@@ -114,7 +114,7 @@
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
- openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}"
with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
loop_control:
@@ -151,7 +151,7 @@
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
- openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name | default() }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
@@ -193,7 +193,7 @@
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
- openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name | default() }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
@@ -321,9 +321,9 @@
- name: Add Kibana route information to web console asset config
include_role:
name: openshift_web_console
- tasks_from: update_asset_config.yml
+ tasks_from: update_console_config.yml
vars:
- asset_config_edits:
- - key: loggingPublicURL
+ console_config_edits:
+ - key: clusterInfo#loggingPublicURL
value: "https://{{ openshift_logging_kibana_hostname }}"
when: openshift_web_console_install | default(true) | bool
diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml
index bc817075d..d28d1d160 100644
--- a/roles/openshift_logging/tasks/procure_server_certs.yaml
+++ b/roles/openshift_logging/tasks/procure_server_certs.yaml
@@ -30,7 +30,7 @@
{{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
--key={{generated_certs_dir}}/{{cert_info.procure_component}}.key --cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
--hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key
- --signer-serial={{generated_certs_dir}}/ca.serial.txt
+ --signer-serial={{generated_certs_dir}}/ca.serial.txt --overwrite=false
check_mode: no
when:
- cert_info.hostnames is defined
diff --git a/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml b/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml
index 9182bddb2..16de6f252 100644
--- a/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml
+++ b/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml
@@ -1,6 +1,6 @@
---
- command: >
- oc get pod -l component=es,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+ oc get pod -l component=es,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
register: _cluster_pods
- name: "Getting ES version for logging-es cluster"
@@ -10,7 +10,7 @@
when: _cluster_pods.stdout_lines | count > 0
- command: >
- oc get pod -l component=es-ops,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+ oc get pod -l component=es-ops,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
register: _ops_cluster_pods
- name: "Getting ES version for logging-es-ops cluster"
diff --git a/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
index d55beec86..6bce13d1d 100644
--- a/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
+++ b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
@@ -19,7 +19,7 @@
## get all pods for the cluster
- command: >
- oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+ oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
register: _cluster_pods
- name: "Disable shard balancing for logging-{{ _cluster_component }} cluster"
@@ -64,7 +64,7 @@
## we may need a new first pod to run against -- fetch them all again
- command: >
- oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+ oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
register: _cluster_pods
- name: "Enable shard balancing for logging-{{ _cluster_component }} cluster"
diff --git a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
index 7870f43e2..4564f33dd 100644
--- a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
@@ -8,8 +8,10 @@
# TODO: If the sdn package isn't already installed this will install it, we
# should fix that
-- name: Upgrade master packages
- package: name={{ master_pkgs | join(',') }} state=present
+- name: Upgrade master packages - yum
+  command: >
+    yum install -y {{ master_pkgs | join(' ') }}
+    {{ ' --exclude *' ~ openshift_service_type ~ '*3.9*' if openshift_release | version_compare('3.9','<') else '' }}
vars:
master_pkgs:
- "{{ openshift_service_type }}{{ openshift_pkg_version | default('') }}"
@@ -17,6 +19,21 @@
- "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}"
- "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version | default('') }}"
- "{{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }}"
- - "tuned-profiles-{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}"
register: result
until: result is succeeded
+ when: ansible_pkg_mgr == 'yum'
+
+- name: Upgrade master packages - dnf
+ dnf:
+ name: "{{ master_pkgs | join(',') }}"
+ state: present
+ vars:
+ master_pkgs:
+    - "{{ openshift_service_type }}{{ openshift_pkg_version | default('') }}"
+    - "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') }}"
+    - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}"
+    - "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version | default('') }}"
+    - "{{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }}"
+ register: result
+ until: result is succeeded
+ when: ansible_pkg_mgr == 'dnf'
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index 0866fe0d2..0dd5d1621 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -74,10 +74,10 @@
- name: Add metrics route information to web console asset config
include_role:
name: openshift_web_console
- tasks_from: update_asset_config.yml
+ tasks_from: update_console_config.yml
vars:
- asset_config_edits:
- - key: metricsPublicURL
+ console_config_edits:
+ - key: clusterInfo#metricsPublicURL
value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics"
when: openshift_web_console_install | default(true) | bool
diff --git a/roles/openshift_metrics/tasks/oc_apply.yaml b/roles/openshift_metrics/tasks/oc_apply.yaml
index 8ccfb7192..057963c1a 100644
--- a/roles/openshift_metrics/tasks/oc_apply.yaml
+++ b/roles/openshift_metrics/tasks/oc_apply.yaml
@@ -16,7 +16,9 @@
apply -f {{ file_name }}
-n {{namespace}}
register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
+ failed_when:
+ - "'error' in generation_apply.stderr"
+ - "generation_apply.rc != 0"
changed_when: no
- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
@@ -28,5 +30,7 @@
register: version_changed
vars:
init_version: "{{ (generation_init is defined) | ternary(generation_init.stdout, '0') }}"
- failed_when: "'error' in version_changed.stderr"
+ failed_when:
+ - "'error' in version_changed.stderr"
+ - "version_changed.rc != 0"
changed_when: version_changed.stdout | int > init_version | int
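
A list under failed_when is AND-ed, so each apply now fails only when stderr mentions an error and the return code is non-zero; warnings printed to stderr no longer abort the play. The equivalent single-expression form, for comparison:

    failed_when: "'error' in generation_apply.stderr and generation_apply.rc != 0"
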
diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
index 610c7b4e5..1664e9975 100644
--- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml
+++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
@@ -19,13 +19,13 @@
clusterrolebinding/hawkular-metrics
changed_when: delete_metrics.stdout != 'No resources found'
-# Update asset config in openshift-web-console namespace
-- name: Remove metrics route information from web console asset config
+# Update the web config in openshift-web-console namespace
+- name: Remove metrics route information from the web console config
include_role:
name: openshift_web_console
- tasks_from: update_asset_config.yml
+ tasks_from: update_console_config.yml
vars:
- asset_config_edits:
- - key: metricsPublicURL
+ console_config_edits:
+ - key: clusterInfo#metricsPublicURL
value: ""
when: openshift_web_console_install | default(true) | bool
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index c1fab4382..5864d3c03 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -48,6 +48,12 @@ openshift_node_kubelet_args_dict:
cloud-config:
- "{{ openshift_config_base ~ '/cloudprovider/gce.conf' }}"
node-labels: "{{ l_node_kubelet_node_labels }}"
+ azure:
+ cloud-provider:
+ - azure
+ cloud-config:
+ - "{{ openshift_config_base ~ '/cloudprovider/azure.conf' }}"
+ node-labels: "{{ l_node_kubelet_node_labels }}"
undefined:
node-labels: "{{ l_node_kubelet_node_labels }}"
@@ -71,6 +77,18 @@ r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }
l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+openshift_node_syscon_auth_mounts_l:
+- type: bind
+ source: "{{ oreg_auth_credentials_path }}"
+ destination: "/root/.docker"
+ options:
+ - ro
+
+# Used when we need to add new mounts in the future, or when the user wants to
+# mount extra data; entries should use the same format as auth_mounts_l above.
+openshift_node_syscon_add_mounts_l: []
+
+
openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}"
openshift_node_image_dict:
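
openshift_node_syscon_add_mounts_l accepts extra mounts in the same shape as the auth mount above. A sketch of a user-supplied entry (paths are illustrative):

    openshift_node_syscon_add_mounts_l:
    - type: bind
      source: /srv/extra-data
      destination: /srv/extra-data
      options:
      - rw
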
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index 06b879050..008f209d7 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -14,4 +14,23 @@
- "DNS_DOMAIN={{ openshift.common.dns_domain }}"
- "DOCKER_SERVICE={{ openshift_docker_service_name }}.service"
- "MASTER_SERVICE={{ openshift_service_type }}.service"
+ - 'ADDTL_MOUNTS={{ l_node_syscon_add_mounts2 }}'
state: latest
+ vars:
+ # We need to evaluate some variables here to ensure
+ # l_bind_docker_reg_auth is evaluated after registry_auth.yml has been
+ # processed.
+
+ # Determine if we want to include auth credentials mount.
+ l_node_syscon_auth_mounts_l: "{{ l_bind_docker_reg_auth | ternary(openshift_node_syscon_auth_mounts_l,[]) }}"
+
+ # Join any user-provided mounts and auth_mounts into a combined list.
+ l_node_syscon_add_mounts_l: "{{ openshift_node_syscon_add_mounts_l | union(l_node_syscon_auth_mounts_l) }}"
+
+ # We must prepend a ',' here to ensure the value is inserted properly into an
+ # existing json list in the container's config.json
+ # lib_utils_oo_l_of_d_to_csv is a custom filter plugin in roles/lib_utils/oo_filters.py
+ l_node_syscon_add_mounts: ",{{ l_node_syscon_add_mounts_l | lib_utils_oo_l_of_d_to_csv }}"
+ # If we have just a ',' then both mount lists were empty, so we don't want to
+ # add anything to config.json.
+ l_node_syscon_add_mounts2: "{{ (l_node_syscon_add_mounts != ',') | bool | ternary(l_node_syscon_add_mounts,'') }}"
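
Assuming lib_utils_oo_l_of_d_to_csv serializes each mount dict to JSON and joins them with commas (its implementation lives in roles/lib_utils/oo_filters.py), the chain above turns the auth mount into a fragment that can be spliced into the existing mount list in the container's config.json:

    # input (one auth mount):
    #   [{'type': 'bind', 'source': '/root/.docker',
    #     'destination': '/root/.docker', 'options': ['ro']}]
    # assumed filter output, with the leading ',' prepended by the task above:
    #   ,{"type": "bind", "source": "/root/.docker", "destination": "/root/.docker", "options": ["ro"]}
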
diff --git a/roles/openshift_node/tasks/upgrade/config_changes.yml b/roles/openshift_node/tasks/upgrade/config_changes.yml
index 721656117..dd9183382 100644
--- a/roles/openshift_node/tasks/upgrade/config_changes.yml
+++ b/roles/openshift_node/tasks/upgrade/config_changes.yml
@@ -21,6 +21,12 @@
path: "/var/lib/dockershim/sandbox/"
state: absent
+# https://bugzilla.redhat.com/show_bug.cgi?id=1518912
+- name: Clean up IPAM data
+ file:
+ path: "/var/lib/cni/networks/openshift-sdn/"
+ state: absent
+
# Disable Swap Block (pre)
- block:
- name: Remove swap entries from /etc/fstab
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 5f2a94ea2..7d817463c 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -32,7 +32,7 @@ masterClientConnectionOverrides:
contentType: application/vnd.kubernetes.protobuf
burst: 200
qps: 100
-masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig
+masterKubeConfig: system:node:{{ openshift.common.hostname | lower }}.kubeconfig
{% if openshift_node_use_openshift_sdn | bool %}
networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
{% endif %}
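
Node names in Kubernetes are lower-case DNS-1123 names, so the kubeconfig filename has to match the lower-cased hostname. Rendered with a mixed-case host (value illustrative):

    # with openshift.common.hostname == 'Node01.Example.COM', the line renders as:
    masterKubeConfig: system:node:node01.example.com.kubeconfig
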
diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml
index 5f73f3bdc..13d9fd718 100644
--- a/roles/openshift_node_certificates/tasks/main.yml
+++ b/roles/openshift_node_certificates/tasks/main.yml
@@ -18,9 +18,9 @@
stat:
path: "{{ openshift.common.config_base }}/node/{{ item }}"
with_items:
- - "system:node:{{ openshift.common.hostname }}.crt"
- - "system:node:{{ openshift.common.hostname }}.key"
- - "system:node:{{ openshift.common.hostname }}.kubeconfig"
+ - "system:node:{{ openshift.common.hostname | lower }}.crt"
+ - "system:node:{{ openshift.common.hostname | lower }}.key"
+ - "system:node:{{ openshift.common.hostname | lower }}.kubeconfig"
- ca.crt
- server.key
- server.crt
@@ -59,16 +59,16 @@
--certificate-authority {{ legacy_ca_certificate }}
{% endfor %}
--certificate-authority={{ openshift_ca_cert }}
- --client-dir={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}
+ --client-dir={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}
--groups=system:nodes
--master={{ hostvars[openshift_ca_host].openshift.master.api_url }}
--signer-cert={{ openshift_ca_cert }}
--signer-key={{ openshift_ca_key }}
--signer-serial={{ openshift_ca_serial }}
- --user=system:node:{{ hostvars[item].openshift.common.hostname }}
+ --user=system:node:{{ hostvars[item].openshift.common.hostname | lower }}
--expire-days={{ openshift_node_cert_expire_days }}
args:
- creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}"
+ creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}"
with_items: "{{ hostvars
| lib_utils_oo_select_keys(groups['oo_nodes_to_config'])
| lib_utils_oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}"
@@ -78,16 +78,16 @@
- name: Generate the node server certificate
command: >
{{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-server-cert
- --cert={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt
- --key={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.key
+ --cert={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}/server.crt
+ --key={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}/server.key
--expire-days={{ openshift_node_cert_expire_days }}
--overwrite=true
- --hostnames={{ hostvars[item].openshift.common.hostname }},{{ hostvars[item].openshift.common.public_hostname }},{{ hostvars[item].openshift.common.ip }},{{ hostvars[item].openshift.common.public_ip }}
+ --hostnames={{ hostvars[item].openshift.common.hostname }},{{ hostvars[item].openshift.common.hostname | lower }},{{ hostvars[item].openshift.common.public_hostname }},{{ hostvars[item].openshift.common.public_hostname | lower }},{{ hostvars[item].openshift.common.ip }},{{ hostvars[item].openshift.common.public_ip }}
--signer-cert={{ openshift_ca_cert }}
--signer-key={{ openshift_ca_key }}
--signer-serial={{ openshift_ca_serial }}
args:
- creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt"
+ creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname | lower }}/server.crt"
with_items: "{{ hostvars
| lib_utils_oo_select_keys(groups['oo_nodes_to_config'])
| lib_utils_oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}"
diff --git a/roles/openshift_node_certificates/vars/main.yml b/roles/openshift_node_certificates/vars/main.yml
index 17ad8106d..12a6d3f94 100644
--- a/roles/openshift_node_certificates/vars/main.yml
+++ b/roles/openshift_node_certificates/vars/main.yml
@@ -1,7 +1,7 @@
---
openshift_generated_configs_dir: "{{ openshift.common.config_base }}/generated-configs"
openshift_node_cert_dir: "{{ openshift.common.config_base }}/node"
-openshift_node_cert_subdir: "node-{{ openshift.common.hostname }}"
+openshift_node_cert_subdir: "node-{{ openshift.common.hostname | lower }}"
openshift_node_config_dir: "{{ openshift.common.config_base }}/node"
openshift_node_generated_config_dir: "{{ openshift_generated_configs_dir }}/{{ openshift_node_cert_subdir }}"
diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2
index 1be5d3a62..8e7c6288a 100644
--- a/roles/openshift_openstack/templates/heat_stack.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2
@@ -523,7 +523,7 @@ resources:
floating_network:
if:
- no_floating
- - null
+ - ''
- {{ openshift_openstack_external_network_name }}
{% if openshift_openstack_provider_network_name %}
attach_float_net: false
@@ -589,8 +589,13 @@ resources:
secgrp:
- { get_resource: lb-secgrp }
- { get_resource: common-secgrp }
-{% if not openshift_openstack_provider_network_name %}
- floating_network: {{ openshift_openstack_external_network_name }}
+ floating_network:
+ if:
+ - no_floating
+ - ''
+ - {{ openshift_openstack_external_network_name }}
+{% if openshift_openstack_provider_network_name %}
+ attach_float_net: false
{% endif %}
volume_size: {{ openshift_openstack_lb_volume_size }}
{% if not openshift_openstack_provider_network_name %}
@@ -655,7 +660,7 @@ resources:
floating_network:
if:
- no_floating
- - null
+ - ''
- {{ openshift_openstack_external_network_name }}
{% if openshift_openstack_provider_network_name %}
attach_float_net: false
@@ -725,7 +730,7 @@ resources:
floating_network:
if:
- no_floating
- - null
+ - ''
- {{ openshift_openstack_external_network_name }}
{% if openshift_openstack_provider_network_name %}
attach_float_net: false
@@ -792,8 +797,13 @@ resources:
{% endif %}
- { get_resource: infra-secgrp }
- { get_resource: common-secgrp }
-{% if not openshift_openstack_provider_network_name %}
- floating_network: {{ openshift_openstack_external_network_name }}
+ floating_network:
+ if:
+ - no_floating
+ - ''
+ - {{ openshift_openstack_external_network_name }}
+{% if openshift_openstack_provider_network_name %}
+ attach_float_net: false
{% endif %}
volume_size: {{ openshift_openstack_infra_volume_size }}
{% if openshift_openstack_infra_server_group_policies|length > 0 %}
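
Every port in the template now goes through the same Heat `if` intrinsic, passing an empty string rather than null when no_floating is set; the nested template's floating_network parameter is a string with a default of '' (see the heat_stack_server.yaml.j2 hunk below), so '' is the value that cleanly means "no floating IP". The shape of the conditional, with an illustrative false-branch value:

    floating_network:
      if:
      - no_floating    # condition defined elsewhere in the template
      - ''             # no floating IP: pass an empty string, not null
      - public-net     # otherwise: the external network name (illustrative)
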
diff --git a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
index 1e73c9e1c..29b09f3c9 100644
--- a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
@@ -102,13 +102,11 @@ parameters:
label: Attach-float-net
description: A switch for floating network port connection
-{% if not openshift_openstack_provider_network_name %}
floating_network:
type: string
default: ''
label: Floating network
description: Network to allocate floating IP from
-{% endif %}
availability_zone:
type: string
diff --git a/roles/openshift_persistent_volumes/tasks/pv.yml b/roles/openshift_persistent_volumes/tasks/pv.yml
index ef9ab7f5f..865269b7a 100644
--- a/roles/openshift_persistent_volumes/tasks/pv.yml
+++ b/roles/openshift_persistent_volumes/tasks/pv.yml
@@ -13,5 +13,5 @@
--config={{ mktemp.stdout }}/admin.kubeconfig
register: pv_create_output
when: persistent_volumes | length > 0
- failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout)
+ failed_when: "('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout) and pv_create_output.rc != 0"
changed_when: ('created' in pv_create_output.stdout)
diff --git a/roles/openshift_persistent_volumes/tasks/pvc.yml b/roles/openshift_persistent_volumes/tasks/pvc.yml
index 2c5519192..6c12d128c 100644
--- a/roles/openshift_persistent_volumes/tasks/pvc.yml
+++ b/roles/openshift_persistent_volumes/tasks/pvc.yml
@@ -13,5 +13,5 @@
--config={{ mktemp.stdout }}/admin.kubeconfig
register: pvc_create_output
when: persistent_volume_claims | length > 0
- failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout)
+ failed_when: "('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout) and pvc_create_output.rc != 0"
changed_when: ('created' in pvc_create_output.stdout)
diff --git a/roles/openshift_provisioners/tasks/oc_apply.yaml b/roles/openshift_provisioners/tasks/oc_apply.yaml
index a4ce53eae..239e1f1cc 100644
--- a/roles/openshift_provisioners/tasks/oc_apply.yaml
+++ b/roles/openshift_provisioners/tasks/oc_apply.yaml
@@ -15,7 +15,9 @@
apply -f {{ file_name }}
-n {{ namespace }}
register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
+ failed_when:
+ - "'error' in generation_apply.stderr"
+ - "generation_apply.rc != 0"
changed_when: no
- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
@@ -36,7 +38,9 @@
delete -f {{ file_name }}
-n {{ namespace }}
register: generation_delete
- failed_when: "'error' in generation_delete.stderr"
+ failed_when:
+ - "'error' in generation_delete.stderr"
+ - "generation_delete.rc != 0"
changed_when: generation_delete.rc == 0
when: generation_apply.rc != 0
@@ -46,6 +50,8 @@
apply -f {{ file_name }}
-n {{ namespace }}
register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
+ failed_when:
+ - "'error' in generation_apply.stderr"
+ - "generation_apply.rc != 0"
changed_when: generation_apply.rc == 0
when: generation_apply.rc != 0
diff --git a/roles/openshift_version/tasks/check_available_rpms.yml b/roles/openshift_version/tasks/check_available_rpms.yml
index bdbc63d27..fea0daf77 100644
--- a/roles/openshift_version/tasks/check_available_rpms.yml
+++ b/roles/openshift_version/tasks/check_available_rpms.yml
@@ -1,7 +1,7 @@
---
- name: Get available {{ openshift_service_type}} version
repoquery:
- name: "{{ openshift_service_type}}"
+ name: "{{ openshift_service_type}}{{ '-' ~ openshift_release ~ '*' if openshift_release is defined else '' }}"
ignore_excluders: true
register: rpm_results
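
Appending the release glob pins repoquery to the requested minor stream instead of whatever version sorts highest. With assumed values, the rendered package name:

    - name: Show the rendered repoquery target (sketch)
      debug:
        msg: "{{ openshift_service_type }}{{ '-' ~ openshift_release ~ '*' if openshift_release is defined else '' }}"
      vars:
        openshift_service_type: origin
        openshift_release: "3.9"
    # msg renders as: origin-3.9*
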
diff --git a/roles/openshift_version/tasks/first_master_containerized_version.yml b/roles/openshift_version/tasks/first_master_containerized_version.yml
index e02a75eab..3ed1d2cfe 100644
--- a/roles/openshift_version/tasks/first_master_containerized_version.yml
+++ b/roles/openshift_version/tasks/first_master_containerized_version.yml
@@ -7,6 +7,7 @@
when:
- openshift_image_tag is defined
- openshift_version is not defined
+ - not (openshift_version_reinit | default(false))
- name: Set containerized version to configure if openshift_release specified
set_fact:
@@ -20,7 +21,7 @@
docker run --rm {{ openshift_cli_image }}:latest version
register: cli_image_version
when:
- - openshift_version is not defined
+ - openshift_version is not defined or openshift_version_reinit | default(false)
- not openshift_use_crio_only
# Origin latest = pre-release version (i.e. v1.3.0-alpha.1-321-gb095e3a)
@@ -34,7 +35,7 @@
- set_fact:
openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
- when: openshift_version is not defined
+ when: openshift_version is not defined or openshift_version_reinit | default(false)
# If we got an openshift_version like "3.2", lookup the latest 3.2 container version
# and use that value instead.
diff --git a/roles/openshift_version/tasks/first_master_rpm_version.yml b/roles/openshift_version/tasks/first_master_rpm_version.yml
index 264baca65..5d92f90c6 100644
--- a/roles/openshift_version/tasks/first_master_rpm_version.yml
+++ b/roles/openshift_version/tasks/first_master_rpm_version.yml
@@ -6,6 +6,7 @@
when:
- openshift_pkg_version is defined
- openshift_version is not defined
+ - not (openshift_version_reinit | default(false))
# These tasks should only be run against masters and nodes
- name: Set openshift_version for rpm installation
@@ -13,4 +14,7 @@
- set_fact:
openshift_version: "{{ rpm_results.results.versions.available_versions.0 }}"
- when: openshift_version is not defined
+ when: openshift_version is not defined or ( openshift_version_reinit | default(false) )
+- set_fact:
+ openshift_pkg_version: "-{{ rpm_results.results.versions.available_versions.0 }}"
+ when: openshift_version_reinit | default(false)
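
When openshift_version_reinit is set, the version facts are re-derived from the repo rather than reused, and openshift_pkg_version gains the leading '-' that the package-name templates expect. A sketch with an assumed repoquery result:

    # if rpm_results.results.versions.available_versions.0 == '3.9.14', then:
    #   openshift_version:     "3.9.14"
    #   openshift_pkg_version: "-3.9.14"   # appended as e.g. origin-3.9.14
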
diff --git a/roles/openshift_version/tasks/masters_and_nodes.yml b/roles/openshift_version/tasks/masters_and_nodes.yml
index fbeb22d8b..eddd5ff42 100644
--- a/roles/openshift_version/tasks/masters_and_nodes.yml
+++ b/roles/openshift_version/tasks/masters_and_nodes.yml
@@ -6,9 +6,12 @@
include_tasks: check_available_rpms.yml
- name: Fail if rpm version and docker image version are different
fail:
- msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}"
+ msg: "OCP rpm version {{ rpm_results.results.versions.available_versions.0 }} is different from OCP image version {{ openshift_version }}"
# Both versions have the same string representation
- when: rpm_results.results.versions.available_versions.0 != openshift_version
+ when:
+ - openshift_version not in rpm_results.results.versions.available_versions.0
+ - openshift_version_reinit | default(false)
+
# block when
when: not openshift_is_atomic | bool
diff --git a/roles/openshift_web_console/defaults/main.yml b/roles/openshift_web_console/defaults/main.yml
index 4f395398c..c747f73a8 100644
--- a/roles/openshift_web_console/defaults/main.yml
+++ b/roles/openshift_web_console/defaults/main.yml
@@ -1,3 +1,2 @@
---
-# TODO: This is temporary and will be updated to use taints and tolerations so that the console runs on the masters
-openshift_web_console_nodeselector: {"region":"infra"}
+openshift_web_console_nodeselector: "{{ openshift_hosted_infra_selector | default('region=infra') | map_from_pairs }}"
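
map_from_pairs (a filter shipped in this repo's lib_utils) converts the usual 'key=value' selector string into the dict the template expects, so the console now follows the cluster's infra selector instead of a hard-coded one. Its assumed behavior, by example:

    - debug:
        msg: "{{ 'region=infra' | map_from_pairs }}"
    # expected output (assumed): {"region": "infra"}
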
diff --git a/roles/openshift_web_console/tasks/install.yml b/roles/openshift_web_console/tasks/install.yml
index 12916961b..cc5eef47d 100644
--- a/roles/openshift_web_console/tasks/install.yml
+++ b/roles/openshift_web_console/tasks/install.yml
@@ -21,43 +21,123 @@
node_selector:
- ""
-- name: Make temp directory for asset config files
+- name: Make temp directory for web console templates
command: mktemp -d /tmp/console-ansible-XXXXXX
register: mktemp
changed_when: False
-- name: Copy asset config template to temp directory
+- name: Copy admin client config
+ command: >
+ cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: false
+
+- name: Copy web console templates to temp directory
copy:
src: "{{ __console_files_location }}/{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
with_items:
- "{{ __console_template_file }}"
+ - "{{ __console_rbac_file }}"
- "{{ __console_config_file }}"
-- name: Update asset config properties
- yedit:
- src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
- edits:
- - key: logoutURL
- value: "{{ openshift.master.logout_url | default('') }}"
- - key: publicURL
- # Must have a trailing slash
- value: "{{ openshift.master.public_console_url }}/"
- - key: masterPublicURL
- value: "{{ openshift.master.public_api_url }}"
+# Check whether a webconsole-config config map already exists. If so, use its
+# contents so we don't overwrite changes.
+- name: Read the existing web console config map
+ oc_configmap:
+ namespace: openshift-web-console
+ name: webconsole-config
+ state: list
+ register: webconsole_config_map
+
+- set_fact:
+ existing_config_map_data: "{{ webconsole_config_map.results.results[0].data | default({}) }}"
+
+- name: Copy the existing web console config to temp directory
+ copy:
+ content: "{{ existing_config_map_data['webconsole-config.yaml'] }}"
+ dest: "{{ mktemp.stdout }}/{{ __console_config_file }}"
+ when: existing_config_map_data['webconsole-config.yaml'] is defined
+
+# Generate a new config when a config map is not defined.
+- when: existing_config_map_data['webconsole-config.yaml'] is not defined
+ block:
+ # Migrate the previous master-config.yaml assetConfig, if it exists, into the
+ # new web console config map.
+ - name: Read existing assetConfig in master-config.yaml
+ slurp:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ register: master_config_output
+
+ - set_fact:
+ config_to_migrate: "{{ master_config_output.content | b64decode | from_yaml }}"
+
+ # Update properties in the config template based on inventory vars when the
+ # asset config does not exist.
+ - name: Set web console config properties from inventory variables
+ yedit:
+ src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
+ edits:
+ - key: clusterInfo#consolePublicURL
+ # Must have a trailing slash
+ value: "{{ openshift.master.public_console_url }}/"
+ - key: clusterInfo#masterPublicURL
+ value: "{{ openshift.master.public_api_url }}"
+ - key: clusterInfo#logoutPublicURL
+ value: "{{ openshift.master.logout_url | default('') }}"
+ - key: features#inactivityTimeoutMinutes
+ value: "{{ openshift_web_console_inactivity_timeout_minutes | default(0) }}"
+ - key: features#clusterResourceOverridesEnabled
+ value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(false) }}"
+ - key: extensions#scriptURLs
+ value: "{{ openshift_web_console_extension_script_urls | default([]) }}"
+ - key: extensions#stylesheetURLs
+ value: "{{ openshift_web_console_extension_stylesheet_urls | default([]) }}"
+ - key: extensions#properties
+ value: "{{ openshift_web_console_extension_properties | default({}) }}"
+ separator: '#'
+ state: present
+ when: config_to_migrate.assetConfig is not defined
+
+ - name: Migrate assetConfig from master-config.yaml
+ yedit:
+ src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
+ edits:
+ - key: clusterInfo#consolePublicURL
+ value: "{{ config_to_migrate.assetConfig.publicURL }}"
+ - key: clusterInfo#masterPublicURL
+ value: "{{ config_to_migrate.assetConfig.masterPublicURL }}"
+ - key: clusterInfo#logoutPublicURL
+ value: "{{ config_to_migrate.assetConfig.logoutURL | default('') }}"
+ - key: clusterInfo#metricsPublicURL
+ value: "{{ config_to_migrate.assetConfig.metricsPublicURL | default('') }}"
+ - key: clusterInfo#loggingPublicURL
+ value: "{{ config_to_migrate.assetConfig.loggingPublicURL | default('') }}"
+ - key: servingInfo#maxRequestsInFlight
+ value: "{{ config_to_migrate.assetConfig.servingInfo.maxRequestsInFlight | default(0) }}"
+ - key: servingInfo#requestTimeoutSeconds
+ value: "{{ config_to_migrate.assetConfig.servingInfo.requestTimeoutSeconds | default(0) }}"
+ separator: '#'
+ state: present
+ when: config_to_migrate.assetConfig is defined
- slurp:
src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
- register: config
+ register: updated_console_config
+
+- name: Reconcile with the web console RBAC file
+ shell: >
+ {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __console_rbac_file }}" --config={{ mktemp.stdout }}/admin.kubeconfig
+ | {{ openshift_client_binary }} auth reconcile --config={{ mktemp.stdout }}/admin.kubeconfig -f -
-- name: Apply template file
+- name: Apply the web console template file
shell: >
{{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __console_template_file }}"
- --param API_SERVER_CONFIG="{{ config['content'] | b64decode }}"
+ --param API_SERVER_CONFIG="{{ updated_console_config['content'] | b64decode }}"
--param IMAGE="{{ openshift_web_console_prefix }}{{ openshift_web_console_image_name }}:{{ openshift_web_console_version }}"
--param NODE_SELECTOR={{ openshift_web_console_nodeselector | to_json | quote }}
--param REPLICA_COUNT="{{ openshift_web_console_replica_count }}"
- | {{ openshift_client_binary }} apply -f -
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ | {{ openshift_client_binary }} apply --config={{ mktemp.stdout }}/admin.kubeconfig -f -
- name: Verify that the web console is running
command: >
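
The inventory-driven branch above introduces several new console settings. An illustrative group_vars sketch exercising them (variable names come from the edits above; values are examples only):

    openshift_web_console_inactivity_timeout_minutes: 30
    openshift_web_console_cluster_resource_overrides_enabled: false
    openshift_web_console_extension_script_urls:
    - https://example.com/console-extension.js
    openshift_web_console_extension_stylesheet_urls: []
    openshift_web_console_extension_properties: {}
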
diff --git a/roles/openshift_web_console/tasks/rollout_console.yml b/roles/openshift_web_console/tasks/rollout_console.yml
new file mode 100644
index 000000000..75682ba1d
--- /dev/null
+++ b/roles/openshift_web_console/tasks/rollout_console.yml
@@ -0,0 +1,20 @@
+---
+- name: Check if console deployment exists
+ oc_obj:
+ kind: deployments
+ name: webconsole
+ namespace: openshift-web-console
+ state: list
+ register: console_deployment
+
+# There's currently no command to trigger a rollout for a k8s deployment
+# without changing the pod spec. Add an annotation to force a rollout.
+- name: Rollout updated web console deployment
+ oc_edit:
+ kind: deployments
+ name: webconsole
+ namespace: openshift-web-console
+ separator: '#'
+ content:
+ spec#template#metadata#annotations#installer-triggered-rollout: "{{ ansible_date_time.iso8601_micro }}"
+ when: console_deployment.results.results.0 | length > 0
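
Bumping an annotation is the standard workaround for forcing a rollout of a k8s deployment whose pod spec is otherwise unchanged. Roughly the same effect by hand (sketch; the annotation value only needs to differ from the previous one):

    - name: Force a web console rollout manually (sketch)
      command: >
        oc patch deployment webconsole -n openshift-web-console
        -p '{"spec":{"template":{"metadata":{"annotations":{"installer-triggered-rollout":"manual-1"}}}}}'
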
diff --git a/roles/openshift_web_console/tasks/update_asset_config.yml b/roles/openshift_web_console/tasks/update_asset_config.yml
deleted file mode 100644
index 0992b32e1..000000000
--- a/roles/openshift_web_console/tasks/update_asset_config.yml
+++ /dev/null
@@ -1,68 +0,0 @@
----
-# This task updates asset config values in the webconsole-config config map in
-# the openshift-web-console namespace. The values to set are pased in the
-# variable `asset_config_edits`, which is an array of objects with `key` and
-# `value` properties in the same format as `yedit` module `edits`. Only
-# properties passed are updated.
-#
-# Note that this triggers a redeployment on the console and a brief downtime
-# since it uses a `Recreate` strategy.
-#
-# Example usage:
-#
-# - include_role:
-# name: openshift_web_console
-# tasks_from: update_asset_config.yml
-# vars:
-# asset_config_edits:
-# - key: loggingPublicURL
-# value: "https://{{ openshift_logging_kibana_hostname }}"
-# when: openshift_web_console_install | default(true) | bool
-
-- name: Read web console config map
- oc_configmap:
- namespace: openshift-web-console
- name: webconsole-config
- state: list
- register: webconsole_config
-
-- name: Make temp directory
- command: mktemp -d /tmp/console-ansible-XXXXXX
- register: mktemp
- changed_when: False
-
-- name: Copy asset config to temp file
- copy:
- content: "{{webconsole_config.results.results[0].data['webconsole-config.yaml']}}"
- dest: "{{ mktemp.stdout }}/webconsole-config.yaml"
-
-- name: Change asset config properties
- yedit:
- src: "{{ mktemp.stdout }}/webconsole-config.yaml"
- edits: "{{asset_config_edits}}"
-
-- name: Update web console config map
- oc_configmap:
- namespace: openshift-web-console
- name: webconsole-config
- state: present
- from_file:
- webconsole-config.yaml: "{{ mktemp.stdout }}/webconsole-config.yaml"
-
-- name: Remove temp directory
- file:
- state: absent
- name: "{{ mktemp.stdout }}"
- changed_when: False
-
-# There's currently no command to trigger a rollout for a k8s deployment
-# without changing the pod spec. Add an annotation to force a rollout after
-# the config map has been edited.
-- name: Rollout updated web console deployment
- oc_edit:
- kind: deployments
- name: webconsole
- namespace: openshift-web-console
- separator: '#'
- content:
- spec#template#metadata#annotations#installer-triggered-rollout: "{{ ansible_date_time.iso8601_micro }}"
diff --git a/roles/openshift_web_console/tasks/update_console_config.yml b/roles/openshift_web_console/tasks/update_console_config.yml
new file mode 100644
index 000000000..967222ea4
--- /dev/null
+++ b/roles/openshift_web_console/tasks/update_console_config.yml
@@ -0,0 +1,67 @@
+---
+# This task updates web console config values in the webconsole-config config map
+# in the openshift-web-console namespace. The values to set are passed in the
+# variable `console_config_edits`, which is an array of objects with `key` and
+# `value` properties in the same format as `yedit` module `edits`. Only
+# properties passed are updated. The separator for nested properties is `#`.
+#
+# Note that this triggers a redeployment of the console and a brief downtime
+# since it uses a `Recreate` strategy.
+#
+# Example usage:
+#
+# - include_role:
+# name: openshift_web_console
+# tasks_from: update_console_config.yml
+# vars:
+# console_config_edits:
+# - key: clusterInfo#loggingPublicURL
+# value: "https://{{ openshift_logging_kibana_hostname }}"
+# when: openshift_web_console_install | default(true) | bool
+
+- name: Read the existing web console config map
+ oc_configmap:
+ namespace: openshift-web-console
+ name: webconsole-config
+ state: list
+ register: webconsole_config_map
+
+- set_fact:
+ existing_config_map_data: "{{ webconsole_config_map.results.results[0].data | default({}) }}"
+
+- when: existing_config_map_data['webconsole-config.yaml'] is defined
+ block:
+ - name: Make temp directory
+ command: mktemp -d /tmp/console-ansible-XXXXXX
+ register: mktemp_console
+ changed_when: False
+
+ - name: Copy the existing web console config to temp directory
+ copy:
+ content: "{{ existing_config_map_data['webconsole-config.yaml'] }}"
+ dest: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
+
+ - name: Change web console config properties
+ yedit:
+ src: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
+ edits: "{{ console_config_edits }}"
+ separator: '#'
+ state: present
+
+ - name: Update web console config map
+ oc_configmap:
+ namespace: openshift-web-console
+ name: webconsole-config
+ state: present
+ from_file:
+ webconsole-config.yaml: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
+ register: update_console_config_map
+
+ - name: Remove temp directory
+ file:
+ state: absent
+ name: "{{ mktemp_console.stdout }}"
+ changed_when: False
+
+ - include_tasks: rollout_console.yml
+ when: update_console_config_map.changed | bool
diff --git a/roles/openshift_web_console/vars/default_images.yml b/roles/openshift_web_console/vars/default_images.yml
index 7adb8a0d0..42d331ac5 100644
--- a/roles/openshift_web_console/vars/default_images.yml
+++ b/roles/openshift_web_console/vars/default_images.yml
@@ -1,4 +1,4 @@
---
-__openshift_web_console_prefix: "docker.io/openshift/"
+__openshift_web_console_prefix: "docker.io/openshift/origin-"
__openshift_web_console_version: "latest"
-__openshift_web_console_image_name: "origin-web-console"
+__openshift_web_console_image_name: "web-console"
diff --git a/roles/openshift_web_console/vars/main.yml b/roles/openshift_web_console/vars/main.yml
index 80bc56a17..e91048e38 100644
--- a/roles/openshift_web_console/vars/main.yml
+++ b/roles/openshift_web_console/vars/main.yml
@@ -2,4 +2,5 @@
__console_files_location: "../../../files/origin-components/"
__console_template_file: "console-template.yaml"
+__console_rbac_file: "console-rbac-template.yaml"
__console_config_file: "console-config.yaml"
diff --git a/roles/openshift_web_console/vars/openshift-enterprise.yml b/roles/openshift_web_console/vars/openshift-enterprise.yml
index 721ac1d27..375c22067 100644
--- a/roles/openshift_web_console/vars/openshift-enterprise.yml
+++ b/roles/openshift_web_console/vars/openshift-enterprise.yml
@@ -1,4 +1,4 @@
---
-__openshift_web_console_prefix: "registry.access.redhat.com/openshift3/"
+__openshift_web_console_prefix: "registry.access.redhat.com/openshift3/ose-"
__openshift_web_console_version: "v3.9"
-__openshift_web_console_image_name: "ose-web-console"
+__openshift_web_console_image_name: "web-console"
diff --git a/roles/os_firewall/tasks/firewalld.yml b/roles/os_firewall/tasks/firewalld.yml
index 4eae31596..fa933da51 100644
--- a/roles/os_firewall/tasks/firewalld.yml
+++ b/roles/os_firewall/tasks/firewalld.yml
@@ -2,7 +2,9 @@
- name: Fail - Firewalld is not supported on Atomic Host
fail:
msg: "Firewalld is not supported on Atomic Host"
- when: r_os_firewall_is_atomic | bool
+ when:
+ - r_os_firewall_is_atomic | bool
+ - not openshift_enable_unsupported_configurations | default(false)
- name: Install firewalld packages
package:
@@ -10,6 +12,7 @@
state: present
register: result
until: result is succeeded
+ when: not r_os_firewall_is_atomic | bool
- name: Ensure iptables services are not enabled
systemd:
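
Firewalld on Atomic Host is now a hard stop only until the operator opts into unsupported configurations, and the package install is skipped on Atomic, where this role cannot layer packages. Opting in, as a group_vars sketch:

    # acknowledges an explicitly unsupported setup
    openshift_enable_unsupported_configurations: true
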
diff --git a/roles/template_service_broker/defaults/main.yml b/roles/template_service_broker/defaults/main.yml
index c32872d24..3465832cc 100644
--- a/roles/template_service_broker/defaults/main.yml
+++ b/roles/template_service_broker/defaults/main.yml
@@ -3,4 +3,4 @@
template_service_broker_remove: False
template_service_broker_install: True
openshift_template_service_broker_namespaces: ['openshift']
-template_service_broker_selector: { "region": "infra" }
+template_service_broker_selector: "{{ openshift_hosted_infra_selector | default('region=infra') | map_from_pairs }}"
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
index 604e94602..4e6ad2ae5 100644
--- a/roles/template_service_broker/tasks/install.yml
+++ b/roles/template_service_broker/tasks/install.yml
@@ -22,6 +22,11 @@
register: mktemp
changed_when: False
+- name: Copy admin client config
+ command: >
+ cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: false
+
- copy:
src: "{{ __tsb_files_location }}/{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
@@ -43,16 +48,18 @@
- name: Apply template file
shell: >
- {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}"
+ {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig
+ -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}"
--param API_SERVER_CONFIG="{{ config['content'] | b64decode }}"
--param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}"
--param NODE_SELECTOR={{ template_service_broker_selector | to_json | quote }}
- | {{ openshift_client_binary }} apply -f -
+ | {{ openshift_client_binary }} apply --config={{ mktemp.stdout }}/admin.kubeconfig -f -
# reconcile with rbac
- name: Reconcile with RBAC file
shell: >
- {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" | {{ openshift_client_binary }} auth reconcile -f -
+ {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}"
+ | {{ openshift_client_binary }} auth reconcile --config={{ mktemp.stdout }}/admin.kubeconfig -f -
# Check that the TSB is running
- name: Verify that TSB is running
@@ -79,9 +86,15 @@
# Register with broker
- name: Register TSB with broker
shell: >
- {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | {{ openshift_client_binary }} apply -f -
+ {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | {{ openshift_client_binary }} apply --config={{ mktemp.stdout }}/admin.kubeconfig -f -
- file:
state: absent
name: "{{ mktemp.stdout }}"
changed_when: False
+
+- name: Rollout console so it discovers the template service broker is installed
+ include_role:
+ name: openshift_web_console
+ tasks_from: rollout_console.yml
+ when: openshift_web_console_install | default(true) | bool
diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml
index db1b558e4..48dc1327e 100644
--- a/roles/template_service_broker/tasks/remove.yml
+++ b/roles/template_service_broker/tasks/remove.yml
@@ -3,6 +3,11 @@
register: mktemp
changed_when: False
+- name: Copy admin client config
+ command: >
+ cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: false
+
- copy:
src: "{{ __tsb_files_location }}/{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
@@ -12,11 +17,11 @@
- name: Delete TSB broker
shell: >
- {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | {{ openshift_client_binary }} delete --ignore-not-found -f -
+ {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | {{ openshift_client_binary }} delete --config={{ mktemp.stdout }}/admin.kubeconfig --ignore-not-found -f -
- name: Delete TSB objects
shell: >
- {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | {{ openshift_client_binary }} delete --ignore-not-found -f -
+ {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | {{ openshift_client_binary }} delete --config={{ mktemp.stdout }}/admin.kubeconfig --ignore-not-found -f -
- name: empty out tech preview extension file for service console UI
copy:
@@ -31,3 +36,9 @@
state: absent
name: "{{ mktemp.stdout }}"
changed_when: False
+
+- name: Rollout console so it discovers the template service broker is removed
+ include_role:
+ name: openshift_web_console
+ tasks_from: rollout_console.yml
+ when: openshift_web_console_install | default(true) | bool
diff --git a/roles/template_service_broker/vars/default_images.yml b/roles/template_service_broker/vars/default_images.yml
index 662d65d9f..dc164a4db 100644
--- a/roles/template_service_broker/vars/default_images.yml
+++ b/roles/template_service_broker/vars/default_images.yml
@@ -1,4 +1,4 @@
---
-__template_service_broker_prefix: "docker.io/openshift/"
+__template_service_broker_prefix: "docker.io/openshift/origin-"
__template_service_broker_version: "latest"
-__template_service_broker_image_name: "origin-template-service-broker"
+__template_service_broker_image_name: "template-service-broker"
diff --git a/roles/template_service_broker/vars/openshift-enterprise.yml b/roles/template_service_broker/vars/openshift-enterprise.yml
index 16a08e72f..b65b97691 100644
--- a/roles/template_service_broker/vars/openshift-enterprise.yml
+++ b/roles/template_service_broker/vars/openshift-enterprise.yml
@@ -1,4 +1,4 @@
---
-__template_service_broker_prefix: "registry.access.redhat.com/openshift3/"
+__template_service_broker_prefix: "registry.access.redhat.com/openshift3/ose-"
__template_service_broker_version: "v3.7"
-__template_service_broker_image_name: "ose-template-service-broker"
+__template_service_broker_image_name: "template-service-broker"
diff --git a/roles/tuned/tasks/main.yml b/roles/tuned/tasks/main.yml
index 4a28d47b2..5129f4471 100644
--- a/roles/tuned/tasks/main.yml
+++ b/roles/tuned/tasks/main.yml
@@ -28,7 +28,12 @@
when: item.state == 'file'
- name: Make tuned use the recommended tuned profile on restart
- file: path=/etc/tuned/active_profile state=absent
+ file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - /etc/tuned/active_profile
+ - /etc/tuned/profile_mode
- name: Restart tuned service
systemd:
diff --git a/utils/src/ooinstall/ansible_plugins/facts_callback.py b/utils/src/ooinstall/ansible_plugins/facts_callback.py
index 433e29dde..6251cd22b 100644
--- a/utils/src/ooinstall/ansible_plugins/facts_callback.py
+++ b/utils/src/ooinstall/ansible_plugins/facts_callback.py
@@ -7,11 +7,7 @@ import yaml
from ansible.plugins.callback import CallbackBase
from ansible.parsing.yaml.dumper import AnsibleDumper
-# ansible.compat.six goes away with Ansible 2.4
-try:
- from ansible.compat.six import u
-except ImportError:
- from ansible.module_utils.six import u
+from ansible.module_utils.six import u
# pylint: disable=super-init-not-called
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 1226242d0..eb42721b5 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -820,7 +820,7 @@ http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
click.echo(message)
-@click.group()
+@click.group(context_settings=dict(max_content_width=120))
@click.pass_context
@click.option('--unattended', '-u', is_flag=True, default=False)
@click.option('--configuration', '-c',
@@ -932,97 +932,16 @@ def uninstall(ctx):
openshift_ansible.run_uninstall_playbook(hosts, verbose)
-@click.command()
+@click.command(context_settings=dict(max_content_width=120))
@click.option('--latest-minor', '-l', is_flag=True, default=False)
@click.option('--next-major', '-n', is_flag=True, default=False)
@click.pass_context
# pylint: disable=too-many-statements,too-many-branches
def upgrade(ctx, latest_minor, next_major):
- oo_cfg = ctx.obj['oo_cfg']
-
- if len(oo_cfg.deployment.hosts) == 0:
- click.echo("No hosts defined in: %s" % oo_cfg.config_path)
- sys.exit(1)
-
- variant = oo_cfg.settings['variant']
- if find_variant(variant)[0] is None:
- click.echo("%s is not a supported variant for upgrade." % variant)
- sys.exit(0)
-
- old_version = oo_cfg.settings['variant_version']
-
- try:
- mapping = UPGRADE_MAPPINGS[old_version]
- except KeyError:
- click.echo('No upgrades available for %s %s' % (variant, old_version))
- sys.exit(0)
-
- message = """
- This tool will help you upgrade your existing OpenShift installation.
- Currently running: %s %s
-"""
- click.echo(message % (variant, old_version))
-
- # Map the dynamic upgrade options to the playbook to run for each.
- # Index offset by 1.
- # List contains tuples of booleans for (latest_minor, next_major)
- selections = []
- if not (latest_minor or next_major):
- i = 0
- if 'minor_playbook' in mapping:
- click.echo("(%s) Update to latest %s" % (i + 1, old_version))
- selections.append((True, False))
- i += 1
- if 'major_playbook' in mapping:
- click.echo("(%s) Upgrade to next release: %s" % (i + 1, mapping['major_version']))
- selections.append((False, True))
- i += 1
-
- response = click.prompt("\nChoose an option from above",
- type=click.Choice(list(map(str, range(1, len(selections) + 1)))))
- latest_minor, next_major = selections[int(response) - 1]
-
- if next_major:
- if 'major_playbook' not in mapping:
- click.echo("No major upgrade supported for %s %s with this version "
- "of atomic-openshift-utils." % (variant, old_version))
- sys.exit(0)
- playbook = mapping['major_playbook']
- new_version = mapping['major_version']
- # Update config to reflect the version we're targeting, we'll write
- # to disk once Ansible completes successfully, not before.
- oo_cfg.settings['variant_version'] = new_version
- if oo_cfg.settings['variant'] == 'enterprise':
- oo_cfg.settings['variant'] = 'openshift-enterprise'
-
- if latest_minor:
- if 'minor_playbook' not in mapping:
- click.echo("No minor upgrade supported for %s %s with this version "
- "of atomic-openshift-utils." % (variant, old_version))
- sys.exit(0)
- playbook = mapping['minor_playbook']
- new_version = old_version
-
- click.echo("OpenShift will be upgraded from %s %s to latest %s %s on the following hosts:\n" % (
- variant, old_version, oo_cfg.settings['variant'], new_version))
- for host in oo_cfg.deployment.hosts:
- click.echo(" * %s" % host.connect_to)
-
- if not ctx.obj['unattended']:
- # Prompt interactively to confirm:
- if not click.confirm("\nDo you want to proceed?"):
- click.echo("Upgrade cancelled.")
- sys.exit(0)
-
- retcode = openshift_ansible.run_upgrade_playbook(oo_cfg.deployment.hosts,
- playbook,
- ctx.obj['verbose'])
- if retcode > 0:
- click.echo("Errors encountered during upgrade, please check %s." %
- oo_cfg.settings['ansible_log_path'])
- else:
- oo_cfg.save_to_disk()
- click.echo("Upgrade completed! Rebooting all hosts is recommended.")
+ click.echo("Upgrades are no longer supported by this version of the installer")
+ click.echo("Please see the documentation for manual upgrade:")
+ click.echo("https://docs.openshift.com/container-platform/latest/install_config/upgrading/automated_upgrades.html")
+ sys.exit(1)
@click.command()
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index dda8eb4c6..84a76fa53 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -122,7 +122,7 @@ def write_inventory_vars(base_inventory, lb):
if CFG.deployment.variables['ansible_ssh_user'] != 'root':
base_inventory.write('ansible_become=yes\n')
- base_inventory.write('openshift_override_hostname_check=true\n')
+ base_inventory.write('openshift_hostname_check=false\n')
if lb is not None:
base_inventory.write("openshift_master_cluster_hostname={}\n".format(lb.hostname))
@@ -321,21 +321,3 @@ def run_uninstall_playbook(hosts, verbose=False):
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config']
return run_ansible(playbook, inventory_file, facts_env, verbose)
-
-
-def run_upgrade_playbook(hosts, playbook, verbose=False):
- playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
- 'playbooks/byo/openshift-cluster/upgrades/{}'.format(playbook))
-
- # TODO: Upgrade inventory for upgrade?
- inventory_file = generate_inventory(hosts)
- facts_env = os.environ.copy()
- if 'ansible_log_path' in CFG.settings:
- facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
- if 'ansible_config' in CFG.settings:
- facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
- # override the ansible config for our main playbook run
- if 'ansible_quiet_config' in CFG.settings:
- facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config']
-
- return run_ansible(playbook, inventory_file, facts_env, verbose)
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index 673997c42..2259f3416 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -384,6 +384,7 @@ deployment:
storage:
"""
+
class UnattendedCliTests(OOCliFixture):
def setUp(self):
@@ -402,8 +403,9 @@ class UnattendedCliTests(OOCliFixture):
load_facts_mock.return_value = (mock_facts, 0)
run_playbook_mock.return_value = 0
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+ config_file = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'),
+ SAMPLE_CONFIG % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
@@ -481,8 +483,9 @@ class UnattendedCliTests(OOCliFixture):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+ config_file = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'),
+ SAMPLE_CONFIG % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
@@ -490,16 +493,18 @@ class UnattendedCliTests(OOCliFixture):
load_facts_args = load_facts_mock.call_args[0]
self.assertEquals(os.path.join(self.work_dir, "hosts"),
- load_facts_args[0])
- self.assertEquals(os.path.join(self.work_dir,
- "playbooks/byo/openshift_facts.yml"), load_facts_args[1])
+ load_facts_args[0])
+ self.assertEquals(
+ os.path.join(self.work_dir, "playbooks/byo/openshift_facts.yml"),
+ load_facts_args[1])
env_vars = load_facts_args[2]
- self.assertEquals(os.path.join(self.work_dir,
- '.ansible/callback_facts.yaml'),
+ self.assertEquals(
+ os.path.join(self.work_dir, '.ansible/callback_facts.yaml'),
env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
# If user running test has rpm installed, this might be set to default:
- self.assertTrue('ANSIBLE_CONFIG' not in env_vars or
+ self.assertTrue(
+ 'ANSIBLE_CONFIG' not in env_vars or
env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG)
# Make sure we ran on the expected masters and nodes:
@@ -515,8 +520,9 @@ class UnattendedCliTests(OOCliFixture):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), merged_config)
+ config_file = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'),
+ merged_config)
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
@@ -526,9 +532,9 @@ class UnattendedCliTests(OOCliFixture):
inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
self.assertEquals('root',
- inventory.get('OSEv3:vars', 'ansible_ssh_user'))
+ inventory.get('OSEv3:vars', 'ansible_ssh_user'))
self.assertEquals('openshift-enterprise',
- inventory.get('OSEv3:vars', 'deployment_type'))
+ inventory.get('OSEv3:vars', 'deployment_type'))
# Check the masters:
self.assertEquals(1, len(inventory.items('masters')))
@@ -546,13 +552,13 @@ class UnattendedCliTests(OOCliFixture):
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
- def test_variant_version_latest_assumed(self, load_facts_mock,
- run_playbook_mock):
+ def test_variant_version_latest_assumed(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+ config_file = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'),
+ SAMPLE_CONFIG % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
@@ -569,19 +575,18 @@ class UnattendedCliTests(OOCliFixture):
inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
self.assertEquals('openshift-enterprise',
- inventory.get('OSEv3:vars', 'deployment_type'))
+ inventory.get('OSEv3:vars', 'deployment_type'))
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
- def test_variant_version_preserved(self, load_facts_mock,
- run_playbook_mock):
+ def test_variant_version_preserved(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
config = SAMPLE_CONFIG % 'openshift-enterprise'
config = '%s\n%s' % (config, 'variant_version: 3.3')
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), config)
+ config_file = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'), config)
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
@@ -597,7 +602,7 @@ class UnattendedCliTests(OOCliFixture):
inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
self.assertEquals('openshift-enterprise',
- inventory.get('OSEv3:vars', 'deployment_type'))
+ inventory.get('OSEv3:vars', 'deployment_type'))
# unattended with bad config file and no installed hosts (without --force)
@patch('ooinstall.openshift_ansible.run_main_playbook')
@@ -606,25 +611,28 @@ class UnattendedCliTests(OOCliFixture):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), BAD_CONFIG % 'openshift-enterprise')
+ config_file = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'),
+ BAD_CONFIG % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
self.assertEquals(1, result.exit_code)
- self.assertTrue("You must specify either an ip or hostname"
+ self.assertTrue(
+ "You must specify either an ip or hostname"
in result.output)
- #unattended with three masters, one node, and haproxy
+ # unattended with three masters, one node, and haproxy
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_quick_ha_full_run(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), QUICKHA_CONFIG % 'openshift-enterprise')
+ config_file = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'),
+ QUICKHA_CONFIG % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
@@ -636,15 +644,16 @@ class UnattendedCliTests(OOCliFixture):
self.assertEquals(6, len(hosts))
self.assertEquals(6, len(hosts_to_run_on))
- #unattended with two masters, one node, and haproxy
+ # unattended with two masters, one node, and haproxy
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_quick_ha_only_2_masters(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), QUICKHA_2_MASTER_CONFIG % 'openshift-enterprise')
+ config_file = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'),
+ QUICKHA_2_MASTER_CONFIG % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
@@ -653,15 +662,16 @@ class UnattendedCliTests(OOCliFixture):
self.assert_result(result, 1)
self.assertTrue("A minimum of 3 masters are required" in result.output)
- #unattended with three masters, one node, but no load balancer specified:
+ # unattended with three masters, one node, but no load balancer specified:
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_quick_ha_no_lb(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), QUICKHA_CONFIG_NO_LB % 'openshift-enterprise')
+ config_file = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'),
+ QUICKHA_CONFIG_NO_LB % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
@@ -670,15 +680,16 @@ class UnattendedCliTests(OOCliFixture):
self.assert_result(result, 1)
self.assertTrue('No master load balancer specified in config' in result.output)
- #unattended with three masters, one node, and one of the masters reused as load balancer:
+ # unattended with three masters, one node, and one of the masters reused as load balancer:
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_quick_ha_reused_lb(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), QUICKHA_CONFIG_REUSED_LB % 'openshift-enterprise')
+ config_file = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'),
+ QUICKHA_CONFIG_REUSED_LB % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
@@ -686,15 +697,16 @@ class UnattendedCliTests(OOCliFixture):
# This is not a valid configuration:
self.assert_result(result, 1)
- #unattended with preconfigured lb
+ # unattended with preconfigured lb
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_quick_ha_preconfigured_lb(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), QUICKHA_CONFIG_PRECONFIGURED_LB % 'openshift-enterprise')
+ config_file = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'),
+ QUICKHA_CONFIG_PRECONFIGURED_LB % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
result = self.runner.invoke(cli.cli, self.cli_args)
@@ -706,6 +718,7 @@ class UnattendedCliTests(OOCliFixture):
self.assertEquals(6, len(hosts))
self.assertEquals(6, len(hosts_to_run_on))
+
class AttendedCliTests(OOCliFixture):
def setUp(self):
@@ -720,17 +733,18 @@ class AttendedCliTests(OOCliFixture):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
- cli_input = build_input(hosts=[
- ('10.0.0.1', True, False),
- ('10.0.0.2', False, False),
- ('10.0.0.3', False, False)],
- ssh_user='root',
- variant_num=1,
- confirm_facts='y',
- storage='10.1.0.1',)
+ cli_input = build_input(
+ hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', False, False),
+ ('10.0.0.3', False, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ storage='10.1.0.1',)
self.cli_args.append("install")
- result = self.runner.invoke(cli.cli, self.cli_args,
- input=cli_input)
+ result = self.runner.invoke(
+ cli.cli, self.cli_args, input=cli_input)
self.assert_result(result, 0)
self._verify_load_facts(load_facts_mock)
@@ -741,12 +755,12 @@ class AttendedCliTests(OOCliFixture):
inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
- self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
- 'openshift_schedulable=False')
- self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.2',
- 'openshift_schedulable=True')
- self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.3',
- 'openshift_schedulable=True')
+ self.assert_inventory_host_var(
+ inventory, 'nodes', '10.0.0.1', 'openshift_schedulable=False')
+ self.assert_inventory_host_var_unset(
+ inventory, 'nodes', '10.0.0.2', 'openshift_schedulable=True')
+ self.assert_inventory_host_var_unset(
+ inventory, 'nodes', '10.0.0.3', 'openshift_schedulable=True')
    # interactive with config file and some installed and some uninstalled hosts
@patch('ooinstall.openshift_ansible.run_main_playbook')
@@ -762,15 +776,16 @@ class AttendedCliTests(OOCliFixture):
load_facts_mock.return_value = (mock_facts, 0)
run_playbook_mock.return_value = 0
- cli_input = build_input(hosts=[
- ('10.0.0.1', True, False),
- ('10.0.0.2', False, False),
+ cli_input = build_input(
+ hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', False, False),
],
- add_nodes=[('10.0.0.3', False, False)],
- ssh_user='root',
- variant_num=1,
- confirm_facts='y',
- storage='10.0.0.1',)
+ add_nodes=[('10.0.0.3', False, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ storage='10.0.0.1',)
self.cli_args.append("install")
result = self.runner.invoke(cli.cli,
self.cli_args,
@@ -781,7 +796,6 @@ class AttendedCliTests(OOCliFixture):
self.assertTrue('scaleup' in result.output)
self.assert_result(result, 1)
-
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_fresh_install_with_config(self, load_facts_mock, run_playbook_mock):
@@ -830,26 +844,27 @@ class AttendedCliTests(OOCliFixture):
# exp_hosts_to_run_on_len=2,
# force=False)
- #interactive multimaster: one more node than master
+ # interactive multimaster: one more node than master
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_ha_dedicated_node(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
- cli_input = build_input(hosts=[
- ('10.0.0.1', True, False),
- ('10.0.0.2', True, False),
- ('10.0.0.3', True, False),
- ('10.0.0.4', False, False)],
- ssh_user='root',
- variant_num=1,
- confirm_facts='y',
- master_lb=('10.0.0.5', False),
- storage='10.1.0.1',)
+ cli_input = build_input(
+ hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', True, False),
+ ('10.0.0.3', True, False),
+ ('10.0.0.4', False, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ master_lb=('10.0.0.5', False),
+ storage='10.1.0.1',)
self.cli_args.append("install")
- result = self.runner.invoke(cli.cli, self.cli_args,
- input=cli_input)
+ result = self.runner.invoke(
+ cli.cli, self.cli_args, input=cli_input)
self.assert_result(result, 0)
self._verify_load_facts(load_facts_mock)
@@ -872,25 +887,26 @@ class AttendedCliTests(OOCliFixture):
self.assertTrue(inventory.has_section('etcd'))
self.assertEquals(3, len(inventory.items('etcd')))
- #interactive multimaster: identical masters and nodes
+ # interactive multimaster: identical masters and nodes
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_ha_no_dedicated_nodes(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
- cli_input = build_input(hosts=[
- ('10.0.0.1', True, False),
- ('10.0.0.2', True, False),
- ('10.0.0.3', True, False)],
- ssh_user='root',
- variant_num=1,
- confirm_facts='y',
- master_lb=('10.0.0.5', False),
- storage='10.1.0.1',)
+ cli_input = build_input(
+ hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', True, False),
+ ('10.0.0.3', True, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ master_lb=('10.0.0.5', False),
+ storage='10.1.0.1',)
self.cli_args.append("install")
- result = self.runner.invoke(cli.cli, self.cli_args,
- input=cli_input)
+ result = self.runner.invoke(
+ cli.cli, self.cli_args, input=cli_input)
self.assert_result(result, 0)
self._verify_load_facts(load_facts_mock)
@@ -919,7 +935,9 @@ class AttendedCliTests(OOCliFixture):
full_line = "%s=%s" % (a, b)
tokens = full_line.split()
if tokens[0] == host:
- self.assertTrue(variable in tokens[1:], "Unable to find %s in line: %s" % (variable, full_line))
+ self.assertTrue(
+ variable in tokens[1:],
+ "Unable to find %s in line: %s" % (variable, full_line))
return
self.fail("unable to find host %s in inventory" % host)
@@ -938,45 +956,46 @@ class AttendedCliTests(OOCliFixture):
return
self.fail("unable to find host %s in inventory" % host)
-
- #interactive multimaster: attempting to use a master as the load balancer should fail:
+    # interactive multimaster: a master offered as the load balancer should be rejected and the user re-prompted:
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_ha_reuse_master_as_lb(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
run_playbook_mock.return_value = 0
- cli_input = build_input(hosts=[
- ('10.0.0.1', True, False),
- ('10.0.0.2', True, False),
- ('10.0.0.3', False, False),
- ('10.0.0.4', True, False)],
- ssh_user='root',
- variant_num=1,
- confirm_facts='y',
- master_lb=(['10.0.0.2', '10.0.0.5'], False),
- storage='10.1.0.1')
+ cli_input = build_input(
+ hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', True, False),
+ ('10.0.0.3', False, False),
+ ('10.0.0.4', True, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ master_lb=(['10.0.0.2', '10.0.0.5'], False),
+ storage='10.1.0.1')
self.cli_args.append("install")
- result = self.runner.invoke(cli.cli, self.cli_args,
- input=cli_input)
+ result = self.runner.invoke(
+ cli.cli, self.cli_args, input=cli_input)
self.assert_result(result, 0)
- #interactive all-in-one
+ # interactive all-in-one
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_all_in_one(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
- cli_input = build_input(hosts=[
- ('10.0.0.1', True, False)],
- ssh_user='root',
- variant_num=1,
- confirm_facts='y',
- storage='10.0.0.1')
+ cli_input = build_input(
+ hosts=[
+ ('10.0.0.1', True, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ storage='10.0.0.1')
self.cli_args.append("install")
- result = self.runner.invoke(cli.cli, self.cli_args,
- input=cli_input)
+ result = self.runner.invoke(
+ cli.cli, self.cli_args, input=cli_input)
self.assert_result(result, 0)
self._verify_load_facts(load_facts_mock)
@@ -990,25 +1009,25 @@ class AttendedCliTests(OOCliFixture):
self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
'openshift_schedulable=True')
-
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_gen_inventory(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
- cli_input = build_input(hosts=[
- ('10.0.0.1', True, False),
- ('10.0.0.2', False, False),
- ('10.0.0.3', False, False)],
- ssh_user='root',
- variant_num=1,
- confirm_facts='y',
- storage='10.1.0.1',)
+ cli_input = build_input(
+ hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', False, False),
+ ('10.0.0.3', False, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ storage='10.1.0.1',)
self.cli_args.append("install")
self.cli_args.append("--gen-inventory")
- result = self.runner.invoke(cli.cli, self.cli_args,
- input=cli_input)
+ result = self.runner.invoke(
+ cli.cli, self.cli_args, input=cli_input)
self.assert_result(result, 0)
self._verify_load_facts(load_facts_mock)
@@ -1021,12 +1040,12 @@ class AttendedCliTests(OOCliFixture):
inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
- self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
- 'openshift_schedulable=False')
- self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.2',
- 'openshift_schedulable=True')
- self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.3',
- 'openshift_schedulable=True')
+ self.assert_inventory_host_var(
+ inventory, 'nodes', '10.0.0.1', 'openshift_schedulable=False')
+ self.assert_inventory_host_var_unset(
+ inventory, 'nodes', '10.0.0.2', 'openshift_schedulable=True')
+ self.assert_inventory_host_var_unset(
+ inventory, 'nodes', '10.0.0.3', 'openshift_schedulable=True')
# TODO: test with config file, attended add node
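Every rewrite in this file applies the same PEP 8 pattern: long call arguments move from continuation-aligned wrapping to a hanging indent, where nothing follows the opening parenthesis and each wrapped argument takes one extra indent level. A minimal runnable sketch of the two styles, with write_config reduced to a hypothetical stand-in for the fixture method of the same name:

import os
import tempfile

def write_config(path, contents):
    # Hypothetical stand-in for OOInstallFixture.write_config: write
    # `contents` to `path` and hand the path back.
    with open(path, 'w') as f:
        f.write(contents)
    return path

work_dir = tempfile.mkdtemp()
SAMPLE_CONFIG = 'deployment:\n  hosts: []\n'

# Continuation-aligned wrapping (the style being removed): arguments
# line up under the opening parenthesis and drift past 79 columns.
config_file = write_config(os.path.join(work_dir,
                                        'ooinstall.conf'), SAMPLE_CONFIG)

# Hanging indent (the style being adopted): the call opens, then every
# argument sits on its own line at one extra indent level.
config_file = write_config(
    os.path.join(work_dir, 'ooinstall.conf'),
    SAMPLE_CONFIG)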
diff --git a/utils/test/fixture.py b/utils/test/fixture.py
index 873ac4a27..5c0d1d2c1 100644
--- a/utils/test/fixture.py
+++ b/utils/test/fixture.py
@@ -43,6 +43,7 @@ deployment:
node:
"""
+
def read_yaml(config_file_path):
cfg_f = open(config_file_path, 'r')
config = yaml.safe_load(cfg_f.read())
@@ -105,7 +106,7 @@ class OOCliFixture(OOInstallFixture):
self.assertTrue('ip' in host)
self.assertTrue('public_ip' in host)
- #pylint: disable=too-many-arguments
+ # pylint: disable=too-many-arguments
def _verify_get_hosts_to_run_on(self, mock_facts, load_facts_mock,
run_playbook_mock, cli_input,
exp_hosts_len=None, exp_hosts_to_run_on_len=None,
@@ -152,7 +153,7 @@ class OOCliFixture(OOInstallFixture):
self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
-#pylint: disable=too-many-arguments,too-many-branches,too-many-statements
+# pylint: disable=too-many-arguments,too-many-branches,too-many-statements
def build_input(ssh_user=None, hosts=None, variant_num=None,
add_nodes=None, confirm_facts=None, schedulable_masters_ok=None,
master_lb=('', False), storage=None):
@@ -190,7 +191,7 @@ def build_input(ssh_user=None, hosts=None, variant_num=None,
else:
inputs.append('rpm')
- #inputs.append('rpm')
+ # inputs.append('rpm')
# We should not be prompted to add more hosts if we're currently at
# 2 masters, this is an invalid HA configuration, so this question
# will not be asked, and the user must enter the next host:
@@ -224,13 +225,13 @@ def build_input(ssh_user=None, hosts=None, variant_num=None,
inputs.append('y')
inputs.append('1') # Add more nodes
i = 0
- for (host, is_master, is_containerized) in add_nodes:
+ for (host, _, is_containerized) in add_nodes:
inputs.append(host)
if is_containerized:
inputs.append('container')
else:
inputs.append('rpm')
- #inputs.append('rpm')
+ # inputs.append('rpm')
if i < len(add_nodes) - 1:
inputs.append('y') # Add more hosts
else:
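Besides re-wrapping, the fixture.py hunks make two small lint fixes: comments gain a space after the hash (pycodestyle E265, so `# pylint: disable=...` rather than `#pylint: ...`), and the loop over add_nodes binds the tuple element it never reads to `_`, the conventional name pylint accepts for deliberately ignored values. A minimal sketch of the loop change, reusing the (host, is_master, is_containerized) tuple shape from build_input:

# Each add_nodes entry is (host, is_master, is_containerized); this
# prompt-building loop never reads is_master, so it binds that slot to
# `_` to silence pylint's unused-variable warning.
add_nodes = [('10.0.0.3', False, False), ('10.0.0.4', False, True)]
inputs = []
for (host, _, is_containerized) in add_nodes:
    inputs.append(host)
    inputs.append('container' if is_containerized else 'rpm')
print(inputs)  # ['10.0.0.3', 'rpm', '10.0.0.4', 'container']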
diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py
index 5651e6e7a..80cdbe618 100644
--- a/utils/test/oo_config_tests.py
+++ b/utils/test/oo_config_tests.py
@@ -107,6 +107,7 @@ deployment:
node:
"""
+
class OOInstallFixture(unittest.TestCase):
def setUp(self):
@@ -133,13 +134,12 @@ class OOInstallFixture(unittest.TestCase):
return path
-
class OOConfigTests(OOInstallFixture):
def test_load_config(self):
- cfg_path = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), SAMPLE_CONFIG)
+ cfg_path = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'), SAMPLE_CONFIG)
ooconfig = OOConfig(cfg_path)
self.assertEquals(3, len(ooconfig.deployment.hosts))
@@ -155,26 +155,25 @@ class OOConfigTests(OOInstallFixture):
def test_load_bad_config(self):
- cfg_path = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), CONFIG_BAD)
+ cfg_path = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'), CONFIG_BAD)
try:
OOConfig(cfg_path)
assert False
except OOConfigInvalidHostError:
assert True
-
def test_load_complete_facts(self):
- cfg_path = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), SAMPLE_CONFIG)
+ cfg_path = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'), SAMPLE_CONFIG)
ooconfig = OOConfig(cfg_path)
missing_host_facts = ooconfig.calc_missing_facts()
self.assertEquals(0, len(missing_host_facts))
# Test missing optional facts the user must confirm:
def test_load_host_incomplete_facts(self):
- cfg_path = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), CONFIG_INCOMPLETE_FACTS)
+ cfg_path = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'), CONFIG_INCOMPLETE_FACTS)
ooconfig = OOConfig(cfg_path)
missing_host_facts = ooconfig.calc_missing_facts()
self.assertEquals(2, len(missing_host_facts))
@@ -182,8 +181,8 @@ class OOConfigTests(OOInstallFixture):
self.assertEquals(3, len(missing_host_facts['10.0.0.3']))
def test_write_config(self):
- cfg_path = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), SAMPLE_CONFIG)
+ cfg_path = self.write_config(
+ os.path.join(self.work_dir, 'ooinstall.conf'), SAMPLE_CONFIG)
ooconfig = OOConfig(cfg_path)
ooconfig.save_to_disk()
@@ -191,8 +190,6 @@ class OOConfigTests(OOInstallFixture):
written_config = yaml.safe_load(f.read())
f.close()
-
-
self.assertEquals(3, len(written_config['deployment']['hosts']))
for h in written_config['deployment']['hosts']:
self.assertTrue('ip' in h)
@@ -259,8 +256,10 @@ class HostTests(OOInstallFixture):
# Given the `yaml_props` above we should see a line like this:
# openshift_node_labels="{'region': 'infra'}"
- node_labels_expected = '''openshift_node_labels="{'region': 'infra'}"''' # Quotes around the hash
- node_labels_bad = '''openshift_node_labels={'region': 'infra'}''' # No quotes around the hash
+ # Quotes around the hash
+ node_labels_expected = '''openshift_node_labels="{'region': 'infra'}"'''
+ # No quotes around the hash
+ node_labels_bad = '''openshift_node_labels={'region': 'infra'}'''
# The good line is present in the written inventory line
self.assertIn(node_labels_expected, legacy_inventory_line)
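The HostTests change moves the trailing comments onto their own lines, which keeps both assignments under the line-length limit without losing the point of the test: an inventory value containing a space must be quoted, or it splits into separate tokens. A small sketch of that behaviour, using shlex.split as a rough stand-in for how an Ansible inventory line is tokenized:

import shlex

node_labels_expected = '''openshift_node_labels="{'region': 'infra'}"'''
node_labels_bad = '''openshift_node_labels={'region': 'infra'}'''

# The quoted hash survives tokenization as a single key=value pair...
print(len(shlex.split(node_labels_expected)))  # 1
# ...while the unquoted one breaks apart at the embedded space.
print(len(shlex.split(node_labels_bad)))       # 2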
diff --git a/utils/test/test_utils.py b/utils/test/test_utils.py
index cabeaee34..a72e429d1 100644
--- a/utils/test/test_utils.py
+++ b/utils/test/test_utils.py
@@ -29,7 +29,6 @@ class TestUtils(unittest.TestCase):
mock.call('OO_FOO: bar'),
]
-
######################################################################
# Validate ooinstall.utils.debug_env functionality
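A large share of the hunks above only add or delete blank lines; those follow pycodestyle's vertical-whitespace rules, which want exactly two blank lines before a top-level function or class (E302) and no runs of three or more blank lines anywhere (E303). A minimal sketch, borrowing names from the files in this diff with simplified bodies:

"""Vertical whitespace per pycodestyle: two blank lines before each
top-level def/class (E302), never three or more in a row (E303)."""


def read_yaml(config_file_path):
    # Name borrowed from utils/test/fixture.py; body simplified.
    with open(config_file_path, 'r') as cfg_f:
        return cfg_f.read()


class OOInstallFixture(object):
    # Two blank lines above the class satisfy E302 as well.
    pass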