author     Michael Gugino <mgugino@redhat.com>  2017-11-09 17:00:00 -0500
committer  Michael Gugino <mgugino@redhat.com>  2017-11-16 09:03:35 -0500
commit     afa4fd5799b0ff43d625d061e4c2bde66b5fb86a
tree       2654bb42deb7dcd3b907b1476ec0fbe55c133fb2 /roles/openshift_node_upgrade
parent     8a85bc3a00efb654238a58e1faa2a2629d9360b1
Combine openshift_node and openshift_node_upgrade
Currently, having openshift_node and openshift_node_upgrade as two distinct roles creates duplication across handlers, templates, and some tasks. This commit combines the two roles to reduce that duplication and the bugs that arise when a change lands in only one of the two places.
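With the two roles merged, upgrade plays can pull task files from the same role that installs and configures the node, so handlers and templates live in exactly one place. A minimal sketch of that pattern, assuming a hypothetical upgrade.yml entry point in the combined openshift_node role:

```
---
# Sketch only: reuse the combined role's task files during an upgrade run.
- name: Upgrade node components
  hosts: oo_nodes_to_upgrade
  serial: 1
  any_errors_fatal: true
  tasks:
  - include_role:
      name: openshift_node
      tasks_from: upgrade.yml   # entry-point name is an assumption
```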
Diffstat (limited to 'roles/openshift_node_upgrade')
-rw-r--r--  roles/openshift_node_upgrade/README.md | 111
-rw-r--r--  roles/openshift_node_upgrade/defaults/main.yml | 15
-rw-r--r--  roles/openshift_node_upgrade/files/nuke_images.sh | 25
-rw-r--r--  roles/openshift_node_upgrade/handlers/main.yml | 36
-rw-r--r--  roles/openshift_node_upgrade/meta/main.yml | 13
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml | 16
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/configure-proxy-settings.yml | 17
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/install-node-deps-docker-service-file.yml | 8
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/install-node-docker-service-file.yml | 8
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/install-ovs-docker-service-file.yml | 8
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/install-ovs-service-env-file.yml | 8
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/workaround-bz1331590-ovs-oom-fix.yml | 13
-rw-r--r--  roles/openshift_node_upgrade/tasks/containerized_node_upgrade.yml | 14
-rw-r--r--  roles/openshift_node_upgrade/tasks/docker/upgrade.yml | 40
-rw-r--r--  roles/openshift_node_upgrade/tasks/main.yml | 183
-rw-r--r--  roles/openshift_node_upgrade/tasks/registry_auth.yml | 46
-rw-r--r--  roles/openshift_node_upgrade/tasks/restart.yml | 46
-rw-r--r--  roles/openshift_node_upgrade/tasks/rpm_upgrade.yml | 29
-rw-r--r--  roles/openshift_node_upgrade/tasks/systemd_units.yml | 37
-rw-r--r--  roles/openshift_node_upgrade/templates/node.service.j2 | 31
-rw-r--r--  roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service | 11
-rw-r--r--  roles/openshift_node_upgrade/templates/openshift.docker.node.service | 50
-rw-r--r--  roles/openshift_node_upgrade/templates/openvswitch-avoid-oom.conf | 3
-rw-r--r--  roles/openshift_node_upgrade/templates/openvswitch.docker.service | 17
-rw-r--r--  roles/openshift_node_upgrade/templates/openvswitch.sysconfig.j2 | 1
25 files changed, 0 insertions, 786 deletions
diff --git a/roles/openshift_node_upgrade/README.md b/roles/openshift_node_upgrade/README.md
deleted file mode 100644
index 73b98ad90..000000000
--- a/roles/openshift_node_upgrade/README.md
+++ /dev/null
@@ -1,111 +0,0 @@
-OpenShift Node upgrade
-=========
-
-Role responsible for a single node upgrade.
-It is expected a node is functioning and a part of an OpenShift cluster.
-
-Requirements
-------------
-
-TODO
-
-Role Variables
---------------
-From this role:
-
-| Name | Default value | |
-|--------------------------------|-----------------------|--------------------------------------------------------|
-| deployment_type | | Inventory var |
-| docker_upgrade_nuke_images | | Optional inventory var |
-| docker_version | | Optional inventory var |
-| l_docker_upgrade | | |
-| node_config_hook | | |
-| openshift.docker.gte_1_10 | | |
-| openshift_image_tag | | Set by openshift_version role |
-| openshift_pkg_version | | Set by openshift_version role |
-| openshift_release | | Set by openshift_version role |
-| skip_docker_restart | | |
-| openshift_cloudprovider_kind | | |
-
-From openshift.common:
-
-| Name | Default Value | |
-|------------------------------------|---------------------|---------------------|
-| openshift.common.config_base |---------------------|---------------------|
-| openshift.common.hostname |---------------------|---------------------|
-| openshift.common.http_proxy |---------------------|---------------------|
-| openshift.common.is_atomic |---------------------|---------------------|
-| openshift.common.is_containerized |---------------------|---------------------|
-| openshift.common.portal_net |---------------------|---------------------|
-| openshift.common.service_type |---------------------|---------------------|
-
-From openshift.master:
-
-| Name | Default Value | |
-|------------------------------------|---------------------|---------------------|
-| openshift.master.api_port |---------------------|---------------------|
-
-From openshift.node:
-
-| Name | Default Value | |
-|------------------------------------|---------------------|---------------------|
-| openshift.node.node_image |---------------------|---------------------|
-| openshift.node.ovs_image |---------------------|---------------------|
-
-
-Dependencies
-------------
-
-
-TODO
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
-```
----
-- name: Upgrade nodes
- hosts: oo_nodes_to_upgrade
- serial: 1
- any_errors_fatal: true
-
- pre_tasks:
- - name: Mark unschedulable
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
- delegate_to: "{{ groups.oo_first_master.0 }}"
-
- - name: Drain Node for Kubelet upgrade
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets
- delegate_to: "{{ groups.oo_first_master.0 }}"
- register: l_docker_upgrade_drain_result
- until: not l_docker_upgrade_drain_result | failed
- retries: 60
- delay: 60
-
-
- roles:
- - openshift_facts
- - docker
- - openshift_node_dnsmasq
- - openshift_node_upgrade
-
- post_tasks:
- - name: Set node schedulability
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
- delegate_to: "{{ groups.oo_first_master.0 }}"
-```
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-TODO
diff --git a/roles/openshift_node_upgrade/defaults/main.yml b/roles/openshift_node_upgrade/defaults/main.yml
deleted file mode 100644
index 1da434e6f..000000000
--- a/roles/openshift_node_upgrade/defaults/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-openshift_node_debug_level: "{{ debug_level | default(2) }}"
-
-openshift_use_openshift_sdn: True
-os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
-
-openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
-openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
-
-# oreg_url is defined by user input
-oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
-oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
-oreg_auth_credentials_replace: False
-l_bind_docker_reg_auth: False
-openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False)) or (openshift_use_crio_only | default(False)) }}"
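The oreg_host default above keeps a registry host only when the first path segment of oreg_url looks like a hostname (contains a dot). A short sketch of how that expression evaluates, with illustrative oreg_url values:

```
# oreg_url: "registry.example.com/openshift3/ose-${component}:${version}"
#   split('/')[0] -> "registry.example.com"  (contains '.')  =>  oreg_host: "registry.example.com"
# oreg_url: "openshift3/ose-${component}:${version}"
#   split('/')[0] -> "openshift3"            (no '.')        =>  oreg_host: ""
```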
diff --git a/roles/openshift_node_upgrade/files/nuke_images.sh b/roles/openshift_node_upgrade/files/nuke_images.sh
deleted file mode 100644
index 8635eab0d..000000000
--- a/roles/openshift_node_upgrade/files/nuke_images.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-# Stop any running containers
-running_container_ids=`docker ps -q`
-if test -n "$running_container_ids"
-then
- docker stop $running_container_ids
-fi
-
-# Delete all containers
-container_ids=`docker ps -a -q`
-if test -n "$container_ids"
-then
- docker rm -f -v $container_ids
-fi
-
-# Delete all images (forcefully)
-image_ids=`docker images -aq`
-if test -n "$image_ids"
-then
- # Some layers are deleted recursively and are no longer present
- # when docker goes to remove them:
- docker rmi -f `docker images -aq` || true
-fi
-
diff --git a/roles/openshift_node_upgrade/handlers/main.yml b/roles/openshift_node_upgrade/handlers/main.yml
deleted file mode 100644
index 90d80855e..000000000
--- a/roles/openshift_node_upgrade/handlers/main.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- name: restart openvswitch
- systemd:
- name: openvswitch
- state: restarted
- when:
- - not skip_node_svc_handlers | default(False) | bool
- - not (ovs_service_status_changed | default(false) | bool)
- - openshift_use_openshift_sdn | bool
- register: l_openshift_node_upgrade_stop_openvswitch_result
- until: not l_openshift_node_upgrade_stop_openvswitch_result | failed
- retries: 3
- delay: 30
- notify:
- - restart openvswitch pause
-
-- name: restart openvswitch pause
- pause: seconds=15
- when: (not skip_node_svc_handlers | default(False) | bool) and openshift.common.is_containerized | bool
-
-- name: restart node
- systemd:
- name: "{{ openshift.common.service_type }}-node"
- state: restarted
- register: l_openshift_node_upgrade_restart_node_result
- until: not l_openshift_node_upgrade_restart_node_result | failed
- retries: 3
- delay: 30
- when:
- - (not skip_node_svc_handlers | default(False) | bool)
- - not (node_service_status_changed | default(false) | bool)
-
-# TODO(jchaloup): once it is verified the systemd module works as expected
-# switch to it: http://docs.ansible.com/ansible/latest/systemd_module.html
-- name: reload systemd units
- command: systemctl daemon-reload
diff --git a/roles/openshift_node_upgrade/meta/main.yml b/roles/openshift_node_upgrade/meta/main.yml
deleted file mode 100644
index a810b01dc..000000000
--- a/roles/openshift_node_upgrade/meta/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-galaxy_info:
- author: your name
- description: OpenShift Node upgrade
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 2.1
- platforms:
- - name: EL
- versions:
- - 7
-dependencies:
-- role: lib_utils
diff --git a/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml b/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml
deleted file mode 100644
index 527580481..000000000
--- a/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: Configure Node settings
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- create: true
- with_items:
- - regex: '^OPTIONS='
- line: "OPTIONS=--loglevel={{ openshift_node_debug_level }}"
- - regex: '^CONFIG_FILE='
- line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
- - regex: '^IMAGE_VERSION='
- line: "IMAGE_VERSION={{ openshift_image_tag }}"
- notify:
- - restart node
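For reference, the items above leave the node's sysconfig file with lines along these lines; the service type and image tag shown are assumptions:

```
# /etc/sysconfig/origin-node (illustrative result)
# OPTIONS=--loglevel=2
# CONFIG_FILE=/etc/origin/node/node-config.yaml
# IMAGE_VERSION=v3.7.0
```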
diff --git a/roles/openshift_node_upgrade/tasks/config/configure-proxy-settings.yml b/roles/openshift_node_upgrade/tasks/config/configure-proxy-settings.yml
deleted file mode 100644
index d60794305..000000000
--- a/roles/openshift_node_upgrade/tasks/config/configure-proxy-settings.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: Configure Proxy Settings
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- create: true
- with_items:
- - regex: '^HTTP_PROXY='
- line: "HTTP_PROXY={{ openshift.common.http_proxy | default('') }}"
- - regex: '^HTTPS_PROXY='
- line: "HTTPS_PROXY={{ openshift.common.https_proxy | default('') }}"
- - regex: '^NO_PROXY='
- line: "NO_PROXY={{ openshift.common.no_proxy | default([]) }},{{ openshift.common.portal_net }},{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}"
- when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '')
- notify:
- - restart node
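The NO_PROXY line is the only non-obvious item: it joins the inventory no_proxy value with the cluster portal network and the SDN cluster network CIDR read from the first master. An illustrative result (all values are assumptions):

```
# NO_PROXY=.example.com,172.30.0.0/16,10.128.0.0/14
```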
diff --git a/roles/openshift_node_upgrade/tasks/config/install-node-deps-docker-service-file.yml b/roles/openshift_node_upgrade/tasks/config/install-node-deps-docker-service-file.yml
deleted file mode 100644
index ee91a88ab..000000000
--- a/roles/openshift_node_upgrade/tasks/config/install-node-deps-docker-service-file.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Install Node dependencies docker service file
- template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service"
- src: openshift.docker.node.dep.service
- notify:
- - reload systemd units
- - restart node
diff --git a/roles/openshift_node_upgrade/tasks/config/install-node-docker-service-file.yml b/roles/openshift_node_upgrade/tasks/config/install-node-docker-service-file.yml
deleted file mode 100644
index f92ff79b5..000000000
--- a/roles/openshift_node_upgrade/tasks/config/install-node-docker-service-file.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Install Node docker service file
- template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
- src: openshift.docker.node.service
- notify:
- - reload systemd units
- - restart node
diff --git a/roles/openshift_node_upgrade/tasks/config/install-ovs-docker-service-file.yml b/roles/openshift_node_upgrade/tasks/config/install-ovs-docker-service-file.yml
deleted file mode 100644
index c2c5ea1d4..000000000
--- a/roles/openshift_node_upgrade/tasks/config/install-ovs-docker-service-file.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Install OpenvSwitch docker service file
- template:
- dest: "/etc/systemd/system/openvswitch.service"
- src: openvswitch.docker.service
- notify:
- - reload systemd units
- - restart openvswitch
diff --git a/roles/openshift_node_upgrade/tasks/config/install-ovs-service-env-file.yml b/roles/openshift_node_upgrade/tasks/config/install-ovs-service-env-file.yml
deleted file mode 100644
index 1d75a3355..000000000
--- a/roles/openshift_node_upgrade/tasks/config/install-ovs-service-env-file.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Create the openvswitch service env file
- template:
- src: openvswitch.sysconfig.j2
- dest: /etc/sysconfig/openvswitch
- notify:
- - reload systemd units
- - restart openvswitch
diff --git a/roles/openshift_node_upgrade/tasks/config/workaround-bz1331590-ovs-oom-fix.yml b/roles/openshift_node_upgrade/tasks/config/workaround-bz1331590-ovs-oom-fix.yml
deleted file mode 100644
index 5df1abc79..000000000
--- a/roles/openshift_node_upgrade/tasks/config/workaround-bz1331590-ovs-oom-fix.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# May be a temporary workaround.
-# https://bugzilla.redhat.com/show_bug.cgi?id=1331590
-- name: Create OpenvSwitch service.d directory
- file: path=/etc/systemd/system/openvswitch.service.d/ state=directory
-
-- name: Install OpenvSwitch service OOM fix
- template:
- dest: "/etc/systemd/system/openvswitch.service.d/01-avoid-oom.conf"
- src: openvswitch-avoid-oom.conf
- notify:
- - reload systemd units
- - restart openvswitch
diff --git a/roles/openshift_node_upgrade/tasks/containerized_node_upgrade.yml b/roles/openshift_node_upgrade/tasks/containerized_node_upgrade.yml
deleted file mode 100644
index 07b0ac715..000000000
--- a/roles/openshift_node_upgrade/tasks/containerized_node_upgrade.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# This is a hack to allow us to use systemd_units.yml, but skip the handlers which
-# restart services. We will unconditionally restart all containerized services
-# because we have to unconditionally restart Docker:
-- set_fact:
- skip_node_svc_handlers: True
-
-- name: Update systemd units
- include: systemd_units.yml
-
-# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of
-# play when the node has already been marked schedulable again. (this would look strange
-# in logs otherwise)
-- meta: flush_handlers
diff --git a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
deleted file mode 100644
index ebe87d6fd..000000000
--- a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-# input variables:
-# - openshift.common.service_type
-# - openshift.common.is_containerized
-# - docker_upgrade_nuke_images
-# - docker_version
-# - skip_docker_restart
-
-- name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
-
-- debug: var=docker_image_count.stdout
-
-# TODO(jchaloup): put all docker_upgrade_nuke_images into a block with only one condition
-- name: Remove all containers and images
- script: nuke_images.sh
- register: nuke_images_result
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- service:
- name: docker
- state: stopped
- register: l_openshift_node_upgrade_docker_stop_result
- until: not l_openshift_node_upgrade_docker_stop_result | failed
- retries: 3
- delay: 30
-
-- name: Upgrade Docker
- package: name=docker{{ '-' + docker_version }} state=present
-
-# starting docker happens back in ../main.yml where it calls ../restart.yml
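The Upgrade Docker task pins the package by plain string concatenation, so docker_version is expected to be a bare version string; a sketch with an illustrative value:

```
# docker_version: "1.12.6"
#   -> package: name=docker-1.12.6 state=present
```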
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
deleted file mode 100644
index 66c1fcc38..000000000
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ /dev/null
@@ -1,183 +0,0 @@
----
-# input variables:
-# - l_docker_upgrade
-# - openshift.common.is_atomic
-# - node_config_hook
-# - openshift_pkg_version
-# - openshift.common.is_containerized
-# - deployment_type
-# - openshift_release
-
-# tasks file for openshift_node_upgrade
-
-- include: registry_auth.yml
-
-- name: Stop node and openvswitch services
- service:
- name: "{{ item }}"
- state: stopped
- with_items:
- - "{{ openshift.common.service_type }}-node"
- - openvswitch
- failed_when: false
-
-- name: Stop additional containerized services
- service:
- name: "{{ item }}"
- state: stopped
- with_items:
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-master-api"
- - etcd_container
- failed_when: false
- when: openshift.common.is_containerized | bool
-
-- name: Pre-pull node image
- command: >
- docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift.common.is_containerized | bool
-
-- name: Pre-pull openvswitch image
- command: >
- docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
- when:
- - openshift.common.is_containerized | bool
- - openshift_use_openshift_sdn | bool
-
-- include: docker/upgrade.yml
- vars:
- # We will restart Docker ourselves after everything is ready:
- skip_docker_restart: True
- when:
- - l_docker_upgrade is defined
- - l_docker_upgrade | bool
-
-- include: "{{ node_config_hook }}"
- when: node_config_hook is defined
-
-- include: rpm_upgrade.yml
- vars:
- component: "node"
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- when: not openshift.common.is_containerized | bool
-
-- name: Remove obsolete docker-sdn-ovs.conf
- file:
- path: "/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf"
- state: absent
-
-- include: containerized_node_upgrade.yml
- when: openshift.common.is_containerized | bool
-
-- name: Ensure containerized services stopped before Docker restart
- service:
- name: "{{ item }}"
- state: stopped
- with_items:
- - etcd_container
- - openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
- failed_when: false
- when: openshift.common.is_containerized | bool
-
-- name: Stop rpm based services
- service:
- name: "{{ item }}"
- state: stopped
- with_items:
- - "{{ openshift.common.service_type }}-node"
- - openvswitch
- failed_when: false
- when: not openshift.common.is_containerized | bool
-
-# https://bugzilla.redhat.com/show_bug.cgi?id=1513054
-- name: Clean up dockershim data
- file:
- path: "/var/lib/dockershim/sandbox/"
- state: absent
-
-- name: Upgrade openvswitch
- package:
- name: openvswitch
- state: latest
- when: not openshift.common.is_containerized | bool
-
-- name: Update oreg value
- yedit:
- src: "{{ openshift.common.config_base }}/node/node-config.yaml"
- key: 'imageConfig.format'
- value: "{{ oreg_url | default(oreg_url_node) }}"
- when: oreg_url is defined or oreg_url_node is defined
-
-# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
-- name: Check for swap usage
- command: grep "^[^#].*swap" /etc/fstab
- # grep: match any lines which don't begin with '#' and contain 'swap'
- changed_when: false
- failed_when: false
- register: swap_result
-
- # Disable Swap Block
-- block:
-
- - name: Disable swap
- command: swapoff --all
-
- - name: Remove swap entries from /etc/fstab
- replace:
- dest: /etc/fstab
- regexp: '(^[^#].*swap.*)'
- replace: '# \1'
- backup: yes
-
- - name: Add notice about disabling swap
- lineinfile:
- dest: /etc/fstab
- line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines'
- state: present
-
- when:
- - swap_result.stdout_lines | length > 0
- - openshift_disable_swap | default(true) | bool
- # End Disable Swap Block
-
-- name: Reset selinux context
- command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes
- when:
- - ansible_selinux is defined
- - ansible_selinux.status == 'enabled'
-
-- name: Apply 3.6 dns config changes
- yedit:
- src: /etc/origin/node/node-config.yaml
- key: "{{ item.key }}"
- value: "{{ item.value }}"
- with_items:
- - key: "dnsBindAddress"
- value: "127.0.0.1:53"
- - key: "dnsRecursiveResolvConf"
- value: "/etc/origin/node/resolv.conf"
-
-# Restart all services
-- include: restart.yml
-
-- name: Wait for node to be ready
- oc_obj:
- state: list
- kind: node
- name: "{{ openshift.common.hostname | lower }}"
- register: node_output
- delegate_to: "{{ groups.oo_first_master.0 }}"
- until: node_output.results.returncode == 0 and node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True
- # Give the node two minutes to come back online.
- retries: 24
- delay: 5
-
-- include_role:
- name: openshift_node_dnsmasq
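The until condition on "Wait for node to be ready" filters the node object's status conditions; an abridged sketch of the data it inspects (the condition list is illustrative):

```
# node_output.results.results[0].status (abridged):
status:
  conditions:
  - type: OutOfDisk
    status: "False"
  - type: Ready
    status: "True"
# selectattr('type', 'match', '^Ready$') keeps only the Ready entry,
# map(attribute='status') | join yields "True", and | bool makes the check pass.
```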
diff --git a/roles/openshift_node_upgrade/tasks/registry_auth.yml b/roles/openshift_node_upgrade/tasks/registry_auth.yml
deleted file mode 100644
index f5428867a..000000000
--- a/roles/openshift_node_upgrade/tasks/registry_auth.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- name: Check for credentials file for registry auth
- stat:
- path: "{{ oreg_auth_credentials_path }}"
- when: oreg_auth_user is defined
- register: node_oreg_auth_credentials_stat
-
-- name: Create credentials for registry auth
- command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
- when:
- - not (openshift_docker_alternative_creds | default(False))
- - oreg_auth_user is defined
- - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- register: node_oreg_auth_credentials_create
- retries: 3
- delay: 5
- until: node_oreg_auth_credentials_create.rc == 0
- notify:
- - restart node
-
-# docker_creds is a custom module from lib_utils
-# 'docker login' requires a docker.service running on the local host, this is an
-# alternative implementation for non-docker hosts. This implementation does not
-# check the registry to determine whether or not the credentials will work.
-- name: Create credentials for registry auth (alternative)
- docker_creds:
- path: "{{ oreg_auth_credentials_path }}"
- registry: "{{ oreg_host }}"
- username: "{{ oreg_auth_user }}"
- password: "{{ oreg_auth_password }}"
- when:
- - openshift_docker_alternative_creds | bool
- - oreg_auth_user is defined
- - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- register: node_oreg_auth_credentials_create
- notify:
- - restart node
-
-# Container images may need the registry credentials
-- name: Setup ro mount of /root/.docker for containerized hosts
- set_fact:
- l_bind_docker_reg_auth: True
- when:
- - openshift.common.is_containerized | bool
- - oreg_auth_user is defined
- - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool
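Both credential paths above leave a Docker client config under oreg_auth_credentials_path, which the containerized node can then bind-mount read-only (see l_bind_docker_reg_auth). A rough sketch of the file docker login writes; registry name and encoding shown for illustration:

```
# {{ oreg_auth_credentials_path }}/config.json (illustrative):
# {
#   "auths": {
#     "registry.example.com": {
#       "auth": "<base64 of oreg_auth_user:oreg_auth_password>"
#     }
#   }
# }
```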
diff --git a/roles/openshift_node_upgrade/tasks/restart.yml b/roles/openshift_node_upgrade/tasks/restart.yml
deleted file mode 100644
index a4fa51172..000000000
--- a/roles/openshift_node_upgrade/tasks/restart.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-# input variables:
-# - openshift.common.service_type
-# - openshift.common.is_containerized
-# - openshift.common.hostname
-# - openshift.master.api_port
-
-# NOTE: This is needed to make sure we are using the correct set
-# of systemd unit files. The RPMs lay down defaults but
-# the install/upgrade may override them in /etc/systemd/system/.
-# NOTE: We don't use the systemd module as some versions of the module
-# require a service to be part of the call.
-- name: Reload systemd to ensure latest unit files
- command: systemctl daemon-reload
-
-- name: Restart docker
- service:
- name: "{{ openshift.docker.service_name }}"
- state: started
- register: docker_start_result
- until: not docker_start_result | failed
- retries: 3
- delay: 30
-
-- name: Update docker facts
- openshift_facts:
- role: docker
-
-- name: Start services
- service: name={{ item }} state=started
- with_items:
- - etcd_container
- - openvswitch
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
- failed_when: false
-
-- name: Wait for master API to come back online
- wait_for:
- host: "{{ openshift.common.hostname }}"
- state: started
- delay: 10
- port: "{{ openshift.master.api_port }}"
- timeout: 600
- when: inventory_hostname in groups.oo_masters_to_config
diff --git a/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml b/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml
deleted file mode 100644
index a998acf21..000000000
--- a/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-# input variables:
-# - openshift.common.service_type
-# - component
-# - openshift_pkg_version
-# - openshift.common.is_atomic
-
-# We verified latest rpm available is suitable, so just yum update.
-- name: Upgrade packages
- package: "name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
-
-- name: Ensure python-yaml present for config upgrade
- package: name=PyYAML state=present
- when: not openshift.common.is_atomic | bool
-
-- name: Install Node service file
- template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
- src: "node.service.j2"
- register: l_node_unit
-
-# NOTE: This is needed to make sure we are using the correct set
-# of systemd unit files. The RPMs lay down defaults but
-# the install/upgrade may override them in /etc/systemd/system/.
-# NOTE: We don't use the systemd module as some versions of the module
-# require a service to be part of the call.
-- name: Reload systemd units
- command: systemctl daemon-reload
- when: l_node_unit | changed
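The Upgrade packages task builds the package name by concatenation, so openshift_pkg_version is expected to carry its own leading dash; a sketch with illustrative values:

```
# openshift.common.service_type: origin, component: node, openshift_pkg_version: "-3.7.0"
#   -> package: name=origin-node-3.7.0 state=present
```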
diff --git a/roles/openshift_node_upgrade/tasks/systemd_units.yml b/roles/openshift_node_upgrade/tasks/systemd_units.yml
deleted file mode 100644
index 226f5290c..000000000
--- a/roles/openshift_node_upgrade/tasks/systemd_units.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-# input variables
-# - openshift.node.node_image
-# - openshift_image_tag
-# - openshift.common.is_containerized
-# - openshift.node.ovs_image
-# - openshift_use_openshift_sdn
-# - openshift.common.service_type
-# - openshift_node_debug_level
-# - openshift.common.config_base
-# - openshift.common.http_proxy
-# - openshift.common.portal_net
-# - openshift.common
-# - openshift.common.http_proxy
-# notify:
-# - restart openvswitch
-# - restart node
-
-# This file is included both in the openshift_master role and in the upgrade
-# playbooks.
-- include: config/install-node-deps-docker-service-file.yml
- when: openshift.common.is_containerized | bool
-
-- include: config/install-node-docker-service-file.yml
- when: openshift.common.is_containerized | bool
-
-- include: config/install-ovs-service-env-file.yml
- when: openshift.common.is_containerized | bool
-
-- include: config/workaround-bz1331590-ovs-oom-fix.yml
- when: openshift_use_openshift_sdn | bool
-
-- include: config/install-ovs-docker-service-file.yml
- when: openshift.common.is_containerized | bool and openshift_use_openshift_sdn | bool
-
-- include: config/configure-node-settings.yml
-- include: config/configure-proxy-settings.yml
diff --git a/roles/openshift_node_upgrade/templates/node.service.j2 b/roles/openshift_node_upgrade/templates/node.service.j2
deleted file mode 100644
index e12a52c15..000000000
--- a/roles/openshift_node_upgrade/templates/node.service.j2
+++ /dev/null
@@ -1,31 +0,0 @@
-[Unit]
-Description=OpenShift Node
-After={{ openshift.docker.service_name }}.service
-Wants=openvswitch.service
-After=ovsdb-server.service
-After=ovs-vswitchd.service
-Wants={{ openshift.docker.service_name }}.service
-Documentation=https://github.com/openshift/origin
-Requires=dnsmasq.service
-After=dnsmasq.service
-
-[Service]
-Type=notify
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
-Environment=GOTRACEBACK=crash
-ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
-ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
-ExecStopPost=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:
-ExecStart=/usr/bin/openshift start node --config=${CONFIG_FILE} $OPTIONS
-LimitNOFILE=65536
-LimitCORE=infinity
-WorkingDirectory=/var/lib/origin/
-SyslogIdentifier={{ openshift.common.service_type }}-node
-Restart=always
-RestartSec=5s
-TimeoutStartSec=300
-OOMScoreAdjust=-999
-
-[Install]
-WantedBy=multi-user.target
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
deleted file mode 100644
index aae35719c..000000000
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
+++ /dev/null
@@ -1,11 +0,0 @@
-[Unit]
-Requires={{ openshift.docker.service_name }}.service
-After={{ openshift.docker.service_name }}.service
-PartOf={{ openshift.common.service_type }}-node.service
-Before={{ openshift.common.service_type }}-node.service
-
-
-[Service]
-ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
-ExecStop=
-SyslogIdentifier={{ openshift.common.service_type }}-node-dep
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
deleted file mode 100644
index 07d1ebc3c..000000000
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service
+++ /dev/null
@@ -1,50 +0,0 @@
-[Unit]
-After={{ openshift.common.service_type }}-master.service
-After={{ openshift.docker.service_name }}.service
-After=openvswitch.service
-PartOf={{ openshift.docker.service_name }}.service
-Requires={{ openshift.docker.service_name }}.service
-{% if openshift_use_openshift_sdn %}
-Wants=openvswitch.service
-PartOf=openvswitch.service
-After=ovsdb-server.service
-After=ovs-vswitchd.service
-{% endif %}
-Wants={{ openshift.common.service_type }}-master.service
-Requires={{ openshift.common.service_type }}-node-dep.service
-After={{ openshift.common.service_type }}-node-dep.service
-Requires=dnsmasq.service
-After=dnsmasq.service
-
-[Service]
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
-ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
-ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
- --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \
- -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \
- -e HOST=/rootfs -e HOST_ETC=/host-etc \
- -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} \
- -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node \
- {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
- -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro \
- -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw \
- -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker \
- -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch \
- -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni \
- -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log \
- -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \
- {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
- {{ openshift.node.node_image }}:${IMAGE_VERSION}
-ExecStartPost=/usr/bin/sleep 10
-ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
-ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
-ExecStopPost=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:
-SyslogIdentifier={{ openshift.common.service_type }}-node
-Restart=always
-RestartSec=5s
-
-[Install]
-WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_node_upgrade/templates/openvswitch-avoid-oom.conf b/roles/openshift_node_upgrade/templates/openvswitch-avoid-oom.conf
deleted file mode 100644
index 3229bc56b..000000000
--- a/roles/openshift_node_upgrade/templates/openvswitch-avoid-oom.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-# Avoid the OOM killer for openvswitch and it's children:
-[Service]
-OOMScoreAdjust=-1000
diff --git a/roles/openshift_node_upgrade/templates/openvswitch.docker.service b/roles/openshift_node_upgrade/templates/openvswitch.docker.service
deleted file mode 100644
index 34aaaabd6..000000000
--- a/roles/openshift_node_upgrade/templates/openvswitch.docker.service
+++ /dev/null
@@ -1,17 +0,0 @@
-[Unit]
-After={{ openshift.docker.service_name }}.service
-Requires={{ openshift.docker.service_name }}.service
-PartOf={{ openshift.docker.service_name }}.service
-
-[Service]
-EnvironmentFile=/etc/sysconfig/openvswitch
-ExecStartPre=-/usr/bin/docker rm -f openvswitch
-ExecStart=/usr/bin/docker run --name openvswitch --rm --privileged --net=host --pid=host -v /lib/modules:/lib/modules -v /run:/run -v /sys:/sys:ro -v /etc/origin/openvswitch:/etc/openvswitch {{ openshift.node.ovs_image }}:${IMAGE_VERSION}
-ExecStartPost=/usr/bin/sleep 5
-ExecStop=/usr/bin/docker stop openvswitch
-SyslogIdentifier=openvswitch
-Restart=always
-RestartSec=5s
-
-[Install]
-WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_node_upgrade/templates/openvswitch.sysconfig.j2 b/roles/openshift_node_upgrade/templates/openvswitch.sysconfig.j2
deleted file mode 100644
index da7c3742a..000000000
--- a/roles/openshift_node_upgrade/templates/openvswitch.sysconfig.j2
+++ /dev/null
@@ -1 +0,0 @@
-IMAGE_VERSION={{ openshift_image_tag }}
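Rendered, this one-line template supplies the IMAGE_VERSION consumed by the openvswitch.docker.service unit above; the tag value is illustrative:

```
# /etc/sysconfig/openvswitch (rendered):
# IMAGE_VERSION=v3.7.0
```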