Diffstat (limited to 'roles/lib_utils/library')
-rwxr-xr-x  roles/lib_utils/library/delegated_serial_command.py         274
-rw-r--r--  roles/lib_utils/library/docker_creds.py                       4
-rw-r--r--  roles/lib_utils/library/kubeclient_ca.py                     88
-rw-r--r--  roles/lib_utils/library/modify_yaml.py                      117
-rw-r--r--  roles/lib_utils/library/openshift_cert_expiry.py            839
-rw-r--r--  roles/lib_utils/library/openshift_container_binary_sync.py  205
-rw-r--r--  roles/lib_utils/library/os_firewall_manage_iptables.py      283
-rw-r--r--  roles/lib_utils/library/rpm_q.py                             72
-rw-r--r--  roles/lib_utils/library/swapoff.py                          137
9 files changed, 2018 insertions(+), 1 deletion(-)
diff --git a/roles/lib_utils/library/delegated_serial_command.py b/roles/lib_utils/library/delegated_serial_command.py
new file mode 100755
index 000000000..0cab1ca88
--- /dev/null
+++ b/roles/lib_utils/library/delegated_serial_command.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
+# (c) 2016, Andrew Butcher <abutcher@redhat.com>
+#
+# This module is derived from the Ansible command module.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+# pylint: disable=unused-wildcard-import,wildcard-import,unused-import,redefined-builtin
+
+''' delegated_serial_command '''
+
+import datetime
+import errno
+import glob
+import shlex
+import os
+import fcntl
+import time
+
+DOCUMENTATION = '''
+---
+module: delegated_serial_command
+short_description: Executes a command on a remote node
+version_added: historical
+description:
+ - The M(command) module takes the command name followed by a list
+ of space-delimited arguments.
+ - The given command will be executed on all selected nodes. It
+ will not be processed through the shell, so variables like
+ C($HOME) and operations like C("<"), C(">"), C("|"), and C("&")
+ will not work (use the M(shell) module if you need these
+ features).
+  - Creates and maintains a lockfile so that concurrent invocations
+    of this module wait for one another and run serially.
+options:
+ command:
+ description:
+ - the command to run
+ required: true
+ default: null
+ creates:
+ description:
+      - a filename or (since 2.0) glob pattern; if it already
+        exists, this step will B(not) be run.
+ required: no
+ default: null
+ removes:
+ description:
+      - a filename or (since 2.0) glob pattern; if it does not
+        exist, this step will B(not) be run.
+ version_added: "0.8"
+ required: no
+ default: null
+ chdir:
+ description:
+ - cd into this directory before running the command
+ version_added: "0.6"
+ required: false
+ default: null
+ executable:
+ description:
+ - change the shell used to execute the command. Should be an
+ absolute path to the executable.
+ required: false
+ default: null
+ version_added: "0.9"
+ warn:
+ version_added: "1.8"
+ default: yes
+ description:
+ - if command warnings are on in ansible.cfg, do not warn about
+ this particular line if set to no/false.
+ required: false
+  lockfile:
+    default: /tmp/delegated_serial_command.lock
+    description:
+      - the lockfile that will be created
+  timeout:
+    default: 30
+    description:
+      - maximum number of attempts to obtain the lock; one attempt is
+        made every 100 milliseconds
+notes:
+ - If you want to run a command through the shell (say you are using C(<),
+ C(>), C(|), etc), you actually want the M(shell) module instead. The
+ M(command) module is much more secure as it's not affected by the user's
+ environment.
+ - " C(creates), C(removes), and C(chdir) can be specified after
+ the command. For instance, if you only want to run a command if
+ a certain file does not exist, use this."
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+ - Andrew Butcher
+'''
+
+EXAMPLES = '''
+# Example from Ansible Playbooks.
+- delegated_serial_command:
+ command: /sbin/shutdown -t now
+
+# Run the command if the specified file does not exist.
+- delegated_serial_command:
+ command: /usr/bin/make_database.sh arg1 arg2
+ creates: /path/to/database
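+
+# Hypothetical illustration (not among the original examples): serialize a
+# disruptive restart with an explicit lockfile and a longer timeout
+# (attempts are made every 100ms, so 600 attempts is roughly 60 seconds).
+- delegated_serial_command:
+    command: /usr/bin/systemctl restart docker
+    lockfile: /tmp/restart_docker.lock
+    timeout: 600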
+'''
+
+# Dict of options and their defaults
+OPTIONS = {'chdir': None,
+ 'creates': None,
+ 'command': None,
+ 'executable': None,
+ 'NO_LOG': None,
+ 'removes': None,
+ 'warn': True,
+ 'lockfile': None,
+ 'timeout': None}
+
+
+def check_command(commandline):
+ ''' Check provided command '''
+ arguments = {'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group',
+ 'ln': 'state=link', 'mkdir': 'state=directory',
+ 'rmdir': 'state=absent', 'rm': 'state=absent', 'touch': 'state=touch'}
+ commands = {'git': 'git', 'hg': 'hg', 'curl': 'get_url or uri', 'wget': 'get_url or uri',
+ 'svn': 'subversion', 'service': 'service',
+ 'mount': 'mount', 'rpm': 'yum, dnf or zypper', 'yum': 'yum', 'apt-get': 'apt',
+ 'tar': 'unarchive', 'unzip': 'unarchive', 'sed': 'template or lineinfile',
+ 'rsync': 'synchronize', 'dnf': 'dnf', 'zypper': 'zypper'}
+ become = ['sudo', 'su', 'pbrun', 'pfexec', 'runas']
+ warnings = list()
+ command = os.path.basename(commandline.split()[0])
+ # pylint: disable=line-too-long
+ if command in arguments:
+ warnings.append("Consider using file module with {0} rather than running {1}".format(arguments[command], command))
+ if command in commands:
+ warnings.append("Consider using {0} module rather than running {1}".format(commands[command], command))
+ if command in become:
+ warnings.append(
+ "Consider using 'become', 'become_method', and 'become_user' rather than running {0}".format(command,))
+ return warnings
+
+
+# pylint: disable=too-many-statements,too-many-branches,too-many-locals
+def main():
+ ''' Main module function '''
+ module = AnsibleModule( # noqa: F405
+ argument_spec=dict(
+ _uses_shell=dict(type='bool', default=False),
+ command=dict(required=True),
+ chdir=dict(),
+ executable=dict(),
+ creates=dict(),
+ removes=dict(),
+ warn=dict(type='bool', default=True),
+ lockfile=dict(default='/tmp/delegated_serial_command.lock'),
+ timeout=dict(type='int', default=30)
+ )
+ )
+
+ shell = module.params['_uses_shell']
+ chdir = module.params['chdir']
+ executable = module.params['executable']
+ command = module.params['command']
+ creates = module.params['creates']
+ removes = module.params['removes']
+ warn = module.params['warn']
+ lockfile = module.params['lockfile']
+ timeout = module.params['timeout']
+
+ if command.strip() == '':
+ module.fail_json(rc=256, msg="no command given")
+
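+    # Poll for the lock in non-blocking mode, retrying every 100ms for up
+    # to `timeout` attempts; EAGAIN means another invocation holds the lock.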
+ iterated = 0
+ lockfd = open(lockfile, 'w+')
+ while iterated < timeout:
+ try:
+ fcntl.flock(lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ break
+ # pylint: disable=invalid-name
+ except IOError as e:
+ if e.errno != errno.EAGAIN:
+ module.fail_json(msg="I/O Error {0}: {1}".format(e.errno, e.strerror))
+ else:
+ iterated += 1
+ time.sleep(0.1)
+
+ if chdir:
+ chdir = os.path.abspath(os.path.expanduser(chdir))
+ os.chdir(chdir)
+
+ if creates:
+ # do not run the command if the line contains creates=filename
+ # and the filename already exists. This allows idempotence
+ # of command executions.
+ path = os.path.expanduser(creates)
+ if glob.glob(path):
+ module.exit_json(
+ cmd=command,
+ stdout="skipped, since %s exists" % path,
+ changed=False,
+ stderr=False,
+ rc=0
+ )
+
+ if removes:
+ # do not run the command if the line contains removes=filename
+ # and the filename does not exist. This allows idempotence
+ # of command executions.
+ path = os.path.expanduser(removes)
+ if not glob.glob(path):
+ module.exit_json(
+ cmd=command,
+ stdout="skipped, since %s does not exist" % path,
+ changed=False,
+ stderr=False,
+ rc=0
+ )
+
+ warnings = list()
+ if warn:
+ warnings = check_command(command)
+
+ if not shell:
+ command = shlex.split(command)
+ startd = datetime.datetime.now()
+
+ # pylint: disable=invalid-name
+ rc, out, err = module.run_command(command, executable=executable, use_unsafe_shell=shell)
+
+ fcntl.flock(lockfd, fcntl.LOCK_UN)
+ lockfd.close()
+
+ endd = datetime.datetime.now()
+ delta = endd - startd
+
+ if out is None:
+ out = ''
+ if err is None:
+ err = ''
+
+ module.exit_json(
+ cmd=command,
+ stdout=out.rstrip("\r\n"),
+ stderr=err.rstrip("\r\n"),
+ rc=rc,
+ start=str(startd),
+ end=str(endd),
+ delta=str(delta),
+ changed=True,
+ warnings=warnings,
+ iterated=iterated
+ )
+
+
+# import module snippets
+# pylint: disable=wrong-import-position
+from ansible.module_utils.basic import * # noqa: F402,F403
+
+main()
diff --git a/roles/lib_utils/library/docker_creds.py b/roles/lib_utils/library/docker_creds.py
index d4674845e..936fb1c38 100644
--- a/roles/lib_utils/library/docker_creds.py
+++ b/roles/lib_utils/library/docker_creds.py
@@ -135,7 +135,7 @@ def update_config(docker_config, registry, username, password):
docker_config['auths'][registry] = {}
# base64 encode our username:password string
- encoded_data = base64.b64encode('{}:{}'.format(username, password))
+ encoded_data = base64.b64encode('{}:{}'.format(username, password).encode())
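+    # Python 3: b64encode() operates on bytes, so the credential string is
+    # encoded before being base64-encoded.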
# check if the same value is already present for idempotency.
if 'auth' in docker_config['auths'][registry]:
@@ -148,6 +148,8 @@ def update_config(docker_config, registry, username, password):
def write_config(module, docker_config, dest):
'''Write updated credentials into dest/config.json'''
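+    # Under Python 3 the rendered config may arrive as bytes rather than a
+    # dict; decode it to text before writing it out.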
+ if not isinstance(docker_config, dict):
+ docker_config = docker_config.decode()
conf_file_path = os.path.join(dest, 'config.json')
try:
with open(conf_file_path, 'w') as conf_file:
diff --git a/roles/lib_utils/library/kubeclient_ca.py b/roles/lib_utils/library/kubeclient_ca.py
new file mode 100644
index 000000000..a89a5574f
--- /dev/null
+++ b/roles/lib_utils/library/kubeclient_ca.py
@@ -0,0 +1,88 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+''' kubeclient_ca ansible module '''
+
+import base64
+import yaml
+from ansible.module_utils.basic import AnsibleModule
+
+
+DOCUMENTATION = '''
+---
+module: kubeclient_ca
+short_description: Modify kubeclient certificate-authority-data
+author: Andrew Butcher
+requirements: [ ]
+'''
+EXAMPLES = '''
+- kubeclient_ca:
+ client_path: /etc/origin/master/admin.kubeconfig
+ ca_path: /etc/origin/master/ca-bundle.crt
+
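+# Note: ca_data must be base64-encoded; M(slurp) returns content in that form.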
+- slurp:
+ src: /etc/origin/master/ca-bundle.crt
+ register: ca_data
+- kubeclient_ca:
+ client_path: /etc/origin/master/admin.kubeconfig
+ ca_data: "{{ ca_data.content }}"
+'''
+
+
+def main():
+ ''' Modify kubeconfig located at `client_path`, setting the
+ certificate authority data to specified `ca_data` or contents of
+ `ca_path`.
+ '''
+
+ module = AnsibleModule( # noqa: F405
+ argument_spec=dict(
+ client_path=dict(required=True),
+ ca_data=dict(required=False, default=None),
+ ca_path=dict(required=False, default=None),
+ backup=dict(required=False, default=True, type='bool'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['ca_data', 'ca_path']],
+ required_one_of=[['ca_data', 'ca_path']]
+ )
+
+ client_path = module.params['client_path']
+ ca_data = module.params['ca_data']
+ ca_path = module.params['ca_path']
+ backup = module.params['backup']
+
+ try:
+ with open(client_path) as client_config_file:
+ client_config_data = yaml.safe_load(client_config_file.read())
+
+ if ca_data is None:
+ with open(ca_path) as ca_file:
+ ca_data = base64.standard_b64encode(ca_file.read())
+
+ changes = []
+ # Naively update the CA information for each cluster in the
+ # kubeconfig.
+ for cluster in client_config_data['clusters']:
+ if cluster['cluster']['certificate-authority-data'] != ca_data:
+ cluster['cluster']['certificate-authority-data'] = ca_data
+ changes.append(cluster['name'])
+
+ if not module.check_mode:
+ if len(changes) > 0 and backup:
+ module.backup_local(client_path)
+
+ with open(client_path, 'w') as client_config_file:
+ client_config_string = yaml.dump(client_config_data, default_flow_style=False)
+ client_config_string = client_config_string.replace('\'\'', '""')
+ client_config_file.write(client_config_string)
+
+ return module.exit_json(changed=(len(changes) > 0))
+
+ # ignore broad-except error to avoid stack trace to ansible user
+ # pylint: disable=broad-except
+ except Exception as error:
+ return module.fail_json(msg=str(error))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_utils/library/modify_yaml.py b/roles/lib_utils/library/modify_yaml.py
new file mode 100644
index 000000000..9b8f9ba33
--- /dev/null
+++ b/roles/lib_utils/library/modify_yaml.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+''' modify_yaml ansible module '''
+
+import yaml
+
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+from ansible.module_utils.basic import * # noqa: F402,F403
+
+
+DOCUMENTATION = '''
+---
+module: modify_yaml
+short_description: Modify yaml key value pairs
+author: Andrew Butcher
+requirements: [ ]
+'''
+EXAMPLES = '''
+- modify_yaml:
+ dest: /etc/origin/master/master-config.yaml
+ yaml_key: 'kubernetesMasterConfig.masterCount'
+ yaml_value: 2
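+
+# Hypothetical illustration (not among the original examples): missing
+# intermediate keys are created as empty maps, so a nested key can be set
+# in a single call.
+- modify_yaml:
+    dest: /etc/origin/master/master-config.yaml
+    yaml_key: 'imagePolicyConfig.maxImagesBulkImportedPerRepository'
+    yaml_value: 3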
+'''
+
+
+def set_key(yaml_data, yaml_key, yaml_value):
+ ''' Updates a parsed yaml structure setting a key to a value.
+
+ :param yaml_data: yaml structure to modify.
+ :type yaml_data: dict
+ :param yaml_key: Key to modify.
+ :type yaml_key: mixed
+ :param yaml_value: Value use for yaml_key.
+ :type yaml_value: mixed
+ :returns: Changes to the yaml_data structure
+ :rtype: dict(tuple())
+ '''
+ changes = []
+ ptr = yaml_data
+ final_key = yaml_key.split('.')[-1]
+ for key in yaml_key.split('.'):
+ # Key isn't present and we're not on the final key. Set to empty dictionary.
+ if key not in ptr and key != final_key:
+ ptr[key] = {}
+ ptr = ptr[key]
+ # Current key is the final key. Update value.
+ elif key == final_key:
+ if (key in ptr and module.safe_eval(ptr[key]) != yaml_value) or (key not in ptr): # noqa: F405
+ ptr[key] = yaml_value
+ changes.append((yaml_key, yaml_value))
+ else:
+ # Next value is None and we're not on the final key.
+ # Turn value into an empty dictionary.
+ if ptr[key] is None and key != final_key:
+ ptr[key] = {}
+ ptr = ptr[key]
+ return changes
+
+
+def main():
+ ''' Modify key (supplied in jinja2 dot notation) in yaml file, setting
+ the key to the desired value.
+ '''
+
+ # disabling pylint errors for global-variable-undefined and invalid-name
+ # for 'global module' usage, since it is required to use ansible_facts
+ # pylint: disable=global-variable-undefined, invalid-name,
+ # redefined-outer-name
+ global module
+
+ module = AnsibleModule( # noqa: F405
+ argument_spec=dict(
+ dest=dict(required=True),
+ yaml_key=dict(required=True),
+ yaml_value=dict(required=True),
+ backup=dict(required=False, default=True, type='bool'),
+ ),
+ supports_check_mode=True,
+ )
+
+ dest = module.params['dest']
+ yaml_key = module.params['yaml_key']
+ yaml_value = module.safe_eval(module.params['yaml_value'])
+ backup = module.params['backup']
+
+ # Represent null values as an empty string.
+ # pylint: disable=missing-docstring, unused-argument
+ def none_representer(dumper, data):
+ return yaml.ScalarNode(tag=u'tag:yaml.org,2002:null', value=u'')
+
+ yaml.add_representer(type(None), none_representer)
+
+ try:
+ with open(dest) as yaml_file:
+ yaml_data = yaml.safe_load(yaml_file.read())
+
+ changes = set_key(yaml_data, yaml_key, yaml_value)
+
+ if len(changes) > 0:
+ if backup:
+ module.backup_local(dest)
+ with open(dest, 'w') as yaml_file:
+ yaml_string = yaml.dump(yaml_data, default_flow_style=False)
+ yaml_string = yaml_string.replace('\'\'', '""')
+ yaml_file.write(yaml_string)
+
+ return module.exit_json(changed=(len(changes) > 0), changes=changes)
+
+ # ignore broad-except error to avoid stack trace to ansible user
+ # pylint: disable=broad-except
+ except Exception as error:
+ return module.fail_json(msg=str(error))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_utils/library/openshift_cert_expiry.py b/roles/lib_utils/library/openshift_cert_expiry.py
new file mode 100644
index 000000000..e355266b0
--- /dev/null
+++ b/roles/lib_utils/library/openshift_cert_expiry.py
@@ -0,0 +1,839 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# pylint: disable=line-too-long,invalid-name
+
+"""For details on this module see DOCUMENTATION (below)"""
+
+import base64
+import datetime
+import io
+import os
+import subprocess
+import yaml
+
+# pylint import-error disabled because pylint cannot find the package
+# when installed in a virtualenv
+from ansible.module_utils.six.moves import configparser # pylint: disable=import-error
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+ # You can comment this import out and include a 'pass' in this
+ # block if you're manually testing this module on a NON-ATOMIC
+ # HOST (or any host that just doesn't have PyOpenSSL
+ # available). That will force the `load_and_handle_cert` function
+ # to use the Fake OpenSSL classes.
+ import OpenSSL.crypto
+ HAS_OPENSSL = True
+except ImportError:
+ # Some platforms (such as RHEL Atomic) may not have the Python
+ # OpenSSL library installed. In this case we will use a manual
+ # work-around to parse each certificate.
+ #
+ # Check for 'OpenSSL.crypto' in `sys.modules` later.
+ HAS_OPENSSL = False
+
+DOCUMENTATION = '''
+---
+module: openshift_cert_expiry
+short_description: Check OpenShift Container Platform (OCP) and Kube certificate expirations on a cluster
+description:
+ - The M(openshift_cert_expiry) module has two basic functions: to flag certificates which will expire in a set window of time from now, and to notify you about certificates which have already expired.
+ - When the module finishes, a summary of the examination is returned. Each certificate in the summary has a C(health) key with a value of one of the following:
+ - C(ok) - not expired, and outside of the expiration C(warning_days) window.
+ - C(warning) - not expired, but will expire between now and the C(warning_days) window.
+ - C(expired) - an expired certificate.
+  - Certificate flagging follows this logic:
+  - If the expiration date is before now then the certificate is classified as C(expired).
+  - The certificate's time to live (expiration date - now) is calculated; if that time window is less than C(warning_days) the certificate is classified as C(warning).
+ - All other conditions are classified as C(ok).
+ - The following keys are ALSO present in the certificate summary:
+ - C(cert_cn) - The common name of the certificate (additional CNs present in SAN extensions are omitted)
+ - C(days_remaining) - The number of days until the certificate expires.
+ - C(expiry) - The date the certificate expires on.
+ - C(path) - The full path to the certificate on the examined host.
+version_added: "1.0"
+options:
+ config_base:
+ description:
+ - Base path to OCP system settings.
+ required: false
+ default: /etc/origin
+ warning_days:
+ description:
+ - Flag certificates which will expire in C(warning_days) days from now.
+ required: false
+ default: 30
+ show_all:
+ description:
+ - Enable this option to show analysis of ALL certificates examined by this module.
+ - By default only certificates which have expired, or will expire within the C(warning_days) window will be reported.
+ required: false
+ default: false
+
+author: "Tim Bielawa (@tbielawa) <tbielawa@redhat.com>"
+'''
+
+EXAMPLES = '''
+# Default invocation, only notify about expired certificates or certificates which will expire within 30 days from now
+- openshift_cert_expiry:
+
+# Expand the warning window to show certificates expiring within a year from now
+- openshift_cert_expiry: warning_days=365
+
+# Show expired, soon to expire (now + 30 days), and all other certificates examined
+- openshift_cert_expiry: show_all=true
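+
+# Hypothetical illustration (not among the original examples): register the
+# result and act on the returned summary counts
+- openshift_cert_expiry:
+  register: cert_check
+- fail:
+    msg: "Expired certificates were found"
+  when: cert_check.summary.expired > 0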
+'''
+
+
+class FakeOpenSSLCertificate(object):
+ """This provides a rough mock of what you get from
+`OpenSSL.crypto.load_certificate()`. This is a work-around for
+platforms missing the Python OpenSSL library.
+ """
+ def __init__(self, cert_string):
+ """`cert_string` is a certificate in the form you get from running a
+.crt through 'openssl x509 -in CERT.cert -text'"""
+ self.cert_string = cert_string
+ self.serial = None
+ self.subject = None
+ self.extensions = []
+ self.not_after = None
+ self._parse_cert()
+
+ def _parse_cert(self):
+ """Manually parse the certificate line by line"""
+ self.extensions = []
+
+ PARSING_ALT_NAMES = False
+ PARSING_HEX_SERIAL = False
+ for line in self.cert_string.split('\n'):
+ l = line.strip()
+ if PARSING_ALT_NAMES:
+ # We're parsing a 'Subject Alternative Name' line
+ self.extensions.append(
+ FakeOpenSSLCertificateSANExtension(l))
+
+ PARSING_ALT_NAMES = False
+ continue
+
+ if PARSING_HEX_SERIAL:
+ # Hex serials arrive colon-delimited
+ serial_raw = l.replace(':', '')
+ # Convert to decimal
+ self.serial = int('0x' + serial_raw, base=16)
+ PARSING_HEX_SERIAL = False
+ continue
+
+ # parse out the bits that we can
+ if l.startswith('Serial Number:'):
+ # Decimal format:
+ # Serial Number: 11 (0xb)
+ # => 11
+ # Hex Format (large serials):
+ # Serial Number:
+ # 0a:de:eb:24:04:75:ab:56:39:14:e9:5a:22:e2:85:bf
+ # => 14449739080294792594019643629255165375
+ if l.endswith(':'):
+ PARSING_HEX_SERIAL = True
+ continue
+ self.serial = int(l.split()[-2])
+
+ elif l.startswith('Not After :'):
+ # Not After : Feb 7 18:19:35 2019 GMT
+ # => strptime(str, '%b %d %H:%M:%S %Y %Z')
+ # => strftime('%Y%m%d%H%M%SZ')
+ # => 20190207181935Z
+ not_after_raw = l.partition(' : ')[-1]
+ # Last item: ('Not After', ' : ', 'Feb 7 18:19:35 2019 GMT')
+ not_after_parsed = datetime.datetime.strptime(not_after_raw, '%b %d %H:%M:%S %Y %Z')
+ self.not_after = not_after_parsed.strftime('%Y%m%d%H%M%SZ')
+
+ elif l.startswith('X509v3 Subject Alternative Name:'):
+ PARSING_ALT_NAMES = True
+ continue
+
+ elif l.startswith('Subject:'):
+ # O = system:nodes, CN = system:node:m01.example.com
+ self.subject = FakeOpenSSLCertificateSubjects(l.partition(': ')[-1])
+
+ def get_serial_number(self):
+ """Return the serial number of the cert"""
+ return self.serial
+
+ def get_subject(self):
+ """Subjects must implement get_components() and return dicts or
+tuples. An 'openssl x509 -in CERT.cert -text' with 'Subject':
+
+    Subject: O=system:nodes, CN=system:node:m01.example.com
+
+might return: [('O=system', 'nodes'), ('CN=system', 'node:m01.example.com')]
+ """
+ return self.subject
+
+ def get_extension(self, i):
+ """Extensions must implement get_short_name() and return the string
+'subjectAltName'"""
+ return self.extensions[i]
+
+ def get_extension_count(self):
+ """ get_extension_count """
+ return len(self.extensions)
+
+ def get_notAfter(self):
+ """Returns a date stamp as a string in the form
+'20180922170439Z'. strptime the result with format param:
+'%Y%m%d%H%M%SZ'."""
+ return self.not_after
+
+
+class FakeOpenSSLCertificateSANExtension(object): # pylint: disable=too-few-public-methods
+ """Mocks what happens when `get_extension` is called on a certificate
+object"""
+
+ def __init__(self, san_string):
+ """With `san_string` as you get from:
+
+ $ openssl x509 -in certificate.crt -text
+ """
+ self.san_string = san_string
+ self.short_name = 'subjectAltName'
+
+ def get_short_name(self):
+ """Return the 'type' of this extension. It's always the same though
+because we only care about subjectAltName extensions"""
+ return self.short_name
+
+ def __str__(self):
+ """Return this extension and the value as a simple string"""
+ return self.san_string
+
+
+# pylint: disable=too-few-public-methods
+class FakeOpenSSLCertificateSubjects(object):
+ """Mocks what happens when `get_subject` is called on a certificate
+object"""
+
+ def __init__(self, subject_string):
+ """With `subject_string` as you get from:
+
+ $ openssl x509 -in certificate.crt -text
+ """
+ self.subjects = []
+ for s in subject_string.split(', '):
+ name, _, value = s.partition(' = ')
+ self.subjects.append((name, value))
+
+ def get_components(self):
+ """Returns a list of tuples"""
+ return self.subjects
+
+
+######################################################################
+def filter_paths(path_list):
+ """`path_list` - A list of file paths to check. Only files which exist
+will be returned
+ """
+ return [p for p in path_list if os.path.exists(os.path.realpath(p))]
+
+
+# pylint: disable=too-many-locals,too-many-branches
+#
+# TODO: Break this function down into smaller chunks
+def load_and_handle_cert(cert_string, now, base64decode=False, ans_module=None):
+ """Load a certificate, split off the good parts, and return some
+useful data
+
+Params:
+
+- `cert_string` (string) - a certificate loaded into a string object
+- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against
+- `base64decode` (bool) - run base64.b64decode() on the input
+- `ans_module` (AnsibleModule) - The AnsibleModule object for this module (so we can raise errors)
+
+Returns:
+A tuple of the form:
+ (cert_subject, cert_expiry_date, time_remaining, cert_serial_number)
+ """
+ if base64decode:
+ _cert_string = base64.b64decode(cert_string).decode('utf-8')
+ else:
+ _cert_string = cert_string
+
+ # Disable this. We 'redefine' the type because we are working
+ # around a missing library on the target host.
+ #
+ # pylint: disable=redefined-variable-type
+ if HAS_OPENSSL:
+ # No work-around required
+ cert_loaded = OpenSSL.crypto.load_certificate(
+ OpenSSL.crypto.FILETYPE_PEM, _cert_string)
+ else:
+ # Missing library, work-around required. Run the 'openssl'
+ # command on it to decode it
+ cmd = 'openssl x509 -text'
+ try:
+ openssl_proc = subprocess.Popen(cmd.split(),
+ stdout=subprocess.PIPE,
+ stdin=subprocess.PIPE)
+ except OSError:
+ ans_module.fail_json(msg="Error: The 'OpenSSL' python library and CLI command were not found on the target host. Unable to parse any certificates. This host will not be included in generated reports.")
+ else:
+ openssl_decoded = openssl_proc.communicate(_cert_string.encode('utf-8'))[0].decode('utf-8')
+ cert_loaded = FakeOpenSSLCertificate(openssl_decoded)
+
+ ######################################################################
+ # Read all possible names from the cert
+ cert_subjects = []
+ for name, value in cert_loaded.get_subject().get_components():
+ if isinstance(name, bytes) or isinstance(value, bytes):
+ name = name.decode('utf-8')
+ value = value.decode('utf-8')
+ cert_subjects.append('{}:{}'.format(name, value))
+
+ # To read SANs from a cert we must read the subjectAltName
+ # extension from the X509 Object. What makes this more difficult
+ # is that pyOpenSSL does not give extensions as an iterable
+ san = None
+ for i in range(cert_loaded.get_extension_count()):
+ ext = cert_loaded.get_extension(i)
+ if ext.get_short_name() == 'subjectAltName':
+ san = ext
+
+ if san is not None:
+ # The X509Extension object for subjectAltName prints as a
+ # string with the alt names separated by a comma and a
+ # space. Split the string by ', ' and then add our new names
+ # to the list of existing names
+ cert_subjects.extend(str(san).split(', '))
+
+ cert_subject = ', '.join(cert_subjects)
+ ######################################################################
+
+ # Grab the expiration date
+ not_after = cert_loaded.get_notAfter()
+ # example get_notAfter() => 20180922170439Z
+ if isinstance(not_after, bytes):
+ not_after = not_after.decode('utf-8')
+
+ cert_expiry_date = datetime.datetime.strptime(
+ not_after,
+ '%Y%m%d%H%M%SZ')
+
+ time_remaining = cert_expiry_date - now
+
+ return (cert_subject, cert_expiry_date, time_remaining, cert_loaded.get_serial_number())
+
+
+def classify_cert(cert_meta, now, time_remaining, expire_window, cert_list):
+ """Given metadata about a certificate under examination, classify it
+ into one of three categories, 'ok', 'warning', and 'expired'.
+
+Params:
+
+- `cert_meta` dict - A dict with certificate metadata. Required fields
+ include: 'cert_cn', 'path', 'expiry', 'days_remaining', 'health'.
+- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against
+- `time_remaining` (datetime.timedelta) - a timedelta for how long until the cert expires
+- `expire_window` (datetime.timedelta) - a timedelta for how long the warning window is
+- `cert_list` list - A list to shove the classified cert into
+
+Return:
+- `cert_list` - The updated list of classified certificates
+ """
+ expiry_str = str(cert_meta['expiry'])
+ # Categorization
+ if cert_meta['expiry'] < now:
+ # This already expired, must NOTIFY
+ cert_meta['health'] = 'expired'
+ elif time_remaining < expire_window:
+        # WARN about this upcoming expiration
+ cert_meta['health'] = 'warning'
+ else:
+ # Not expired or about to expire
+ cert_meta['health'] = 'ok'
+
+ cert_meta['expiry'] = expiry_str
+ cert_meta['serial_hex'] = hex(int(cert_meta['serial']))
+ cert_list.append(cert_meta)
+ return cert_list
+
+
+def tabulate_summary(certificates, kubeconfigs, etcd_certs, router_certs, registry_certs):
+ """Calculate the summary text for when the module finishes
+running. This includes counts of each classification and what have
+you.
+
+Params:
+
+- `certificates` (list of dicts) - Processed `expire_check_result`
+ dicts with filled in `health` keys for system certificates.
+- `kubeconfigs` - as above for kubeconfigs
+- `etcd_certs` - as above for etcd certs
+- `router_certs` - as above for router certs
+- `registry_certs` - as above for registry certs
+
+Return:
+
+- `summary_results` (dict) - Counts of each cert type classification
+ and total items examined.
+ """
+ items = certificates + kubeconfigs + etcd_certs + router_certs + registry_certs
+
+ summary_results = {
+ 'system_certificates': len(certificates),
+ 'kubeconfig_certificates': len(kubeconfigs),
+ 'etcd_certificates': len(etcd_certs),
+ 'router_certs': len(router_certs),
+ 'registry_certs': len(registry_certs),
+ 'total': len(items),
+ 'ok': 0,
+ 'warning': 0,
+ 'expired': 0
+ }
+
+ summary_results['expired'] = len([c for c in items if c['health'] == 'expired'])
+ summary_results['warning'] = len([c for c in items if c['health'] == 'warning'])
+ summary_results['ok'] = len([c for c in items if c['health'] == 'ok'])
+
+ return summary_results
+
+
+######################################################################
+# This is our module MAIN function after all, so there's bound to be a
+# lot of code bundled up into one block
+#
+# Reason: These checks are disabled because the issue was introduced
+# during a period where the pylint checks weren't enabled for this file
+# Status: temporarily disabled pending future refactoring
+# pylint: disable=too-many-locals,too-many-statements,too-many-branches
+def main():
+ """This module examines certificates (in various forms) which compose
+an OpenShift Container Platform cluster
+ """
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ config_base=dict(
+ required=False,
+ default="/etc/origin",
+ type='str'),
+ warning_days=dict(
+ required=False,
+ default=30,
+ type='int'),
+ show_all=dict(
+ required=False,
+ default=False,
+ type='bool')
+ ),
+ supports_check_mode=True,
+ )
+
+ # Basic scaffolding for OpenShift specific certs
+ openshift_base_config_path = os.path.realpath(module.params['config_base'])
+ openshift_master_config_path = os.path.join(openshift_base_config_path,
+ "master", "master-config.yaml")
+ openshift_node_config_path = os.path.join(openshift_base_config_path,
+ "node", "node-config.yaml")
+ openshift_cert_check_paths = [
+ openshift_master_config_path,
+ openshift_node_config_path,
+ ]
+
+ # Paths for Kubeconfigs. Additional kubeconfigs are conditionally
+ # checked later in the code
+ master_kube_configs = ['admin', 'openshift-master',
+ 'openshift-node', 'openshift-router',
+ 'openshift-registry']
+
+ kubeconfig_paths = []
+ for m_kube_config in master_kube_configs:
+ kubeconfig_paths.append(
+ os.path.join(openshift_base_config_path, "master", m_kube_config + ".kubeconfig")
+ )
+
+ # Validate some paths we have the ability to do ahead of time
+ openshift_cert_check_paths = filter_paths(openshift_cert_check_paths)
+ kubeconfig_paths = filter_paths(kubeconfig_paths)
+
+ # etcd, where do you hide your certs? Used when parsing etcd.conf
+ etcd_cert_params = [
+ "ETCD_CA_FILE",
+ "ETCD_CERT_FILE",
+ "ETCD_PEER_CA_FILE",
+ "ETCD_PEER_CERT_FILE",
+ ]
+
+ # Expiry checking stuff
+ now = datetime.datetime.now()
+ # todo, catch exception for invalid input and return a fail_json
+ warning_days = int(module.params['warning_days'])
+ expire_window = datetime.timedelta(days=warning_days)
+
+ # Module stuff
+ #
+ # The results of our cert checking to return from the task call
+ check_results = {}
+ check_results['meta'] = {}
+ check_results['meta']['warning_days'] = warning_days
+ check_results['meta']['checked_at_time'] = str(now)
+ check_results['meta']['warn_before_date'] = str(now + expire_window)
+ check_results['meta']['show_all'] = str(module.params['show_all'])
+ # All the analyzed certs accumulate here
+ ocp_certs = []
+
+ ######################################################################
+ # Sure, why not? Let's enable check mode.
+ if module.check_mode:
+ check_results['ocp_certs'] = []
+ module.exit_json(
+ check_results=check_results,
+ msg="Checked 0 total certificates. Expired/Warning/OK: 0/0/0. Warning window: %s days" % module.params['warning_days'],
+ rc=0,
+ changed=False
+ )
+
+ ######################################################################
+ # Check for OpenShift Container Platform specific certs
+ ######################################################################
+ for os_cert in filter_paths(openshift_cert_check_paths):
+ # Open up that config file and locate the cert and CA
+ with io.open(os_cert, 'r', encoding='utf-8') as fp:
+ cert_meta = {}
+ cfg = yaml.load(fp)
+ # cert files are specified in parsed `fp` as relative to the path
+ # of the original config file. 'master-config.yaml' with certFile
+ # = 'foo.crt' implies that 'foo.crt' is in the same
+ # directory. certFile = '../foo.crt' is in the parent directory.
+ cfg_path = os.path.dirname(fp.name)
+ cert_meta['certFile'] = os.path.join(cfg_path, cfg['servingInfo']['certFile'])
+ cert_meta['clientCA'] = os.path.join(cfg_path, cfg['servingInfo']['clientCA'])
+
+ ######################################################################
+ # Load the certificate and the CA, parse their expiration dates into
+ # datetime objects so we can manipulate them later
+ for v in cert_meta.values():
+ with io.open(v, 'r', encoding='utf-8') as fp:
+ cert = fp.read()
+ (cert_subject,
+ cert_expiry_date,
+ time_remaining,
+ cert_serial) = load_and_handle_cert(cert, now, ans_module=module)
+
+ expire_check_result = {
+ 'cert_cn': cert_subject,
+ 'path': fp.name,
+ 'expiry': cert_expiry_date,
+ 'days_remaining': time_remaining.days,
+ 'health': None,
+ 'serial': cert_serial
+ }
+
+ classify_cert(expire_check_result, now, time_remaining, expire_window, ocp_certs)
+
+ ######################################################################
+ # /Check for OpenShift Container Platform specific certs
+ ######################################################################
+
+ ######################################################################
+ # Check service Kubeconfigs
+ ######################################################################
+ kubeconfigs = []
+
+ # There may be additional kubeconfigs to check, but their naming
+ # is less predictable than the ones we've already assembled.
+
+ try:
+ # Try to read the standard 'node-config.yaml' file to check if
+ # this host is a node.
+ with io.open(openshift_node_config_path, 'r', encoding='utf-8') as fp:
+ cfg = yaml.load(fp)
+
+ # OK, the config file exists, therefore this is a
+ # node. Nodes have their own kubeconfig files to
+ # communicate with the master API. Let's read the relative
+ # path to that file from the node config.
+ node_masterKubeConfig = cfg['masterKubeConfig']
+ # As before, the path to the 'masterKubeConfig' file is
+ # relative to `fp`
+ cfg_path = os.path.dirname(fp.name)
+ node_kubeconfig = os.path.join(cfg_path, node_masterKubeConfig)
+
+ with io.open(node_kubeconfig, 'r', encoding='utf8') as fp:
+ # Read in the nodes kubeconfig file and grab the good stuff
+ cfg = yaml.load(fp)
+
+ c = cfg['users'][0]['user']['client-certificate-data']
+ (cert_subject,
+ cert_expiry_date,
+ time_remaining,
+ cert_serial) = load_and_handle_cert(c, now, base64decode=True, ans_module=module)
+
+ expire_check_result = {
+ 'cert_cn': cert_subject,
+ 'path': fp.name,
+ 'expiry': cert_expiry_date,
+ 'days_remaining': time_remaining.days,
+ 'health': None,
+ 'serial': cert_serial
+ }
+
+ classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs)
+ except IOError:
+ # This is not a node
+ pass
+
+ for kube in filter_paths(kubeconfig_paths):
+ with io.open(kube, 'r', encoding='utf-8') as fp:
+ # TODO: Maybe consider catching exceptions here?
+ cfg = yaml.load(fp)
+
+ # Per conversation, "the kubeconfigs you care about:
+ # admin, router, registry should all be single
+ # value". Following that advice we only grab the data for
+ # the user at index 0 in the 'users' list. There should
+ # not be more than one user.
+ c = cfg['users'][0]['user']['client-certificate-data']
+ (cert_subject,
+ cert_expiry_date,
+ time_remaining,
+ cert_serial) = load_and_handle_cert(c, now, base64decode=True, ans_module=module)
+
+ expire_check_result = {
+ 'cert_cn': cert_subject,
+ 'path': fp.name,
+ 'expiry': cert_expiry_date,
+ 'days_remaining': time_remaining.days,
+ 'health': None,
+ 'serial': cert_serial
+ }
+
+ classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs)
+
+ ######################################################################
+ # /Check service Kubeconfigs
+ ######################################################################
+
+ ######################################################################
+ # Check etcd certs
+ #
+ # Two things to check: 'external' etcd, and embedded etcd.
+ ######################################################################
+ # FIRST: The 'external' etcd
+ #
+ # Some values may be duplicated, make this a set for now so we
+ # unique them all
+ etcd_certs_to_check = set([])
+ etcd_certs = []
+ etcd_cert_params.append('dne')
+ try:
+ with io.open('/etc/etcd/etcd.conf', 'r', encoding='utf-8') as fp:
+ # Add dummy header section.
+ config = io.StringIO()
+ config.write(u'[ETCD]\n')
+ config.write(fp.read().replace('%', '%%'))
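+            # '%' is doubled because configparser performs string
+            # interpolation and would choke on literal percent signs.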
+ config.seek(0, os.SEEK_SET)
+
+ etcd_config = configparser.ConfigParser()
+ etcd_config.readfp(config)
+
+ for param in etcd_cert_params:
+ try:
+ etcd_certs_to_check.add(etcd_config.get('ETCD', param))
+ except configparser.NoOptionError:
+ # That parameter does not exist, oh well...
+ pass
+ except IOError:
+ # No etcd to see here, move along
+ pass
+
+ for etcd_cert in filter_paths(etcd_certs_to_check):
+ with io.open(etcd_cert, 'r', encoding='utf-8') as fp:
+ c = fp.read()
+ (cert_subject,
+ cert_expiry_date,
+ time_remaining,
+ cert_serial) = load_and_handle_cert(c, now, ans_module=module)
+
+ expire_check_result = {
+ 'cert_cn': cert_subject,
+ 'path': fp.name,
+ 'expiry': cert_expiry_date,
+ 'days_remaining': time_remaining.days,
+ 'health': None,
+ 'serial': cert_serial
+ }
+
+ classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs)
+
+ ######################################################################
+ # Now the embedded etcd
+ ######################################################################
+ try:
+ with io.open('/etc/origin/master/master-config.yaml', 'r', encoding='utf-8') as fp:
+ cfg = yaml.load(fp)
+ except IOError:
+ # Not present
+ pass
+ else:
+ if cfg.get('etcdConfig', {}).get('servingInfo', {}).get('certFile', None) is not None:
+ # This is embedded
+ etcd_crt_name = cfg['etcdConfig']['servingInfo']['certFile']
+ else:
+ # Not embedded
+ etcd_crt_name = None
+
+ if etcd_crt_name is not None:
+ # etcd_crt_name is relative to the location of the
+ # master-config.yaml file
+ cfg_path = os.path.dirname(fp.name)
+ etcd_cert = os.path.join(cfg_path, etcd_crt_name)
+ with open(etcd_cert, 'r') as etcd_fp:
+ (cert_subject,
+ cert_expiry_date,
+ time_remaining,
+ cert_serial) = load_and_handle_cert(etcd_fp.read(), now, ans_module=module)
+
+ expire_check_result = {
+ 'cert_cn': cert_subject,
+ 'path': etcd_fp.name,
+ 'expiry': cert_expiry_date,
+ 'days_remaining': time_remaining.days,
+ 'health': None,
+ 'serial': cert_serial
+ }
+
+ classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs)
+
+ ######################################################################
+ # /Check etcd certs
+ ######################################################################
+
+ ######################################################################
+ # Check router/registry certs
+ #
+ # These are saved as secrets in etcd. That means that we can not
+ # simply read a file to grab the data. Instead we're going to
+ # subprocess out to the 'oc get' command. On non-masters this
+ # command will fail, that is expected so we catch that exception.
+ ######################################################################
+ router_certs = []
+ registry_certs = []
+
+ ######################################################################
+ # First the router certs
+ try:
+ router_secrets_raw = subprocess.Popen('oc get -n default secret router-certs -o yaml'.split(),
+ stdout=subprocess.PIPE)
+ router_ds = yaml.load(router_secrets_raw.communicate()[0])
+ router_c = router_ds['data']['tls.crt']
+ router_path = router_ds['metadata']['selfLink']
+ except TypeError:
+ # YAML couldn't load the result, this is not a master
+ pass
+ except OSError:
+ # The OC command doesn't exist here. Move along.
+ pass
+ else:
+ (cert_subject,
+ cert_expiry_date,
+ time_remaining,
+ cert_serial) = load_and_handle_cert(router_c, now, base64decode=True, ans_module=module)
+
+ expire_check_result = {
+ 'cert_cn': cert_subject,
+ 'path': router_path,
+ 'expiry': cert_expiry_date,
+ 'days_remaining': time_remaining.days,
+ 'health': None,
+ 'serial': cert_serial
+ }
+
+ classify_cert(expire_check_result, now, time_remaining, expire_window, router_certs)
+
+ ######################################################################
+ # Now for registry
+ try:
+ registry_secrets_raw = subprocess.Popen('oc get -n default secret registry-certificates -o yaml'.split(),
+ stdout=subprocess.PIPE)
+ registry_ds = yaml.load(registry_secrets_raw.communicate()[0])
+ registry_c = registry_ds['data']['registry.crt']
+ registry_path = registry_ds['metadata']['selfLink']
+ except TypeError:
+ # YAML couldn't load the result, this is not a master
+ pass
+ except OSError:
+ # The OC command doesn't exist here. Move along.
+ pass
+ else:
+ (cert_subject,
+ cert_expiry_date,
+ time_remaining,
+ cert_serial) = load_and_handle_cert(registry_c, now, base64decode=True, ans_module=module)
+
+ expire_check_result = {
+ 'cert_cn': cert_subject,
+ 'path': registry_path,
+ 'expiry': cert_expiry_date,
+ 'days_remaining': time_remaining.days,
+ 'health': None,
+ 'serial': cert_serial
+ }
+
+ classify_cert(expire_check_result, now, time_remaining, expire_window, registry_certs)
+
+ ######################################################################
+ # /Check router/registry certs
+ ######################################################################
+
+ res = tabulate_summary(ocp_certs, kubeconfigs, etcd_certs, router_certs, registry_certs)
+
+ msg = "Checked {count} total certificates. Expired/Warning/OK: {exp}/{warn}/{ok}. Warning window: {window} days".format(
+ count=res['total'],
+ exp=res['expired'],
+ warn=res['warning'],
+ ok=res['ok'],
+ window=int(module.params['warning_days']),
+ )
+
+ # By default we only return detailed information about expired or
+ # warning certificates. If show_all is true then we will print all
+ # the certificates examined.
+ if not module.params['show_all']:
+ check_results['ocp_certs'] = [crt for crt in ocp_certs if crt['health'] in ['expired', 'warning']]
+ check_results['kubeconfigs'] = [crt for crt in kubeconfigs if crt['health'] in ['expired', 'warning']]
+ check_results['etcd'] = [crt for crt in etcd_certs if crt['health'] in ['expired', 'warning']]
+ check_results['registry'] = [crt for crt in registry_certs if crt['health'] in ['expired', 'warning']]
+ check_results['router'] = [crt for crt in router_certs if crt['health'] in ['expired', 'warning']]
+ else:
+ check_results['ocp_certs'] = ocp_certs
+ check_results['kubeconfigs'] = kubeconfigs
+ check_results['etcd'] = etcd_certs
+ check_results['registry'] = registry_certs
+ check_results['router'] = router_certs
+
+ # Sort the final results to report in order of ascending safety
+ # time. That is to say, the certificates which will expire sooner
+ # will be at the front of the list and certificates which will
+ # expire later are at the end. Router and registry certs should be
+ # limited to just 1 result, so don't bother sorting those.
+ def cert_key(item):
+ ''' return the days_remaining key '''
+ return item['days_remaining']
+
+ check_results['ocp_certs'] = sorted(check_results['ocp_certs'], key=cert_key)
+ check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], key=cert_key)
+ check_results['etcd'] = sorted(check_results['etcd'], key=cert_key)
+
+ # This module will never change anything, but we might want to
+ # change the return code parameter if there is some catastrophic
+ # error we noticed earlier
+ module.exit_json(
+ check_results=check_results,
+ summary=res,
+ msg=msg,
+ rc=0,
+ changed=False
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_utils/library/openshift_container_binary_sync.py b/roles/lib_utils/library/openshift_container_binary_sync.py
new file mode 100644
index 000000000..efdfcf1c7
--- /dev/null
+++ b/roles/lib_utils/library/openshift_container_binary_sync.py
@@ -0,0 +1,205 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# pylint: disable=missing-docstring,invalid-name
+
+import random
+import tempfile
+import shutil
+import os.path
+
+# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
+from ansible.module_utils.basic import * # noqa: F403
+
+
+DOCUMENTATION = '''
+---
+module: openshift_container_binary_sync
+short_description: Copies OpenShift binaries out of the given image tag to host system.
+'''
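+
+EXAMPLES = '''
+# Hypothetical usage sketch based on this module's argument_spec (the
+# original file defines no EXAMPLES block); image and tag are illustrative.
+- openshift_container_binary_sync:
+    image: openshift/origin
+    tag: v3.9
+    backend: docker
+'''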
+
+
+class BinarySyncError(Exception):
+ def __init__(self, msg):
+ super(BinarySyncError, self).__init__(msg)
+ self.msg = msg
+
+
+# pylint: disable=too-few-public-methods,too-many-instance-attributes
+class BinarySyncer(object):
+ """
+ Syncs the openshift, oc, and kubectl binaries/symlinks out of
+ a container onto the host system.
+ """
+
+ def __init__(self, module, image, tag, backend):
+ self.module = module
+ self.changed = False
+ self.output = []
+ self.bin_dir = '/usr/local/bin'
+ self._image = image
+ self.tag = tag
+ self.backend = backend
+ self.temp_dir = None # TBD
+
+ def sync(self):
+ if self.backend == 'atomic':
+ return self._sync_atomic()
+
+ return self._sync_docker()
+
+ def _sync_atomic(self):
+ self.temp_dir = tempfile.mkdtemp()
+ temp_dir_mount = tempfile.mkdtemp()
+ try:
+ image_spec = '%s:%s' % (self.image, self.tag)
+ rc, stdout, stderr = self.module.run_command(['atomic', 'mount',
+ '--storage', "ostree",
+ image_spec, temp_dir_mount])
+ if rc:
+ raise BinarySyncError("Error mounting image. stdout=%s, stderr=%s" %
+ (stdout, stderr))
+ for i in ["openshift", "oc"]:
+ src_file = os.path.join(temp_dir_mount, "usr/bin", i)
+ shutil.copy(src_file, self.temp_dir)
+
+ self._sync_binaries()
+ finally:
+ self.module.run_command(['atomic', 'umount', temp_dir_mount])
+ shutil.rmtree(temp_dir_mount)
+ shutil.rmtree(self.temp_dir)
+
+ def _sync_docker(self):
+ container_name = "openshift-cli-%s" % random.randint(1, 100000)
+ rc, stdout, stderr = self.module.run_command(['docker', 'create', '--name',
+ container_name, '%s:%s' % (self.image, self.tag)])
+ if rc:
+ raise BinarySyncError("Error creating temporary docker container. stdout=%s, stderr=%s" %
+ (stdout, stderr))
+ self.output.append(stdout)
+ try:
+ self.temp_dir = tempfile.mkdtemp()
+ self.output.append("Using temp dir: %s" % self.temp_dir)
+
+ rc, stdout, stderr = self.module.run_command(['docker', 'cp', "%s:/usr/bin/openshift" % container_name,
+ self.temp_dir])
+ if rc:
+ raise BinarySyncError("Error copying file from docker container: stdout=%s, stderr=%s" %
+ (stdout, stderr))
+
+ rc, stdout, stderr = self.module.run_command(['docker', 'cp', "%s:/usr/bin/oc" % container_name,
+ self.temp_dir])
+ if rc:
+ raise BinarySyncError("Error copying file from docker container: stdout=%s, stderr=%s" %
+ (stdout, stderr))
+
+ self._sync_binaries()
+ finally:
+ shutil.rmtree(self.temp_dir)
+ self.module.run_command(['docker', 'rm', container_name])
+
+ def _sync_binaries(self):
+ self._sync_binary('openshift')
+
+ # In older versions, oc was a symlink to openshift:
+ if os.path.islink(os.path.join(self.temp_dir, 'oc')):
+ self._sync_symlink('oc', 'openshift')
+ else:
+ self._sync_binary('oc')
+
+ # Ensure correct symlinks created:
+ self._sync_symlink('kubectl', 'oc')
+
+ # Remove old oadm binary
+ if os.path.exists(os.path.join(self.bin_dir, 'oadm')):
+ os.remove(os.path.join(self.bin_dir, 'oadm'))
+
+ def _sync_symlink(self, binary_name, link_to):
+ """ Ensure the given binary name exists and links to the expected binary. """
+
+ # The symlink we are creating:
+ link_path = os.path.join(self.bin_dir, binary_name)
+
+ # The expected file we should be linking to:
+ link_dest = os.path.join(self.bin_dir, link_to)
+
+ if not os.path.exists(link_path) or \
+ not os.path.islink(link_path) or \
+ os.path.realpath(link_path) != os.path.realpath(link_dest):
+ if os.path.exists(link_path):
+ os.remove(link_path)
+ os.symlink(link_to, os.path.join(self.bin_dir, binary_name))
+ self.output.append("Symlinked %s to %s." % (link_path, link_dest))
+ self.changed = True
+
+    def _sync_binary(self, binary_name):
+        src_path = os.path.join(self.temp_dir, binary_name)
+        dest_path = os.path.join(self.bin_dir, binary_name)
+        # Compare only the hash field: sha256sum prints "<hash>  <path>",
+        # so the src/dest outputs would never match as whole strings.
+        incoming_checksum = self.module.run_command(['sha256sum', src_path])[1].split()[0]
+        if not os.path.exists(dest_path) or self.module.run_command(['sha256sum', dest_path])[1].split()[0] != incoming_checksum:
+
+ # See: https://github.com/openshift/openshift-ansible/issues/4965
+ if os.path.islink(dest_path):
+ os.unlink(dest_path)
+ self.output.append('Removed old symlink {} before copying binary.'.format(dest_path))
+ shutil.move(src_path, dest_path)
+ self.output.append("Moved %s to %s." % (src_path, dest_path))
+ self.changed = True
+
+ @property
+ def raw_image(self):
+ """
+ Returns the image as it was originally passed in to the instance.
+
+ .. note::
+ This image string will only work directly with the atomic command.
+
+ :returns: The original image passed in.
+ :rtype: str
+ """
+ return self._image
+
+ @property
+ def image(self):
+ """
+ Returns the image without atomic prefixes used to map to skopeo args.
+
+ :returns: The image string without prefixes
+ :rtype: str
+ """
+ image = self._image
+ for remove in ('oci:', 'http:', 'https:'):
+ if image.startswith(remove):
+ image = image.replace(remove, '')
+ return image
+
+
+def main():
+ module = AnsibleModule( # noqa: F405
+ argument_spec=dict(
+ image=dict(required=True),
+ tag=dict(required=True),
+ backend=dict(required=True),
+ ),
+ supports_check_mode=True
+ )
+
+ image = module.params['image']
+ tag = module.params['tag']
+ backend = module.params['backend']
+
+ if backend not in ["docker", "atomic"]:
+ module.fail_json(msg="unknown backend")
+
+ binary_syncer = BinarySyncer(module, image, tag, backend)
+
+ try:
+ binary_syncer.sync()
+ except BinarySyncError as ex:
+ module.fail_json(msg=ex.msg)
+
+ return module.exit_json(changed=binary_syncer.changed,
+ output=binary_syncer.output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_utils/library/os_firewall_manage_iptables.py b/roles/lib_utils/library/os_firewall_manage_iptables.py
new file mode 100644
index 000000000..aeee3ede8
--- /dev/null
+++ b/roles/lib_utils/library/os_firewall_manage_iptables.py
@@ -0,0 +1,283 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# pylint: disable=fixme, missing-docstring
+import subprocess
+
+DOCUMENTATION = '''
+---
+module: os_firewall_manage_iptables
+short_description: This module manages iptables rules for a given chain
+author: Jason DeTiberus
+requirements: [ ]
+'''
+EXAMPLES = '''
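+# Hypothetical illustrations based on this module's argument_spec (the
+# original file ships an empty EXAMPLES block):
+- os_firewall_manage_iptables:
+    name: etcd-ports
+    action: add
+    protocol: tcp
+    port: "2379"
+
+- os_firewall_manage_iptables:
+    name: etcd-ports
+    action: verify_chain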
+'''
+
+
+class IpTablesError(Exception):
+ def __init__(self, msg, cmd, exit_code, output):
+ super(IpTablesError, self).__init__(msg)
+ self.msg = msg
+ self.cmd = cmd
+ self.exit_code = exit_code
+ self.output = output
+
+
+class IpTablesAddRuleError(IpTablesError):
+ pass
+
+
+class IpTablesRemoveRuleError(IpTablesError):
+ def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name
+ super(IpTablesRemoveRuleError, self).__init__(msg, cmd, exit_code,
+ output)
+ self.chain = chain
+
+
+class IpTablesSaveError(IpTablesError):
+ pass
+
+
+class IpTablesCreateChainError(IpTablesError):
+ def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name
+ super(IpTablesCreateChainError, self).__init__(msg, cmd, exit_code,
+ output)
+ self.chain = chain
+
+
+class IpTablesCreateJumpRuleError(IpTablesError):
+ def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name
+ super(IpTablesCreateJumpRuleError, self).__init__(msg, cmd, exit_code,
+ output)
+ self.chain = chain
+
+
+# TODO: implement rollbacks for any events that were successful and an
+# exception was thrown later. For example, when the chain is created
+# successfully, but the add/remove rule fails.
+class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
+ def __init__(self, module):
+ self.module = module
+ self.ip_version = module.params['ip_version']
+ self.check_mode = module.check_mode
+ self.chain = module.params['chain']
+ self.create_jump_rule = module.params['create_jump_rule']
+ self.jump_rule_chain = module.params['jump_rule_chain']
+ self.cmd = self.gen_cmd()
+ self.save_cmd = self.gen_save_cmd()
+ self.output = []
+ self.changed = False
+
+ def save(self):
+ try:
+ self.output.append(subprocess.check_output(self.save_cmd, stderr=subprocess.STDOUT))
+ except subprocess.CalledProcessError as ex:
+ raise IpTablesSaveError(
+ msg="Failed to save iptables rules",
+ cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)
+
+ def verify_chain(self):
+ if not self.chain_exists():
+ self.create_chain()
+ if self.create_jump_rule and not self.jump_rule_exists():
+ self.create_jump()
+
+ def add_rule(self, port, proto):
+ rule = self.gen_rule(port, proto)
+ if not self.rule_exists(rule):
+ self.verify_chain()
+
+ if self.check_mode:
+ self.changed = True
+ self.output.append("Create rule for %s %s" % (proto, port))
+ else:
+ cmd = self.cmd + ['-A'] + rule
+ try:
+ self.output.append(subprocess.check_output(cmd))
+ self.changed = True
+ self.save()
+ except subprocess.CalledProcessError as ex:
+ raise IpTablesCreateChainError(
+ chain=self.chain,
+ msg="Failed to create rule for "
+ "%s %s" % (proto, port),
+ cmd=ex.cmd, exit_code=ex.returncode,
+ output=ex.output)
+
+ def remove_rule(self, port, proto):
+ rule = self.gen_rule(port, proto)
+ if self.rule_exists(rule):
+ if self.check_mode:
+ self.changed = True
+ self.output.append("Remove rule for %s %s" % (proto, port))
+ else:
+ cmd = self.cmd + ['-D'] + rule
+ try:
+ self.output.append(subprocess.check_output(cmd))
+ self.changed = True
+ self.save()
+ except subprocess.CalledProcessError as ex:
+ raise IpTablesRemoveRuleError(
+ chain=self.chain,
+ msg="Failed to remove rule for %s %s" % (proto, port),
+ cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)
+
+ def rule_exists(self, rule):
+ check_cmd = self.cmd + ['-C'] + rule
+ return True if subprocess.call(check_cmd) == 0 else False
+
+ @staticmethod
+ def port_as_argument(port):
+ if isinstance(port, int):
+ return str(port)
+ if isinstance(port, basestring): # noqa: F405
+ return port.replace('-', ":")
+ return port
+
+ def gen_rule(self, port, proto):
+ return [self.chain, '-p', proto, '-m', 'state', '--state', 'NEW',
+ '-m', proto, '--dport', IpTablesManager.port_as_argument(port), '-j', 'ACCEPT']
+
+ def create_jump(self):
+ if self.check_mode:
+ self.changed = True
+ self.output.append("Create jump rule for chain %s" % self.chain)
+ else:
+ try:
+ cmd = self.cmd + ['-L', self.jump_rule_chain, '--line-numbers']
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+
+ # break the input rules into rows and columns
+ input_rules = [s.split() for s in to_native(output).split('\n')]
+
+ # Find the last numbered rule
+ last_rule_num = None
+ last_rule_target = None
+ for rule in input_rules[:-1]:
+ if rule:
+ try:
+ last_rule_num = int(rule[0])
+ except ValueError:
+ continue
+ last_rule_target = rule[1]
+
+ # Naively assume that if the last row is a REJECT or DROP rule,
+ # then we can insert our rule right before it, otherwise we
+ # assume that we can just append the rule.
+ if (last_rule_num and last_rule_target and last_rule_target in ['REJECT', 'DROP']):
+ # insert rule
+ cmd = self.cmd + ['-I', self.jump_rule_chain,
+ str(last_rule_num)]
+ else:
+ # append rule
+ cmd = self.cmd + ['-A', self.jump_rule_chain]
+ cmd += ['-j', self.chain]
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ self.changed = True
+ self.output.append(output)
+ self.save()
+ except subprocess.CalledProcessError as ex:
+ if '--line-numbers' in ex.cmd:
+ raise IpTablesCreateJumpRuleError(
+ chain=self.chain,
+ msg=("Failed to query existing " +
+ self.jump_rule_chain +
+ " rules to determine jump rule location"),
+ cmd=ex.cmd, exit_code=ex.returncode,
+ output=ex.output)
+ else:
+ raise IpTablesCreateJumpRuleError(
+ chain=self.chain,
+ msg=("Failed to create jump rule for chain " +
+ self.chain),
+ cmd=ex.cmd, exit_code=ex.returncode,
+ output=ex.output)
+
+ def create_chain(self):
+ if self.check_mode:
+ self.changed = True
+ self.output.append("Create chain %s" % self.chain)
+ else:
+ try:
+ cmd = self.cmd + ['-N', self.chain]
+ self.output.append(subprocess.check_output(cmd, stderr=subprocess.STDOUT))
+ self.changed = True
+ self.output.append("Successfully created chain %s" %
+ self.chain)
+ self.save()
+ except subprocess.CalledProcessError as ex:
+ raise IpTablesCreateChainError(
+ chain=self.chain,
+ msg="Failed to create chain: %s" % self.chain,
+ cmd=ex.cmd, exit_code=ex.returncode, output=ex.output
+ )
+
+    def jump_rule_exists(self):
+        cmd = self.cmd + ['-C', self.jump_rule_chain, '-j', self.chain]
+        return subprocess.call(cmd) == 0
+
+    def chain_exists(self):
+        cmd = self.cmd + ['-L', self.chain]
+        return subprocess.call(cmd) == 0
+
+ def gen_cmd(self):
+ cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables'
+ # Include -w (wait for xtables lock) in default arguments.
+ default_args = ['-w']
+ return ["/usr/sbin/%s" % cmd] + default_args
+
+ def gen_save_cmd(self): # pylint: disable=no-self-use
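+        # Persist the running rule set via the RHEL/CentOS iptables-services
+        # init script; this path is distribution-specific.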
+ return ['/usr/libexec/iptables/iptables.init', 'save']
+
+
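+# Example task usage (a sketch; the name, port, and protocol values below are
+# illustrative, not defaults):
+#
+#   - os_firewall_manage_iptables:
+#       name: allow-openshift-api
+#       action: add
+#       protocol: tcp
+#       port: "8443"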
+def main():
+ module = AnsibleModule( # noqa: F405
+ argument_spec=dict(
+ name=dict(required=True),
+ action=dict(required=True, choices=['add', 'remove',
+ 'verify_chain']),
+ chain=dict(required=False, default='OS_FIREWALL_ALLOW'),
+ create_jump_rule=dict(required=False, type='bool', default=True),
+ jump_rule_chain=dict(required=False, default='INPUT'),
+ protocol=dict(required=False, choices=['tcp', 'udp']),
+ port=dict(required=False, type='str'),
+ ip_version=dict(required=False, default='ipv4',
+ choices=['ipv4', 'ipv6']),
+ ),
+ supports_check_mode=True
+ )
+
+ action = module.params['action']
+ protocol = module.params['protocol']
+ port = module.params['port']
+
+ if action in ['add', 'remove']:
+ if not protocol:
+ error = "protocol is required when action is %s" % action
+ module.fail_json(msg=error)
+ if not port:
+ error = "port is required when action is %s" % action
+ module.fail_json(msg=error)
+
+ iptables_manager = IpTablesManager(module)
+
+ try:
+ if action == 'add':
+ iptables_manager.add_rule(port, protocol)
+ elif action == 'remove':
+ iptables_manager.remove_rule(port, protocol)
+ elif action == 'verify_chain':
+ iptables_manager.verify_chain()
+ except IpTablesError as ex:
+ module.fail_json(msg=ex.msg)
+
+ return module.exit_json(changed=iptables_manager.changed,
+ output=iptables_manager.output)
+
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, wrong-import-position
+# import module snippets
+from ansible.module_utils.basic import * # noqa: F403,E402
+from ansible.module_utils._text import to_native # noqa: E402
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_utils/library/rpm_q.py b/roles/lib_utils/library/rpm_q.py
new file mode 100644
index 000000000..3dec50fc2
--- /dev/null
+++ b/roles/lib_utils/library/rpm_q.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Tobias Florek <tob@butter.sh>
+# Licensed under the terms of the MIT License
+"""
+An Ansible module to query the RPM database, for use when yum/dnf are not
+available.
+"""
+
+# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
+from ansible.module_utils.basic import * # noqa: F403
+
+DOCUMENTATION = """
+---
+module: rpm_q
+short_description: Query the RPM database
+author: Tobias Florek
+options:
+ name:
+ description:
+ - The name of the package to query
+ required: true
+ state:
+ description:
+ - Whether the package is supposed to be installed or not
+ choices: [present, absent]
+ default: present
+"""
+
+EXAMPLES = """
+- rpm_q: name=ansible state=present
+- rpm_q: name=ansible state=absent
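+
+# Registering the result to inspect installed_versions later (illustrative):
+# - rpm_q: name=ansible state=present
+#   register: rpm_ansible
+# - debug: var=rpm_ansible.installed_versions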
+"""
+
+RPM_BINARY = '/bin/rpm'
+
+
+def main():
+ """
+ Checks rpm -q for the named package and returns the installed packages
+ or None if not installed.
+ """
+ module = AnsibleModule( # noqa: F405
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ ),
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+
+ # pylint: disable=invalid-name
+ rc, out, err = module.run_command([RPM_BINARY, '-q', name])
+
+ installed = out.rstrip('\n').split('\n')
+
+ if rc != 0:
+ if state == 'present':
+ module.fail_json(msg="%s is not installed" % name, stdout=out, stderr=err, rc=rc)
+ else:
+ module.exit_json(changed=False)
+ elif state == 'present':
+ module.exit_json(changed=False, installed_versions=installed)
+ else:
+ module.fail_json(msg="%s is installed", installed_versions=installed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_utils/library/swapoff.py b/roles/lib_utils/library/swapoff.py
new file mode 100644
index 000000000..925eeb17d
--- /dev/null
+++ b/roles/lib_utils/library/swapoff.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+#
+# Copyright 2017 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import subprocess
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+DOCUMENTATION = '''
+---
+module: swapoff
+
+short_description: Disable swap and comment out swap entries in /etc/fstab
+
+version_added: "2.4"
+
+description:
+    - This module disables swap and comments out swap entries in /etc/fstab.
+
+author:
+ - "Michael Gugino <mgugino@redhat.com>"
+'''
+
+EXAMPLES = '''
+# Disable swap and comment out swap entries in /etc/fstab
+- name: Disable Swap
+ swapoff: {}
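+
+# A fuller play context (sketch; the hosts value and privilege escalation are
+# assumptions, not part of this module):
+# - hosts: nodes
+#   become: yes
+#   tasks:
+#     - name: Disable Swap
+#       swapoff: {}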
+'''
+
+
+def check_swap_in_fstab(module):
+ '''Check for uncommented swap entries in fstab'''
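+    # grep '^[^#].*swap' matches uncommented fstab lines mentioning swap;
+    # GNU grep exits 0 on a match, 1 on no match, and 2 on read errors.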
+ res = subprocess.call(['grep', '^[^#].*swap', '/etc/fstab'])
+
+ if res == 2:
+ # rc 2 == cannot open file.
+ result = {'failed': True,
+ 'changed': False,
+ 'msg': 'unable to read /etc/fstab',
+ 'state': 'unknown'}
+ module.fail_json(**result)
+ elif res == 1:
+ # No grep match, fstab looks good.
+ return False
+ elif res == 0:
+ # There is an uncommented entry for fstab.
+ return True
+ else:
+ # Some other grep error code, we shouldn't get here.
+ result = {'failed': True,
+ 'changed': False,
+                  'msg': 'unknown problem with grep "^[^#].*swap" /etc/fstab',
+ 'state': 'unknown'}
+ module.fail_json(**result)
+
+
+def check_swapon_status(module):
+ '''Check if swap is actually in use.'''
+ try:
+ res = subprocess.check_output(['swapon', '--show'])
+ except subprocess.CalledProcessError:
+        # swapon failed; we cannot determine whether swap is in use.
+ result = {'failed': True,
+ 'changed': False,
+ 'msg': 'unable to execute swapon --show',
+ 'state': 'unknown'}
+ module.fail_json(**result)
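+    # 'swapon --show' prints a table whose header starts with NAME only when
+    # at least one swap device is active; empty output means swap is off.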
+ return 'NAME' in str(res)
+
+
+def comment_swap_fstab(module):
+ '''Comment out swap lines in /etc/fstab'''
+ res = subprocess.call(['sed', '-i.bak', 's/^[^#].*swap.*/#&/', '/etc/fstab'])
+ if res:
+ result = {'failed': True,
+ 'changed': False,
+ 'msg': 'sed failed to comment swap in /etc/fstab',
+ 'state': 'unknown'}
+ module.fail_json(**result)
+
+
+def run_swapoff(module, changed):
+ '''Run swapoff command'''
+ res = subprocess.call(['swapoff', '--all'])
+ if res:
+ result = {'failed': True,
+ 'changed': changed,
+ 'msg': 'swapoff --all returned {}'.format(str(res)),
+ 'state': 'unknown'}
+ module.fail_json(**result)
+
+
+def run_module():
+ '''Run this module'''
+ module = AnsibleModule(
+ supports_check_mode=False,
+ argument_spec={}
+ )
+ changed = False
+
+ swap_fstab_res = check_swap_in_fstab(module)
+ swap_is_inuse_res = check_swapon_status(module)
+
+ if swap_fstab_res:
+ comment_swap_fstab(module)
+ changed = True
+
+ if swap_is_inuse_res:
+ run_swapoff(module, changed)
+ changed = True
+
+ result = {'changed': changed}
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()