author     Michael Gugino <mgugino@redhat.com>  2018-01-04 23:55:34 -0500
committer  Michael Gugino <mgugino@redhat.com>  2018-01-10 11:34:36 -0500
commit     d3fefc32a727fe3c13159c4e9fe4399f35b487a8 (patch)
tree       3211ffc7fa4c8df9ff93928e705ef5314d339f3c /roles/lib_utils/library
parent     ee2d4b8e66a344e8f6ca12cbc9362a80a07555d0 (diff)
Move more plugins to lib_utils
This commit continues moving plugins into lib_utils. It does not move any plugins for add-on roles such as logging and metrics.
Diffstat (limited to 'roles/lib_utils/library')
-rwxr-xr-x  roles/lib_utils/library/delegated_serial_command.py          274
-rw-r--r--  roles/lib_utils/library/openshift_cert_expiry.py             839
-rw-r--r--  roles/lib_utils/library/openshift_container_binary_sync.py   205
3 files changed, 1318 insertions, 0 deletions
diff --git a/roles/lib_utils/library/delegated_serial_command.py b/roles/lib_utils/library/delegated_serial_command.py
new file mode 100755
index 000000000..0cab1ca88
--- /dev/null
+++ b/roles/lib_utils/library/delegated_serial_command.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
+# (c) 2016, Andrew Butcher <abutcher@redhat.com>
+#
+# This module is derived from the Ansible command module.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+# pylint: disable=unused-wildcard-import,wildcard-import,unused-import,redefined-builtin
+
+''' delegated_serial_command '''
+
+import datetime
+import errno
+import glob
+import shlex
+import os
+import fcntl
+import time
+
+DOCUMENTATION = '''
+---
+module: delegated_serial_command
+short_description: Executes a command on a remote node
+version_added: historical
+description:
+ - The M(command) module takes the command name followed by a list
+ of space-delimited arguments.
+ - The given command will be executed on all selected nodes. It
+ will not be processed through the shell, so variables like
+ C($HOME) and operations like C("<"), C(">"), C("|"), and C("&")
+ will not work (use the M(shell) module if you need these
+ features).
+  - Creates and maintains a lockfile so that concurrent invocations
+    of this module wait for one another to finish before proceeding.
+options:
+ command:
+ description:
+ - the command to run
+ required: true
+ default: null
+ creates:
+ description:
+      - a filename or (since 2.0) glob pattern; if it already
+        exists, this step will B(not) be run.
+ required: no
+ default: null
+ removes:
+ description:
+      - a filename or (since 2.0) glob pattern; if it does not
+        exist, this step will B(not) be run.
+ version_added: "0.8"
+ required: no
+ default: null
+ chdir:
+ description:
+ - cd into this directory before running the command
+ version_added: "0.6"
+ required: false
+ default: null
+ executable:
+ description:
+ - change the shell used to execute the command. Should be an
+ absolute path to the executable.
+ required: false
+ default: null
+ version_added: "0.9"
+ warn:
+ version_added: "1.8"
+ default: yes
+ description:
+ - if command warnings are on in ansible.cfg, do not warn about
+ this particular line if set to no/false.
+ required: false
+  lockfile:
+    default: /tmp/delegated_serial_command.lock
+    description:
+      - the path of the lockfile that will be created
+  timeout:
+    default: 30
+    description:
+      - maximum number of 0.1 second attempts to make while waiting to obtain the lock
+notes:
+ - If you want to run a command through the shell (say you are using C(<),
+ C(>), C(|), etc), you actually want the M(shell) module instead. The
+ M(command) module is much more secure as it's not affected by the user's
+ environment.
+ - " C(creates), C(removes), and C(chdir) can be specified after
+ the command. For instance, if you only want to run a command if
+ a certain file does not exist, use this."
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+ - Andrew Butcher
+'''
+
+EXAMPLES = '''
+# Example from Ansible Playbooks.
+- delegated_serial_command:
+ command: /sbin/shutdown -t now
+
+# Run the command if the specified file does not exist.
+- delegated_serial_command:
+ command: /usr/bin/make_database.sh arg1 arg2
+ creates: /path/to/database
+'''
+
+# Dict of options and their defaults
+OPTIONS = {'chdir': None,
+ 'creates': None,
+ 'command': None,
+ 'executable': None,
+ 'NO_LOG': None,
+ 'removes': None,
+ 'warn': True,
+ 'lockfile': None,
+ 'timeout': None}
+
+
+def check_command(commandline):
+ ''' Check provided command '''
+ arguments = {'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group',
+ 'ln': 'state=link', 'mkdir': 'state=directory',
+ 'rmdir': 'state=absent', 'rm': 'state=absent', 'touch': 'state=touch'}
+ commands = {'git': 'git', 'hg': 'hg', 'curl': 'get_url or uri', 'wget': 'get_url or uri',
+ 'svn': 'subversion', 'service': 'service',
+ 'mount': 'mount', 'rpm': 'yum, dnf or zypper', 'yum': 'yum', 'apt-get': 'apt',
+ 'tar': 'unarchive', 'unzip': 'unarchive', 'sed': 'template or lineinfile',
+ 'rsync': 'synchronize', 'dnf': 'dnf', 'zypper': 'zypper'}
+ become = ['sudo', 'su', 'pbrun', 'pfexec', 'runas']
+ warnings = list()
+ command = os.path.basename(commandline.split()[0])
+ # pylint: disable=line-too-long
+ if command in arguments:
+ warnings.append("Consider using file module with {0} rather than running {1}".format(arguments[command], command))
+ if command in commands:
+ warnings.append("Consider using {0} module rather than running {1}".format(commands[command], command))
+ if command in become:
+ warnings.append(
+ "Consider using 'become', 'become_method', and 'become_user' rather than running {0}".format(command,))
+ return warnings
+
+
+# pylint: disable=too-many-statements,too-many-branches,too-many-locals
+def main():
+ ''' Main module function '''
+ module = AnsibleModule( # noqa: F405
+ argument_spec=dict(
+ _uses_shell=dict(type='bool', default=False),
+ command=dict(required=True),
+ chdir=dict(),
+ executable=dict(),
+ creates=dict(),
+ removes=dict(),
+ warn=dict(type='bool', default=True),
+ lockfile=dict(default='/tmp/delegated_serial_command.lock'),
+ timeout=dict(type='int', default=30)
+ )
+ )
+
+ shell = module.params['_uses_shell']
+ chdir = module.params['chdir']
+ executable = module.params['executable']
+ command = module.params['command']
+ creates = module.params['creates']
+ removes = module.params['removes']
+ warn = module.params['warn']
+ lockfile = module.params['lockfile']
+ timeout = module.params['timeout']
+
+ if command.strip() == '':
+ module.fail_json(rc=256, msg="no command given")
+
+ iterated = 0
+ lockfd = open(lockfile, 'w+')
+ while iterated < timeout:
+ try:
+ fcntl.flock(lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ break
+ # pylint: disable=invalid-name
+ except IOError as e:
+ if e.errno != errno.EAGAIN:
+ module.fail_json(msg="I/O Error {0}: {1}".format(e.errno, e.strerror))
+ else:
+ iterated += 1
+ time.sleep(0.1)
+
+ if chdir:
+ chdir = os.path.abspath(os.path.expanduser(chdir))
+ os.chdir(chdir)
+
+ if creates:
+ # do not run the command if the line contains creates=filename
+ # and the filename already exists. This allows idempotence
+ # of command executions.
+ path = os.path.expanduser(creates)
+ if glob.glob(path):
+ module.exit_json(
+ cmd=command,
+ stdout="skipped, since %s exists" % path,
+ changed=False,
+ stderr=False,
+ rc=0
+ )
+
+ if removes:
+ # do not run the command if the line contains removes=filename
+ # and the filename does not exist. This allows idempotence
+ # of command executions.
+ path = os.path.expanduser(removes)
+ if not glob.glob(path):
+ module.exit_json(
+ cmd=command,
+ stdout="skipped, since %s does not exist" % path,
+ changed=False,
+ stderr=False,
+ rc=0
+ )
+
+ warnings = list()
+ if warn:
+ warnings = check_command(command)
+
+ if not shell:
+ command = shlex.split(command)
+ startd = datetime.datetime.now()
+
+ # pylint: disable=invalid-name
+ rc, out, err = module.run_command(command, executable=executable, use_unsafe_shell=shell)
+
+ fcntl.flock(lockfd, fcntl.LOCK_UN)
+ lockfd.close()
+
+ endd = datetime.datetime.now()
+ delta = endd - startd
+
+ if out is None:
+ out = ''
+ if err is None:
+ err = ''
+
+ module.exit_json(
+ cmd=command,
+ stdout=out.rstrip("\r\n"),
+ stderr=err.rstrip("\r\n"),
+ rc=rc,
+ start=str(startd),
+ end=str(endd),
+ delta=str(delta),
+ changed=True,
+ warnings=warnings,
+ iterated=iterated
+ )
+
+
+# import module snippets
+# pylint: disable=wrong-import-position
+from ansible.module_utils.basic import * # noqa: F402,F403
+
+main()
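
The serialization mechanism above is an exclusive, non-blocking flock on a shared lockfile, retried in 0.1 second steps until the timeout is exhausted. Below is a minimal standalone sketch of that pattern, not part of the commit; the lockfile path and retry count mirror the module defaults, while run_serialized() and example_task() are illustrative names.

#!/usr/bin/python
# Standalone sketch of the flock/retry pattern used by delegated_serial_command.
# The lockfile path and retry count mirror the module defaults; run_serialized()
# and example_task() are illustrative names, not part of the module.
import errno
import fcntl
import time

LOCKFILE = '/tmp/delegated_serial_command.lock'
MAX_TRIES = 30  # roughly 3 seconds at 0.1 seconds per attempt


def run_serialized(func):
    """Run func() while holding an exclusive lock on LOCKFILE."""
    attempts = 0
    lockfd = open(LOCKFILE, 'w+')
    while attempts < MAX_TRIES:
        try:
            # LOCK_NB makes flock raise immediately instead of blocking,
            # so the caller can count attempts and eventually give up.
            fcntl.flock(lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            break
        except IOError as exc:
            if exc.errno != errno.EAGAIN:
                raise
            attempts += 1
            time.sleep(0.1)
    try:
        return func()
    finally:
        fcntl.flock(lockfd, fcntl.LOCK_UN)
        lockfd.close()


def example_task():
    """Illustrative payload; in the module this is module.run_command()."""
    return 'held the lock'


if __name__ == '__main__':
    print(run_serialized(example_task))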
diff --git a/roles/lib_utils/library/openshift_cert_expiry.py b/roles/lib_utils/library/openshift_cert_expiry.py
new file mode 100644
index 000000000..e355266b0
--- /dev/null
+++ b/roles/lib_utils/library/openshift_cert_expiry.py
@@ -0,0 +1,839 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# pylint: disable=line-too-long,invalid-name
+
+"""For details on this module see DOCUMENTATION (below)"""
+
+import base64
+import datetime
+import io
+import os
+import subprocess
+import yaml
+
+# pylint import-error disabled because pylint cannot find the package
+# when installed in a virtualenv
+from ansible.module_utils.six.moves import configparser # pylint: disable=import-error
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+ # You can comment this import out and include a 'pass' in this
+ # block if you're manually testing this module on a NON-ATOMIC
+ # HOST (or any host that just doesn't have PyOpenSSL
+ # available). That will force the `load_and_handle_cert` function
+ # to use the Fake OpenSSL classes.
+ import OpenSSL.crypto
+ HAS_OPENSSL = True
+except ImportError:
+ # Some platforms (such as RHEL Atomic) may not have the Python
+ # OpenSSL library installed. In this case we will use a manual
+ # work-around to parse each certificate.
+ #
+ # Check for 'OpenSSL.crypto' in `sys.modules` later.
+ HAS_OPENSSL = False
+
+DOCUMENTATION = '''
+---
+module: openshift_cert_expiry
+short_description: Check OpenShift Container Platform (OCP) and Kube certificate expirations on a cluster
+description:
+ - The M(openshift_cert_expiry) module has two basic functions: to flag certificates which will expire in a set window of time from now, and to notify you about certificates which have already expired.
+ - When the module finishes, a summary of the examination is returned. Each certificate in the summary has a C(health) key with a value of one of the following:
+ - C(ok) - not expired, and outside of the expiration C(warning_days) window.
+ - C(warning) - not expired, but will expire between now and the C(warning_days) window.
+ - C(expired) - an expired certificate.
+  - Certificate flagging follows this logic:
+  - If the expiration date is before now, then the certificate is classified as C(expired).
+  - The certificate's time to live (expiration date - now) is calculated; if that time window is less than C(warning_days), the certificate is classified as C(warning).
+ - All other conditions are classified as C(ok).
+ - The following keys are ALSO present in the certificate summary:
+ - C(cert_cn) - The common name of the certificate (additional CNs present in SAN extensions are omitted)
+ - C(days_remaining) - The number of days until the certificate expires.
+ - C(expiry) - The date the certificate expires on.
+ - C(path) - The full path to the certificate on the examined host.
+version_added: "1.0"
+options:
+ config_base:
+ description:
+ - Base path to OCP system settings.
+ required: false
+ default: /etc/origin
+ warning_days:
+ description:
+ - Flag certificates which will expire in C(warning_days) days from now.
+ required: false
+ default: 30
+ show_all:
+ description:
+ - Enable this option to show analysis of ALL certificates examined by this module.
+      - By default, only certificates which have expired or will expire within the C(warning_days) window will be reported.
+ required: false
+ default: false
+
+author: "Tim Bielawa (@tbielawa) <tbielawa@redhat.com>"
+'''
+
+EXAMPLES = '''
+# Default invocation, only notify about expired certificates or certificates which will expire within 30 days from now
+- openshift_cert_expiry:
+
+# Expand the warning window to show certificates expiring within a year from now
+- openshift_cert_expiry: warning_days=365
+
+# Show expired, soon to expire (now + 30 days), and all other certificates examined
+- openshift_cert_expiry: show_all=true
+'''
+
+
+class FakeOpenSSLCertificate(object):
+ """This provides a rough mock of what you get from
+`OpenSSL.crypto.load_certificate()`. This is a work-around for
+platforms missing the Python OpenSSL library.
+ """
+ def __init__(self, cert_string):
+ """`cert_string` is a certificate in the form you get from running a
+.crt through 'openssl x509 -in CERT.cert -text'"""
+ self.cert_string = cert_string
+ self.serial = None
+ self.subject = None
+ self.extensions = []
+ self.not_after = None
+ self._parse_cert()
+
+ def _parse_cert(self):
+ """Manually parse the certificate line by line"""
+ self.extensions = []
+
+ PARSING_ALT_NAMES = False
+ PARSING_HEX_SERIAL = False
+ for line in self.cert_string.split('\n'):
+ l = line.strip()
+ if PARSING_ALT_NAMES:
+ # We're parsing a 'Subject Alternative Name' line
+ self.extensions.append(
+ FakeOpenSSLCertificateSANExtension(l))
+
+ PARSING_ALT_NAMES = False
+ continue
+
+ if PARSING_HEX_SERIAL:
+ # Hex serials arrive colon-delimited
+ serial_raw = l.replace(':', '')
+ # Convert to decimal
+ self.serial = int('0x' + serial_raw, base=16)
+ PARSING_HEX_SERIAL = False
+ continue
+
+ # parse out the bits that we can
+ if l.startswith('Serial Number:'):
+ # Decimal format:
+ # Serial Number: 11 (0xb)
+ # => 11
+ # Hex Format (large serials):
+ # Serial Number:
+ # 0a:de:eb:24:04:75:ab:56:39:14:e9:5a:22:e2:85:bf
+ # => 14449739080294792594019643629255165375
+ if l.endswith(':'):
+ PARSING_HEX_SERIAL = True
+ continue
+ self.serial = int(l.split()[-2])
+
+ elif l.startswith('Not After :'):
+ # Not After : Feb 7 18:19:35 2019 GMT
+ # => strptime(str, '%b %d %H:%M:%S %Y %Z')
+ # => strftime('%Y%m%d%H%M%SZ')
+ # => 20190207181935Z
+ not_after_raw = l.partition(' : ')[-1]
+ # Last item: ('Not After', ' : ', 'Feb 7 18:19:35 2019 GMT')
+ not_after_parsed = datetime.datetime.strptime(not_after_raw, '%b %d %H:%M:%S %Y %Z')
+ self.not_after = not_after_parsed.strftime('%Y%m%d%H%M%SZ')
+
+ elif l.startswith('X509v3 Subject Alternative Name:'):
+ PARSING_ALT_NAMES = True
+ continue
+
+ elif l.startswith('Subject:'):
+ # O = system:nodes, CN = system:node:m01.example.com
+ self.subject = FakeOpenSSLCertificateSubjects(l.partition(': ')[-1])
+
+ def get_serial_number(self):
+ """Return the serial number of the cert"""
+ return self.serial
+
+ def get_subject(self):
+ """Subjects must implement get_components() and return dicts or
+tuples. An 'openssl x509 -in CERT.cert -text' with 'Subject':
+
+    Subject: O = system:nodes, CN = system:node:m01.example.com
+
+might return: [('O', 'system:nodes'), ('CN', 'system:node:m01.example.com')]
+ """
+ return self.subject
+
+ def get_extension(self, i):
+ """Extensions must implement get_short_name() and return the string
+'subjectAltName'"""
+ return self.extensions[i]
+
+ def get_extension_count(self):
+ """ get_extension_count """
+ return len(self.extensions)
+
+ def get_notAfter(self):
+ """Returns a date stamp as a string in the form
+'20180922170439Z'. strptime the result with format param:
+'%Y%m%d%H%M%SZ'."""
+ return self.not_after
+
+
+class FakeOpenSSLCertificateSANExtension(object): # pylint: disable=too-few-public-methods
+ """Mocks what happens when `get_extension` is called on a certificate
+object"""
+
+ def __init__(self, san_string):
+ """With `san_string` as you get from:
+
+ $ openssl x509 -in certificate.crt -text
+ """
+ self.san_string = san_string
+ self.short_name = 'subjectAltName'
+
+ def get_short_name(self):
+ """Return the 'type' of this extension. It's always the same though
+because we only care about subjectAltName extensions"""
+ return self.short_name
+
+ def __str__(self):
+ """Return this extension and the value as a simple string"""
+ return self.san_string
+
+
+# pylint: disable=too-few-public-methods
+class FakeOpenSSLCertificateSubjects(object):
+ """Mocks what happens when `get_subject` is called on a certificate
+object"""
+
+ def __init__(self, subject_string):
+ """With `subject_string` as you get from:
+
+ $ openssl x509 -in certificate.crt -text
+ """
+ self.subjects = []
+ for s in subject_string.split(', '):
+ name, _, value = s.partition(' = ')
+ self.subjects.append((name, value))
+
+ def get_components(self):
+ """Returns a list of tuples"""
+ return self.subjects
+
+
+######################################################################
+def filter_paths(path_list):
+ """`path_list` - A list of file paths to check. Only files which exist
+will be returned
+ """
+ return [p for p in path_list if os.path.exists(os.path.realpath(p))]
+
+
+# pylint: disable=too-many-locals,too-many-branches
+#
+# TODO: Break this function down into smaller chunks
+def load_and_handle_cert(cert_string, now, base64decode=False, ans_module=None):
+ """Load a certificate, split off the good parts, and return some
+useful data
+
+Params:
+
+- `cert_string` (string) - a certificate loaded into a string object
+- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against
+- `base64decode` (bool) - run base64.b64decode() on the input
+- `ans_module` (AnsibleModule) - The AnsibleModule object for this module (so we can raise errors)
+
+Returns:
+A tuple of the form:
+ (cert_subject, cert_expiry_date, time_remaining, cert_serial_number)
+ """
+ if base64decode:
+ _cert_string = base64.b64decode(cert_string).decode('utf-8')
+ else:
+ _cert_string = cert_string
+
+ # Disable this. We 'redefine' the type because we are working
+ # around a missing library on the target host.
+ #
+ # pylint: disable=redefined-variable-type
+ if HAS_OPENSSL:
+ # No work-around required
+ cert_loaded = OpenSSL.crypto.load_certificate(
+ OpenSSL.crypto.FILETYPE_PEM, _cert_string)
+ else:
+ # Missing library, work-around required. Run the 'openssl'
+ # command on it to decode it
+ cmd = 'openssl x509 -text'
+ try:
+ openssl_proc = subprocess.Popen(cmd.split(),
+ stdout=subprocess.PIPE,
+ stdin=subprocess.PIPE)
+ except OSError:
+ ans_module.fail_json(msg="Error: The 'OpenSSL' python library and CLI command were not found on the target host. Unable to parse any certificates. This host will not be included in generated reports.")
+ else:
+ openssl_decoded = openssl_proc.communicate(_cert_string.encode('utf-8'))[0].decode('utf-8')
+ cert_loaded = FakeOpenSSLCertificate(openssl_decoded)
+
+ ######################################################################
+ # Read all possible names from the cert
+ cert_subjects = []
+ for name, value in cert_loaded.get_subject().get_components():
+ if isinstance(name, bytes) or isinstance(value, bytes):
+ name = name.decode('utf-8')
+ value = value.decode('utf-8')
+ cert_subjects.append('{}:{}'.format(name, value))
+
+ # To read SANs from a cert we must read the subjectAltName
+ # extension from the X509 Object. What makes this more difficult
+ # is that pyOpenSSL does not give extensions as an iterable
+ san = None
+ for i in range(cert_loaded.get_extension_count()):
+ ext = cert_loaded.get_extension(i)
+ if ext.get_short_name() == 'subjectAltName':
+ san = ext
+
+ if san is not None:
+ # The X509Extension object for subjectAltName prints as a
+ # string with the alt names separated by a comma and a
+ # space. Split the string by ', ' and then add our new names
+ # to the list of existing names
+ cert_subjects.extend(str(san).split(', '))
+
+ cert_subject = ', '.join(cert_subjects)
+ ######################################################################
+
+ # Grab the expiration date
+ not_after = cert_loaded.get_notAfter()
+ # example get_notAfter() => 20180922170439Z
+ if isinstance(not_after, bytes):
+ not_after = not_after.decode('utf-8')
+
+ cert_expiry_date = datetime.datetime.strptime(
+ not_after,
+ '%Y%m%d%H%M%SZ')
+
+ time_remaining = cert_expiry_date - now
+
+ return (cert_subject, cert_expiry_date, time_remaining, cert_loaded.get_serial_number())
+
+
+def classify_cert(cert_meta, now, time_remaining, expire_window, cert_list):
+ """Given metadata about a certificate under examination, classify it
+ into one of three categories, 'ok', 'warning', and 'expired'.
+
+Params:
+
+- `cert_meta` dict - A dict with certificate metadata. Required fields
+ include: 'cert_cn', 'path', 'expiry', 'days_remaining', 'health'.
+- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against
+- `time_remaining` (datetime.timedelta) - a timedelta for how long until the cert expires
+- `expire_window` (datetime.timedelta) - a timedelta for how long the warning window is
+- `cert_list` list - A list to shove the classified cert into
+
+Return:
+- `cert_list` - The updated list of classified certificates
+ """
+ expiry_str = str(cert_meta['expiry'])
+ # Categorization
+ if cert_meta['expiry'] < now:
+ # This already expired, must NOTIFY
+ cert_meta['health'] = 'expired'
+ elif time_remaining < expire_window:
+        # WARN about this upcoming expiration
+ cert_meta['health'] = 'warning'
+ else:
+ # Not expired or about to expire
+ cert_meta['health'] = 'ok'
+
+ cert_meta['expiry'] = expiry_str
+ cert_meta['serial_hex'] = hex(int(cert_meta['serial']))
+ cert_list.append(cert_meta)
+ return cert_list
+
+
+def tabulate_summary(certificates, kubeconfigs, etcd_certs, router_certs, registry_certs):
+ """Calculate the summary text for when the module finishes
+running. This includes counts of each classification and of each
+certificate type examined.
+
+Params:
+
+- `certificates` (list of dicts) - Processed `expire_check_result`
+ dicts with filled in `health` keys for system certificates.
+- `kubeconfigs` - as above for kubeconfigs
+- `etcd_certs`, `router_certs`, `registry_certs` - as above for etcd, router, and registry certs
+
+Return:
+
+- `summary_results` (dict) - Counts of each cert type classification
+ and total items examined.
+ """
+ items = certificates + kubeconfigs + etcd_certs + router_certs + registry_certs
+
+ summary_results = {
+ 'system_certificates': len(certificates),
+ 'kubeconfig_certificates': len(kubeconfigs),
+ 'etcd_certificates': len(etcd_certs),
+ 'router_certs': len(router_certs),
+ 'registry_certs': len(registry_certs),
+ 'total': len(items),
+ 'ok': 0,
+ 'warning': 0,
+ 'expired': 0
+ }
+
+ summary_results['expired'] = len([c for c in items if c['health'] == 'expired'])
+ summary_results['warning'] = len([c for c in items if c['health'] == 'warning'])
+ summary_results['ok'] = len([c for c in items if c['health'] == 'ok'])
+
+ return summary_results
+
+
+######################################################################
+# This is our module MAIN function after all, so there's bound to be a
+# lot of code bundled up into one block
+#
+# Reason: These checks are disabled because the issue was introduced
+# during a period where the pylint checks weren't enabled for this file
+# Status: temporarily disabled pending future refactoring
+# pylint: disable=too-many-locals,too-many-statements,too-many-branches
+def main():
+ """This module examines certificates (in various forms) which compose
+an OpenShift Container Platform cluster
+ """
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ config_base=dict(
+ required=False,
+ default="/etc/origin",
+ type='str'),
+ warning_days=dict(
+ required=False,
+ default=30,
+ type='int'),
+ show_all=dict(
+ required=False,
+ default=False,
+ type='bool')
+ ),
+ supports_check_mode=True,
+ )
+
+ # Basic scaffolding for OpenShift specific certs
+ openshift_base_config_path = os.path.realpath(module.params['config_base'])
+ openshift_master_config_path = os.path.join(openshift_base_config_path,
+ "master", "master-config.yaml")
+ openshift_node_config_path = os.path.join(openshift_base_config_path,
+ "node", "node-config.yaml")
+ openshift_cert_check_paths = [
+ openshift_master_config_path,
+ openshift_node_config_path,
+ ]
+
+ # Paths for Kubeconfigs. Additional kubeconfigs are conditionally
+ # checked later in the code
+ master_kube_configs = ['admin', 'openshift-master',
+ 'openshift-node', 'openshift-router',
+ 'openshift-registry']
+
+ kubeconfig_paths = []
+ for m_kube_config in master_kube_configs:
+ kubeconfig_paths.append(
+ os.path.join(openshift_base_config_path, "master", m_kube_config + ".kubeconfig")
+ )
+
+ # Validate some paths we have the ability to do ahead of time
+ openshift_cert_check_paths = filter_paths(openshift_cert_check_paths)
+ kubeconfig_paths = filter_paths(kubeconfig_paths)
+
+ # etcd, where do you hide your certs? Used when parsing etcd.conf
+ etcd_cert_params = [
+ "ETCD_CA_FILE",
+ "ETCD_CERT_FILE",
+ "ETCD_PEER_CA_FILE",
+ "ETCD_PEER_CERT_FILE",
+ ]
+
+ # Expiry checking stuff
+ now = datetime.datetime.now()
+ # todo, catch exception for invalid input and return a fail_json
+ warning_days = int(module.params['warning_days'])
+ expire_window = datetime.timedelta(days=warning_days)
+
+ # Module stuff
+ #
+ # The results of our cert checking to return from the task call
+ check_results = {}
+ check_results['meta'] = {}
+ check_results['meta']['warning_days'] = warning_days
+ check_results['meta']['checked_at_time'] = str(now)
+ check_results['meta']['warn_before_date'] = str(now + expire_window)
+ check_results['meta']['show_all'] = str(module.params['show_all'])
+ # All the analyzed certs accumulate here
+ ocp_certs = []
+
+ ######################################################################
+ # Sure, why not? Let's enable check mode.
+ if module.check_mode:
+ check_results['ocp_certs'] = []
+ module.exit_json(
+ check_results=check_results,
+ msg="Checked 0 total certificates. Expired/Warning/OK: 0/0/0. Warning window: %s days" % module.params['warning_days'],
+ rc=0,
+ changed=False
+ )
+
+ ######################################################################
+ # Check for OpenShift Container Platform specific certs
+ ######################################################################
+ for os_cert in filter_paths(openshift_cert_check_paths):
+ # Open up that config file and locate the cert and CA
+ with io.open(os_cert, 'r', encoding='utf-8') as fp:
+ cert_meta = {}
+ cfg = yaml.load(fp)
+ # cert files are specified in parsed `fp` as relative to the path
+ # of the original config file. 'master-config.yaml' with certFile
+ # = 'foo.crt' implies that 'foo.crt' is in the same
+ # directory. certFile = '../foo.crt' is in the parent directory.
+ cfg_path = os.path.dirname(fp.name)
+ cert_meta['certFile'] = os.path.join(cfg_path, cfg['servingInfo']['certFile'])
+ cert_meta['clientCA'] = os.path.join(cfg_path, cfg['servingInfo']['clientCA'])
+
+ ######################################################################
+ # Load the certificate and the CA, parse their expiration dates into
+ # datetime objects so we can manipulate them later
+ for v in cert_meta.values():
+ with io.open(v, 'r', encoding='utf-8') as fp:
+ cert = fp.read()
+ (cert_subject,
+ cert_expiry_date,
+ time_remaining,
+ cert_serial) = load_and_handle_cert(cert, now, ans_module=module)
+
+ expire_check_result = {
+ 'cert_cn': cert_subject,
+ 'path': fp.name,
+ 'expiry': cert_expiry_date,
+ 'days_remaining': time_remaining.days,
+ 'health': None,
+ 'serial': cert_serial
+ }
+
+ classify_cert(expire_check_result, now, time_remaining, expire_window, ocp_certs)
+
+ ######################################################################
+ # /Check for OpenShift Container Platform specific certs
+ ######################################################################
+
+ ######################################################################
+ # Check service Kubeconfigs
+ ######################################################################
+ kubeconfigs = []
+
+ # There may be additional kubeconfigs to check, but their naming
+ # is less predictable than the ones we've already assembled.
+
+ try:
+ # Try to read the standard 'node-config.yaml' file to check if
+ # this host is a node.
+ with io.open(openshift_node_config_path, 'r', encoding='utf-8') as fp:
+ cfg = yaml.load(fp)
+
+ # OK, the config file exists, therefore this is a
+ # node. Nodes have their own kubeconfig files to
+ # communicate with the master API. Let's read the relative
+ # path to that file from the node config.
+ node_masterKubeConfig = cfg['masterKubeConfig']
+ # As before, the path to the 'masterKubeConfig' file is
+ # relative to `fp`
+ cfg_path = os.path.dirname(fp.name)
+ node_kubeconfig = os.path.join(cfg_path, node_masterKubeConfig)
+
+ with io.open(node_kubeconfig, 'r', encoding='utf8') as fp:
+            # Read in the node's kubeconfig file and grab the good stuff
+ cfg = yaml.load(fp)
+
+ c = cfg['users'][0]['user']['client-certificate-data']
+ (cert_subject,
+ cert_expiry_date,
+ time_remaining,
+ cert_serial) = load_and_handle_cert(c, now, base64decode=True, ans_module=module)
+
+ expire_check_result = {
+ 'cert_cn': cert_subject,
+ 'path': fp.name,
+ 'expiry': cert_expiry_date,
+ 'days_remaining': time_remaining.days,
+ 'health': None,
+ 'serial': cert_serial
+ }
+
+ classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs)
+ except IOError:
+ # This is not a node
+ pass
+
+ for kube in filter_paths(kubeconfig_paths):
+ with io.open(kube, 'r', encoding='utf-8') as fp:
+ # TODO: Maybe consider catching exceptions here?
+ cfg = yaml.load(fp)
+
+ # Per conversation, "the kubeconfigs you care about:
+ # admin, router, registry should all be single
+ # value". Following that advice we only grab the data for
+ # the user at index 0 in the 'users' list. There should
+ # not be more than one user.
+ c = cfg['users'][0]['user']['client-certificate-data']
+ (cert_subject,
+ cert_expiry_date,
+ time_remaining,
+ cert_serial) = load_and_handle_cert(c, now, base64decode=True, ans_module=module)
+
+ expire_check_result = {
+ 'cert_cn': cert_subject,
+ 'path': fp.name,
+ 'expiry': cert_expiry_date,
+ 'days_remaining': time_remaining.days,
+ 'health': None,
+ 'serial': cert_serial
+ }
+
+ classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs)
+
+ ######################################################################
+ # /Check service Kubeconfigs
+ ######################################################################
+
+ ######################################################################
+ # Check etcd certs
+ #
+ # Two things to check: 'external' etcd, and embedded etcd.
+ ######################################################################
+ # FIRST: The 'external' etcd
+ #
+    # Some values may be duplicated; make this a set for now so we
+    # de-duplicate them all
+ etcd_certs_to_check = set([])
+ etcd_certs = []
+ etcd_cert_params.append('dne')
+ try:
+ with io.open('/etc/etcd/etcd.conf', 'r', encoding='utf-8') as fp:
+ # Add dummy header section.
+ config = io.StringIO()
+ config.write(u'[ETCD]\n')
+ config.write(fp.read().replace('%', '%%'))
+ config.seek(0, os.SEEK_SET)
+
+ etcd_config = configparser.ConfigParser()
+ etcd_config.readfp(config)
+
+ for param in etcd_cert_params:
+ try:
+ etcd_certs_to_check.add(etcd_config.get('ETCD', param))
+ except configparser.NoOptionError:
+ # That parameter does not exist, oh well...
+ pass
+ except IOError:
+ # No etcd to see here, move along
+ pass
+
+ for etcd_cert in filter_paths(etcd_certs_to_check):
+ with io.open(etcd_cert, 'r', encoding='utf-8') as fp:
+ c = fp.read()
+ (cert_subject,
+ cert_expiry_date,
+ time_remaining,
+ cert_serial) = load_and_handle_cert(c, now, ans_module=module)
+
+ expire_check_result = {
+ 'cert_cn': cert_subject,
+ 'path': fp.name,
+ 'expiry': cert_expiry_date,
+ 'days_remaining': time_remaining.days,
+ 'health': None,
+ 'serial': cert_serial
+ }
+
+ classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs)
+
+ ######################################################################
+ # Now the embedded etcd
+ ######################################################################
+ try:
+ with io.open('/etc/origin/master/master-config.yaml', 'r', encoding='utf-8') as fp:
+ cfg = yaml.load(fp)
+ except IOError:
+ # Not present
+ pass
+ else:
+ if cfg.get('etcdConfig', {}).get('servingInfo', {}).get('certFile', None) is not None:
+ # This is embedded
+ etcd_crt_name = cfg['etcdConfig']['servingInfo']['certFile']
+ else:
+ # Not embedded
+ etcd_crt_name = None
+
+ if etcd_crt_name is not None:
+ # etcd_crt_name is relative to the location of the
+ # master-config.yaml file
+ cfg_path = os.path.dirname(fp.name)
+ etcd_cert = os.path.join(cfg_path, etcd_crt_name)
+ with open(etcd_cert, 'r') as etcd_fp:
+ (cert_subject,
+ cert_expiry_date,
+ time_remaining,
+ cert_serial) = load_and_handle_cert(etcd_fp.read(), now, ans_module=module)
+
+ expire_check_result = {
+ 'cert_cn': cert_subject,
+ 'path': etcd_fp.name,
+ 'expiry': cert_expiry_date,
+ 'days_remaining': time_remaining.days,
+ 'health': None,
+ 'serial': cert_serial
+ }
+
+ classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs)
+
+ ######################################################################
+ # /Check etcd certs
+ ######################################################################
+
+ ######################################################################
+ # Check router/registry certs
+ #
+ # These are saved as secrets in etcd. That means that we can not
+ # simply read a file to grab the data. Instead we're going to
+ # subprocess out to the 'oc get' command. On non-masters this
+    # command will fail; that is expected, so we catch that exception.
+ ######################################################################
+ router_certs = []
+ registry_certs = []
+
+ ######################################################################
+ # First the router certs
+ try:
+ router_secrets_raw = subprocess.Popen('oc get -n default secret router-certs -o yaml'.split(),
+ stdout=subprocess.PIPE)
+ router_ds = yaml.load(router_secrets_raw.communicate()[0])
+ router_c = router_ds['data']['tls.crt']
+ router_path = router_ds['metadata']['selfLink']
+ except TypeError:
+ # YAML couldn't load the result, this is not a master
+ pass
+ except OSError:
+ # The OC command doesn't exist here. Move along.
+ pass
+ else:
+ (cert_subject,
+ cert_expiry_date,
+ time_remaining,
+ cert_serial) = load_and_handle_cert(router_c, now, base64decode=True, ans_module=module)
+
+ expire_check_result = {
+ 'cert_cn': cert_subject,
+ 'path': router_path,
+ 'expiry': cert_expiry_date,
+ 'days_remaining': time_remaining.days,
+ 'health': None,
+ 'serial': cert_serial
+ }
+
+ classify_cert(expire_check_result, now, time_remaining, expire_window, router_certs)
+
+ ######################################################################
+ # Now for registry
+ try:
+ registry_secrets_raw = subprocess.Popen('oc get -n default secret registry-certificates -o yaml'.split(),
+ stdout=subprocess.PIPE)
+ registry_ds = yaml.load(registry_secrets_raw.communicate()[0])
+ registry_c = registry_ds['data']['registry.crt']
+ registry_path = registry_ds['metadata']['selfLink']
+ except TypeError:
+ # YAML couldn't load the result, this is not a master
+ pass
+ except OSError:
+ # The OC command doesn't exist here. Move along.
+ pass
+ else:
+ (cert_subject,
+ cert_expiry_date,
+ time_remaining,
+ cert_serial) = load_and_handle_cert(registry_c, now, base64decode=True, ans_module=module)
+
+ expire_check_result = {
+ 'cert_cn': cert_subject,
+ 'path': registry_path,
+ 'expiry': cert_expiry_date,
+ 'days_remaining': time_remaining.days,
+ 'health': None,
+ 'serial': cert_serial
+ }
+
+ classify_cert(expire_check_result, now, time_remaining, expire_window, registry_certs)
+
+ ######################################################################
+ # /Check router/registry certs
+ ######################################################################
+
+ res = tabulate_summary(ocp_certs, kubeconfigs, etcd_certs, router_certs, registry_certs)
+
+ msg = "Checked {count} total certificates. Expired/Warning/OK: {exp}/{warn}/{ok}. Warning window: {window} days".format(
+ count=res['total'],
+ exp=res['expired'],
+ warn=res['warning'],
+ ok=res['ok'],
+ window=int(module.params['warning_days']),
+ )
+
+ # By default we only return detailed information about expired or
+ # warning certificates. If show_all is true then we will print all
+ # the certificates examined.
+ if not module.params['show_all']:
+ check_results['ocp_certs'] = [crt for crt in ocp_certs if crt['health'] in ['expired', 'warning']]
+ check_results['kubeconfigs'] = [crt for crt in kubeconfigs if crt['health'] in ['expired', 'warning']]
+ check_results['etcd'] = [crt for crt in etcd_certs if crt['health'] in ['expired', 'warning']]
+ check_results['registry'] = [crt for crt in registry_certs if crt['health'] in ['expired', 'warning']]
+ check_results['router'] = [crt for crt in router_certs if crt['health'] in ['expired', 'warning']]
+ else:
+ check_results['ocp_certs'] = ocp_certs
+ check_results['kubeconfigs'] = kubeconfigs
+ check_results['etcd'] = etcd_certs
+ check_results['registry'] = registry_certs
+ check_results['router'] = router_certs
+
+ # Sort the final results to report in order of ascending safety
+ # time. That is to say, the certificates which will expire sooner
+ # will be at the front of the list and certificates which will
+ # expire later are at the end. Router and registry certs should be
+ # limited to just 1 result, so don't bother sorting those.
+ def cert_key(item):
+ ''' return the days_remaining key '''
+ return item['days_remaining']
+
+ check_results['ocp_certs'] = sorted(check_results['ocp_certs'], key=cert_key)
+ check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], key=cert_key)
+ check_results['etcd'] = sorted(check_results['etcd'], key=cert_key)
+
+ # This module will never change anything, but we might want to
+ # change the return code parameter if there is some catastrophic
+ # error we noticed earlier
+ module.exit_json(
+ check_results=check_results,
+ summary=res,
+ msg=msg,
+ rc=0,
+ changed=False
+ )
+
+
+if __name__ == '__main__':
+ main()
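
The health classification performed by classify_cert() reduces to comparing the certificate expiry against the current time and against the warning window. The following is a minimal standalone sketch of that logic, not part of the commit; the classify() helper and the dates used are hypothetical.

# Standalone sketch of the expired/warning/ok classification used by
# classify_cert() above. The classify() helper and the dates are
# illustrative, not part of the module.
import datetime


def classify(expiry, now, warning_days=30):
    """Return 'expired', 'warning', or 'ok' for a certificate expiry date."""
    expire_window = datetime.timedelta(days=warning_days)
    if expiry < now:
        return 'expired'
    if expiry - now < expire_window:
        return 'warning'
    return 'ok'


if __name__ == '__main__':
    now = datetime.datetime(2018, 1, 10)
    print(classify(datetime.datetime(2017, 12, 1), now))  # expired
    print(classify(datetime.datetime(2018, 1, 25), now))  # warning: 15 days left
    print(classify(datetime.datetime(2019, 1, 10), now))  # ok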
diff --git a/roles/lib_utils/library/openshift_container_binary_sync.py b/roles/lib_utils/library/openshift_container_binary_sync.py
new file mode 100644
index 000000000..440b8ec28
--- /dev/null
+++ b/roles/lib_utils/library/openshift_container_binary_sync.py
@@ -0,0 +1,205 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# pylint: disable=missing-docstring,invalid-name
+
+import random
+import tempfile
+import shutil
+import os.path
+
+# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
+from ansible.module_utils.basic import * # noqa: F403
+
+
+DOCUMENTATION = '''
+---
+module: openshift_container_binary_sync
+short_description: Copies OpenShift binaries out of the given image tag to the host system.
+'''
+
+
+class BinarySyncError(Exception):
+ def __init__(self, msg):
+ super(BinarySyncError, self).__init__(msg)
+ self.msg = msg
+
+
+# pylint: disable=too-few-public-methods,too-many-instance-attributes
+class BinarySyncer(object):
+ """
+ Syncs the openshift, oc, and kubectl binaries/symlinks out of
+ a container onto the host system.
+ """
+
+ def __init__(self, module, image, tag, backend):
+ self.module = module
+ self.changed = False
+ self.output = []
+ self.bin_dir = '/usr/local/bin'
+ self._image = image
+ self.tag = tag
+ self.backend = backend
+ self.temp_dir = None # TBD
+
+ def sync(self):
+ if self.backend == 'atomic':
+ return self._sync_atomic()
+
+ return self._sync_docker()
+
+ def _sync_atomic(self):
+ self.temp_dir = tempfile.mkdtemp()
+ temp_dir_mount = tempfile.mkdtemp()
+ try:
+ image_spec = '%s:%s' % (self.image, self.tag)
+ rc, stdout, stderr = self.module.run_command(['atomic', 'mount',
+ '--storage', "ostree",
+ image_spec, temp_dir_mount])
+ if rc:
+ raise BinarySyncError("Error mounting image. stdout=%s, stderr=%s" %
+ (stdout, stderr))
+ for i in ["openshift", "oc"]:
+ src_file = os.path.join(temp_dir_mount, "usr/bin", i)
+ shutil.copy(src_file, self.temp_dir)
+
+ self._sync_binaries()
+ finally:
+ self.module.run_command(['atomic', 'umount', temp_dir_mount])
+ shutil.rmtree(temp_dir_mount)
+ shutil.rmtree(self.temp_dir)
+
+ def _sync_docker(self):
+ container_name = "openshift-cli-%s" % random.randint(1, 100000)
+ rc, stdout, stderr = self.module.run_command(['docker', 'create', '--name',
+ container_name, '%s:%s' % (self.image, self.tag)])
+ if rc:
+ raise BinarySyncError("Error creating temporary docker container. stdout=%s, stderr=%s" %
+ (stdout, stderr))
+ self.output.append(stdout)
+ try:
+ self.temp_dir = tempfile.mkdtemp()
+ self.output.append("Using temp dir: %s" % self.temp_dir)
+
+ rc, stdout, stderr = self.module.run_command(['docker', 'cp', "%s:/usr/bin/openshift" % container_name,
+ self.temp_dir])
+ if rc:
+ raise BinarySyncError("Error copying file from docker container: stdout=%s, stderr=%s" %
+ (stdout, stderr))
+
+ rc, stdout, stderr = self.module.run_command(['docker', 'cp', "%s:/usr/bin/oc" % container_name,
+ self.temp_dir])
+ if rc:
+ raise BinarySyncError("Error copying file from docker container: stdout=%s, stderr=%s" %
+ (stdout, stderr))
+
+ self._sync_binaries()
+ finally:
+ shutil.rmtree(self.temp_dir)
+ self.module.run_command(['docker', 'rm', container_name])
+
+ def _sync_binaries(self):
+ self._sync_binary('openshift')
+
+ # In older versions, oc was a symlink to openshift:
+ if os.path.islink(os.path.join(self.temp_dir, 'oc')):
+ self._sync_symlink('oc', 'openshift')
+ else:
+ self._sync_binary('oc')
+
+ # Ensure correct symlinks created:
+ self._sync_symlink('kubectl', 'openshift')
+
+ # Remove old oadm binary
+ if os.path.exists(os.path.join(self.bin_dir, 'oadm')):
+ os.remove(os.path.join(self.bin_dir, 'oadm'))
+
+ def _sync_symlink(self, binary_name, link_to):
+ """ Ensure the given binary name exists and links to the expected binary. """
+
+ # The symlink we are creating:
+ link_path = os.path.join(self.bin_dir, binary_name)
+
+ # The expected file we should be linking to:
+ link_dest = os.path.join(self.bin_dir, link_to)
+
+ if not os.path.exists(link_path) or \
+ not os.path.islink(link_path) or \
+ os.path.realpath(link_path) != os.path.realpath(link_dest):
+ if os.path.exists(link_path):
+ os.remove(link_path)
+ os.symlink(link_to, os.path.join(self.bin_dir, binary_name))
+ self.output.append("Symlinked %s to %s." % (link_path, link_dest))
+ self.changed = True
+
+ def _sync_binary(self, binary_name):
+ src_path = os.path.join(self.temp_dir, binary_name)
+ dest_path = os.path.join(self.bin_dir, binary_name)
+        incoming_checksum = self.module.run_command(['sha256sum', src_path])[1].split()[0]
+        if not os.path.exists(dest_path) or self.module.run_command(['sha256sum', dest_path])[1].split()[0] != incoming_checksum:
+
+ # See: https://github.com/openshift/openshift-ansible/issues/4965
+ if os.path.islink(dest_path):
+ os.unlink(dest_path)
+ self.output.append('Removed old symlink {} before copying binary.'.format(dest_path))
+ shutil.move(src_path, dest_path)
+ self.output.append("Moved %s to %s." % (src_path, dest_path))
+ self.changed = True
+
+ @property
+ def raw_image(self):
+ """
+ Returns the image as it was originally passed in to the instance.
+
+ .. note::
+ This image string will only work directly with the atomic command.
+
+ :returns: The original image passed in.
+ :rtype: str
+ """
+ return self._image
+
+ @property
+ def image(self):
+ """
+ Returns the image without atomic prefixes used to map to skopeo args.
+
+ :returns: The image string without prefixes
+ :rtype: str
+ """
+ image = self._image
+ for remove in ('oci:', 'http:', 'https:'):
+ if image.startswith(remove):
+ image = image.replace(remove, '')
+ return image
+
+
+def main():
+ module = AnsibleModule( # noqa: F405
+ argument_spec=dict(
+ image=dict(required=True),
+ tag=dict(required=True),
+ backend=dict(required=True),
+ ),
+ supports_check_mode=True
+ )
+
+ image = module.params['image']
+ tag = module.params['tag']
+ backend = module.params['backend']
+
+ if backend not in ["docker", "atomic"]:
+ module.fail_json(msg="unknown backend")
+
+ binary_syncer = BinarySyncer(module, image, tag, backend)
+
+ try:
+ binary_syncer.sync()
+ except BinarySyncError as ex:
+ module.fail_json(msg=ex.msg)
+
+ return module.exit_json(changed=binary_syncer.changed,
+ output=binary_syncer.output)
+
+
+if __name__ == '__main__':
+ main()
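
The binary sync in _sync_binary() is checksum-gated: a binary is copied into the destination directory only when its sha256 digest differs from the file already installed there. Below is a minimal standalone sketch of that check, not part of the commit; hashlib stands in for the external sha256sum command, and the paths and the sync_binary()/sha256() helpers are illustrative.

# Standalone sketch of the checksum-gated copy used by _sync_binary() above.
# hashlib stands in for the external sha256sum command; paths are illustrative.
import hashlib
import os
import shutil


def sha256(path):
    """Return the hex sha256 digest of a file."""
    digest = hashlib.sha256()
    with open(path, 'rb') as handle:
        for chunk in iter(lambda: handle.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()


def sync_binary(src_path, dest_path):
    """Copy src_path over dest_path only when the contents differ."""
    if os.path.exists(dest_path) and sha256(src_path) == sha256(dest_path):
        return False  # already up to date; nothing changed
    if os.path.islink(dest_path):
        os.unlink(dest_path)  # never copy through an old symlink
    shutil.copy(src_path, dest_path)
    return True


if __name__ == '__main__':
    changed = sync_binary('/tmp/openshift-cli/oc', '/usr/local/bin/oc')
    print('changed' if changed else 'unchanged')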