author    Luke Meyer <lmeyer@redhat.com>  2017-06-01 19:24:31 -0400
committer Luke Meyer <lmeyer@redhat.com>  2017-06-07 19:27:31 -0400
commit    a092dff53070c4eaa942dae36fc8742a7d53959d
tree      332bd39ce6938982317c7054c48e239e242f71e5 /roles
parent    055082c1679cb253758bc16e0a6ca37f70d0bc65
docker checks: finish and refactor
Incorporated docker_storage_driver into docker_storage as both need driver info. Corrected storage calculation to include VG free space, not just the current amount in the LV pool. Now makes no assumptions about pool name. Improved user messaging. Factored out some methods that can be shared with docker_image_availability.
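To make the corrected calculation concrete, here is a minimal sketch (not part of the patch; the byte values are hypothetical, mirroring the test data below) of how usage percentage is now computed against the pool total plus the VG's free space:

    # values as the Docker API and /sbin/vgs might report them, converted to bytes
    data_used = 19.92 * 1024**2    # "Data Space Used": "19.92 MB"
    data_total = 8.535 * 1024**3   # "Data Space Total": "8.535 GB"
    vg_free = 12.0 * 1024**3       # vgs -o vg_free -> " 12.00g"

    # the thin pool auto-expands into the VG's free space, so usage is measured
    # against the expandable total rather than the pool's current size alone
    pct_used = 100 * data_used / (data_total + vg_free)  # ~0.1 percent
    assert pct_used < 90.0  # default max_thinpool_data_usage_percent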
Diffstat (limited to 'roles')
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py |  22
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_storage.py            | 257
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_storage_driver.py     |  50
-rw-r--r--  roles/openshift_health_checker/openshift_checks/mixins.py                    |  40
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py        |  24
-rw-r--r--  roles/openshift_health_checker/test/docker_storage_driver_test.py            |  81
-rw-r--r--  roles/openshift_health_checker/test/docker_storage_test.py                   | 367
7 files changed, 397 insertions(+), 444 deletions(-)
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 4588ed634..27e6fe383 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -1,8 +1,9 @@
# pylint: disable=missing-docstring
from openshift_checks import OpenShiftCheck, get_var
+from openshift_checks.mixins import DockerHostMixin
-class DockerImageAvailability(OpenShiftCheck):
+class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
"""Check that required Docker images are available.
This check attempts to ensure that required docker images are
@@ -36,19 +37,11 @@ class DockerImageAvailability(OpenShiftCheck):
def run(self, tmp, task_vars):
msg, failed, changed = self.ensure_dependencies(task_vars)
-
- # exit early if Skopeo update fails
if failed:
- if "No package matching" in msg:
- msg = "Ensure that all required dependencies can be installed via `yum`.\n"
return {
"failed": True,
"changed": changed,
- "msg": (
- "Unable to update or install required dependency packages on this host;\n"
- "These are required in order to check Docker image availability:"
- "\n {deps}\n{msg}"
- ).format(deps=',\n '.join(self.dependencies), msg=msg),
+ "msg": "Some dependencies are required in order to check Docker image availability.\n" + msg
}
required_images = self.required_images(task_vars)
@@ -168,12 +161,3 @@ class DockerImageAvailability(OpenShiftCheck):
args = {"_raw_params": cmd_str}
result = self.module_executor("command", args, task_vars)
return not result.get("failed", False) and result.get("rc", 0) == 0
-
- # ensures that the skopeo and python-docker-py packages exist
- # check is skipped on atomic installations
- def ensure_dependencies(self, task_vars):
- if get_var(task_vars, "openshift", "common", "is_atomic"):
- return "", False, False
-
- result = self.module_executor("yum", {"name": self.dependencies, "state": "latest"}, task_vars)
- return result.get("msg", ""), result.get("failed", False) or result.get("rc", 0) != 0, result.get("changed")
diff --git a/roles/openshift_health_checker/openshift_checks/docker_storage.py b/roles/openshift_health_checker/openshift_checks/docker_storage.py
index 2dfe10a02..5c9bed97e 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_storage.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_storage.py
@@ -1,110 +1,185 @@
-# pylint: disable=missing-docstring
+"""Check Docker storage driver and usage."""
import json
-
+import re
from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks.mixins import DockerHostMixin
-class DockerStorage(OpenShiftCheck):
- """Check Docker storage sanity.
+class DockerStorage(DockerHostMixin, OpenShiftCheck):
+ """Check Docker storage driver compatibility.
- Check for thinpool usage during a containerized installation
+ This check ensures that Docker is using a supported storage driver,
+ and that loopback is not being used (if using devicemapper).
+ It also checks that storage usage is not above the configured threshold.
"""
name = "docker_storage"
- tags = ["preflight"]
+ tags = ["pre-install", "health", "preflight"]
+ dependencies = ["python-docker-py"]
+ storage_drivers = ["devicemapper", "overlay2"]
max_thinpool_data_usage_percent = 90.0
max_thinpool_meta_usage_percent = 90.0
- @classmethod
- def is_active(cls, task_vars):
- """Only run on hosts that depend on Docker."""
- is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
- is_node = "nodes" in get_var(task_vars, "group_names", default=[])
- return (super(DockerStorage, cls).is_active(task_vars) and is_containerized) or is_node
-
+ # pylint: disable=too-many-return-statements
+ # Reason: permanent stylistic exception;
+ # it is clearer to return on failures and there are just many ways to fail here.
def run(self, tmp, task_vars):
- try:
- self.max_thinpool_data_usage_percent = float(get_var(task_vars, "max_thinpool_data_usage_percent",
- default=self.max_thinpool_data_usage_percent))
- self.max_thinpool_meta_usage_percent = float(get_var(task_vars, "max_thinpool_metadata_usage_percent",
- default=self.max_thinpool_meta_usage_percent))
- except ValueError as err:
+ msg, failed, changed = self.ensure_dependencies(task_vars)
+ if failed:
return {
"failed": True,
- "msg": "Unable to convert thinpool data usage limit to float: {}".format(str(err))
+ "changed": changed,
+ "msg": "Some dependencies are required in order to query docker storage on host:\n" + msg
}
- err_msg = self.check_thinpool_usage(task_vars)
- if err_msg:
- return {"failed": True, "msg": err_msg}
-
- return {}
-
- def check_thinpool_usage(self, task_vars):
- lvs = self.get_lvs_data(task_vars)
- lv_data = self.extract_thinpool_obj(lvs)
-
- data_percent = self.get_thinpool_data_usage(lv_data)
- metadata_percent = self.get_thinpool_metadata_usage(lv_data)
-
- if data_percent > self.max_thinpool_data_usage_percent:
- msg = "thinpool data usage above maximum threshold of {threshold}%"
- return msg.format(threshold=self.max_thinpool_data_usage_percent)
-
- if metadata_percent > self.max_thinpool_meta_usage_percent:
- msg = "thinpool metadata usage above maximum threshold of {threshold}%"
- return msg.format(threshold=self.max_thinpool_meta_usage_percent)
-
- return ""
-
- def get_lvs_data(self, task_vars):
- lvs_cmd = "/sbin/lvs --select vg_name=docker --select lv_name=docker-pool --report-format json"
- result = self.exec_cmd(lvs_cmd, task_vars)
-
- if result.get("failed", False):
- msg = "no thinpool usage data returned by the host: {}"
- raise OpenShiftCheckException(msg.format(result.get("msg", "")))
-
- try:
- data_json = json.loads(result.get("stdout", ""))
- except ValueError as err:
- raise OpenShiftCheckException("Invalid JSON value returned by lvs command: {}".format(str(err)))
-
- data = data_json.get("report")
- if not data:
- raise OpenShiftCheckException("no thinpool usage data returned by the host.")
-
- return data
-
- @staticmethod
- def get_thinpool_data_usage(thinpool_lv_data):
- data = thinpool_lv_data.get("data_percent")
- if not data:
- raise OpenShiftCheckException("no thinpool usage data returned by the host.")
-
- return float(data)
+ # attempt to get the docker info hash from the API
+ info = self.execute_module("docker_info", {}, task_vars)
+ if info.get("failed"):
+ return {"failed": True, "changed": changed,
+ "msg": "Failed to query Docker API. Is docker running on this host?"}
+ if not info.get("info"): # this would be very strange
+ return {"failed": True, "changed": changed,
+ "msg": "Docker API query missing info:\n{}".format(json.dumps(info))}
+ info = info["info"]
+
+ # check if the storage driver we saw is valid
+ driver = info.get("Driver", "[NONE]")
+ if driver not in self.storage_drivers:
+ msg = (
+ "Detected unsupported Docker storage driver '{driver}'.\n"
+ "Supported storage drivers are: {drivers}"
+ ).format(driver=driver, drivers=', '.join(self.storage_drivers))
+ return {"failed": True, "changed": changed, "msg": msg}
+
+ # driver status info is a list of tuples; convert to dict and validate based on driver
+ driver_status = {item[0]: item[1] for item in info.get("DriverStatus", [])}
+ if driver == "devicemapper":
+ if driver_status.get("Data loop file"):
+ msg = (
+ "Use of loopback devices with the Docker devicemapper storage driver\n"
+ "(the default storage configuration) is unsupported in production.\n"
+ "Please use docker-storage-setup to configure a backing storage volume.\n"
+ "See http://red.ht/2rNperO for further information."
+ )
+ return {"failed": True, "changed": changed, "msg": msg}
+ result = self._check_dm_usage(driver_status, task_vars)
+ result["changed"] = changed
+ return result
+
+ # TODO(lmeyer): determine how to check usage for overlay2
+
+ return {"changed": changed}
+
+ def _check_dm_usage(self, driver_status, task_vars):
+ """
+ Backing assumptions: We expect devicemapper to be backed by an auto-expanding thin pool
+ implemented as an LV in an LVM2 VG. This is how docker-storage-setup currently configures
+ devicemapper storage. The LV is "thin" because it does not use all available storage
+ from its VG, instead expanding as needed; so to determine available space, we gather
+ current usage as the Docker API reports for the driver as well as space available for
+ expansion in the pool's VG.
+ Usage within the LV is divided into pools allocated to data and metadata, either of which
+ could run out of space first; so we check both.
+ """
+ vals = dict(
+ vg_free=self._get_vg_free(driver_status.get("Pool Name"), task_vars),
+ data_used=driver_status.get("Data Space Used"),
+ data_total=driver_status.get("Data Space Total"),
+ metadata_used=driver_status.get("Metadata Space Used"),
+ metadata_total=driver_status.get("Metadata Space Total"),
+ )
+
+ # convert all human-readable strings to bytes
+ for key, value in vals.copy().items():
+ try:
+ vals[key + "_bytes"] = self._convert_to_bytes(value)
+ except ValueError as err: # unlikely to hit this from API info, but just to be safe
+ return {
+ "failed": True,
+ "values": vals,
+ "msg": "Could not interpret {} value '{}' as bytes: {}".format(key, value, str(err))
+ }
+
+ # determine the threshold percentages which usage should not exceed
+ for name, default in [("data", self.max_thinpool_data_usage_percent),
+ ("metadata", self.max_thinpool_meta_usage_percent)]:
+ percent = get_var(task_vars, "max_thinpool_" + name + "_usage_percent", default=default)
+ try:
+ vals[name + "_threshold"] = float(percent)
+ except ValueError:
+ return {
+ "failed": True,
+ "msg": "Specified thinpool {} usage limit '{}' is not a percentage".format(name, percent)
+ }
+
+ # test whether the thresholds are exceeded
+ messages = []
+ for name in ["data", "metadata"]:
+ vals[name + "_pct_used"] = 100 * vals[name + "_used_bytes"] / (
+ vals[name + "_total_bytes"] + vals["vg_free_bytes"])
+ if vals[name + "_pct_used"] > vals[name + "_threshold"]:
+ messages.append(
+ "Docker thinpool {name} usage percentage {pct:.1f} "
+ "is higher than threshold {thresh:.1f}.".format(
+ name=name,
+ pct=vals[name + "_pct_used"],
+ thresh=vals[name + "_threshold"],
+ ))
+ vals["failed"] = True
+
+ vals["msg"] = "\n".join(messages or ["Thinpool usage is within thresholds."])
+ return vals
+
+ def _get_vg_free(self, pool, task_vars):
+ # Determine which VG to examine according to the pool name, the only indicator currently
+ # available from the Docker API driver info. We assume a name that looks like
+ # "vg--name-docker--pool"; vg and lv names with inner hyphens doubled, joined by a hyphen.
+ match = re.match(r'((?:[^-]|--)+)-(?!-)', pool) # matches up to the first single hyphen
+ if not match: # unlikely, but... be clear if we assumed wrong
+ raise OpenShiftCheckException(
+ "This host's Docker reports it is using a storage pool named '{}'.\n"
+ "However this name does not have the expected format of 'vgname-lvname'\n"
+ "so the available storage in the VG cannot be determined.".format(pool)
+ )
+ vg_name = match.groups()[0].replace("--", "-")
+ vgs_cmd = "/sbin/vgs --noheadings -o vg_free --select vg_name=" + vg_name
+ # should return free space like " 12.00g" if the VG exists; empty if it does not
+
+ ret = self.execute_module("command", {"_raw_params": vgs_cmd}, task_vars)
+ if ret.get("failed") or ret.get("rc", 0) != 0:
+ raise OpenShiftCheckException(
+ "Is LVM installed? Failed to run /sbin/vgs "
+ "to determine docker storage usage:\n" + ret.get("msg", "")
+ )
+ size = ret.get("stdout", "").strip()
+ if not size:
+ raise OpenShiftCheckException(
+ "This host's Docker reports it is using a storage pool named '{pool}'.\n"
+ "which we expect to come from local VG '{vg}'.\n"
+ "However, /sbin/vgs did not find this VG. Is Docker for this host"
+ "running and using the storage on the host?".format(pool=pool, vg=vg_name)
+ )
+ return size
@staticmethod
- def get_thinpool_metadata_usage(thinpool_lv_data):
- data = thinpool_lv_data.get("metadata_percent")
- if not data:
- raise OpenShiftCheckException("no thinpool usage data returned by the host.")
-
- return float(data)
-
- @staticmethod
- def extract_thinpool_obj(thinpool_data):
- if not thinpool_data or not thinpool_data[0]:
- raise OpenShiftCheckException("no thinpool usage data returned by the host.")
-
- lv_data = thinpool_data[0].get("lv")
- if not lv_data or not lv_data[0]:
- raise OpenShiftCheckException("no thinpool usage data returned by the host.")
-
- return lv_data[0]
-
- def exec_cmd(self, cmd_str, task_vars):
- return self.execute_module("command", {
- "_raw_params": cmd_str,
- }, task_vars)
+ def _convert_to_bytes(string):
+ units = dict(
+ b=1,
+ k=1024,
+ m=1024**2,
+ g=1024**3,
+ t=1024**4,
+ p=1024**5,
+ )
+ string = string or ""
+ match = re.match(r'(\d+(?:\.\d+)?)\s*(\w)?', string) # float followed by optional unit
+ if not match:
+ raise ValueError("Cannot convert to a byte size: " + string)
+
+ number, unit = match.groups()
+ multiplier = 1 if not unit else units.get(unit.lower())
+ if not multiplier:
+ raise ValueError("Cannot convert to a byte size: " + string)
+
+ return float(number) * multiplier
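For reference, the pool-name parsing in _get_vg_free can be exercised on its own (a standalone sketch of the same regex; pool names are taken from the tests below):

    import re

    def vg_from_pool(pool):
        # LVM escapes hyphens inside VG/LV names by doubling them, then joins
        # the two names with a single hyphen, e.g. VG "docker-vg" + LV
        # "docker-pool" -> "docker--vg-docker--pool"
        match = re.match(r'((?:[^-]|--)+)-(?!-)', pool)  # up to first single hyphen
        if not match:
            raise ValueError("unexpected pool name: " + pool)
        return match.groups()[0].replace("--", "-")

    assert vg_from_pool("docker--vg-docker--pool") == "docker-vg"
    assert vg_from_pool("foo-bar") == "foo"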
diff --git a/roles/openshift_health_checker/openshift_checks/docker_storage_driver.py b/roles/openshift_health_checker/openshift_checks/docker_storage_driver.py
deleted file mode 100644
index 94ea7ba9c..000000000
--- a/roles/openshift_health_checker/openshift_checks/docker_storage_driver.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# pylint: disable=missing-docstring
-from openshift_checks import OpenShiftCheck, get_var
-
-
-class DockerStorageDriver(OpenShiftCheck):
- """Check Docker storage driver compatibility.
-
- This check ensures that Docker is using a supported storage driver,
- and that Loopback is not being used (if using devicemapper).
- """
-
- name = "docker_storage_driver"
- tags = ["preflight"]
-
- storage_drivers = ["devicemapper", "overlay2"]
-
- @classmethod
- def is_active(cls, task_vars):
- """Skip non-containerized installations."""
- is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
- return super(DockerStorageDriver, cls).is_active(task_vars) and is_containerized
-
- def run(self, tmp, task_vars):
- info = self.execute_module("docker_info", {}, task_vars).get("info", {})
-
- if not self.is_supported_storage_driver(info):
- msg = "Unsupported Docker storage driver detected. Supported storage drivers: {drivers}"
- return {"failed": True, "msg": msg.format(drivers=', '.join(self.storage_drivers))}
-
- if self.is_using_loopback_device(info):
- msg = "Use of loopback devices is discouraged. Try running Docker with `--storage-opt dm.thinpooldev`"
- return {"failed": True, "msg": msg}
-
- return {}
-
- def is_supported_storage_driver(self, docker_info):
- return docker_info.get("Driver", "") in self.storage_drivers
-
- @staticmethod
- def is_using_loopback_device(docker_info):
- # Loopback device usage is only an issue if using devicemapper.
- # Skip this check if using any other storage driver.
- if docker_info.get("Driver", "") != "devicemapper":
- return False
-
- for status in docker_info.get("DriverStatus", []):
- if status[0] == "Data loop file":
- return bool(status[1])
-
- return False
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index 20d160eaf..1181784ab 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -1,4 +1,3 @@
-# pylint: disable=missing-docstring,too-few-public-methods
"""
Mixin classes meant to be used with subclasses of OpenShiftCheck.
"""
@@ -8,8 +7,47 @@ from openshift_checks import get_var
class NotContainerizedMixin(object):
"""Mixin for checks that are only active when not in containerized mode."""
+ # permanent # pylint: disable=too-few-public-methods
+ # Reason: The mixin is not intended to stand on its own as a class.
@classmethod
def is_active(cls, task_vars):
+ """Only run on non-containerized hosts."""
is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
return super(NotContainerizedMixin, cls).is_active(task_vars) and not is_containerized
+
+
+class DockerHostMixin(object):
+ """Mixin for checks that are only active on hosts that require Docker."""
+
+ dependencies = []
+
+ @classmethod
+ def is_active(cls, task_vars):
+ """Only run on hosts that depend on Docker."""
+ is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
+ is_node = "nodes" in get_var(task_vars, "group_names", default=[])
+ return super(DockerHostMixin, cls).is_active(task_vars) and (is_containerized or is_node)
+
+ def ensure_dependencies(self, task_vars):
+ """
+ Ensure that docker-related packages exist, but not on atomic hosts
+ (which would not be able to install but should already have them).
+ Returns: msg, failed, changed
+ """
+ if get_var(task_vars, "openshift", "common", "is_atomic"):
+ return "", False, False
+
+ # NOTE: we would use the "package" module but it's actually an action plugin
+ # and it's not clear how to invoke one of those. This is about the same anyway:
+ pkg_manager = get_var(task_vars, "ansible_pkg_mgr", default="yum")
+ result = self.module_executor(pkg_manager, {"name": self.dependencies, "state": "present"}, task_vars)
+ msg = result.get("msg", "")
+ if result.get("failed"):
+ if "No package matching" in msg:
+ msg = "Ensure that all required dependencies can be installed via `yum`.\n"
+ msg = (
+ "Unable to install required packages on this host:\n"
+ " {deps}\n{msg}"
+ ).format(deps=',\n '.join(self.dependencies), msg=msg)
+ return msg, result.get("failed") or result.get("rc", 0) != 0, result.get("changed")
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index 0379cafb5..197c65f51 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -3,19 +3,25 @@ import pytest
from openshift_checks.docker_image_availability import DockerImageAvailability
-@pytest.mark.parametrize('deployment_type,is_active', [
- ("origin", True),
- ("openshift-enterprise", True),
- ("enterprise", False),
- ("online", False),
- ("invalid", False),
- ("", False),
+@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
+ ("origin", True, [], True),
+ ("openshift-enterprise", True, [], True),
+ ("enterprise", True, [], False),
+ ("online", True, [], False),
+ ("invalid", True, [], False),
+ ("", True, [], False),
+ ("origin", False, [], False),
+ ("openshift-enterprise", False, [], False),
+ ("origin", False, ["nodes", "masters"], True),
+ ("openshift-enterprise", False, ["etcd"], False),
])
-def test_is_active(deployment_type, is_active):
+def test_is_active(deployment_type, is_containerized, group_names, expect_active):
task_vars = dict(
+ openshift=dict(common=dict(is_containerized=is_containerized)),
openshift_deployment_type=deployment_type,
+ group_names=group_names,
)
- assert DockerImageAvailability.is_active(task_vars=task_vars) == is_active
+ assert DockerImageAvailability.is_active(task_vars=task_vars) == expect_active
@pytest.mark.parametrize("is_containerized,is_atomic", [
diff --git a/roles/openshift_health_checker/test/docker_storage_driver_test.py b/roles/openshift_health_checker/test/docker_storage_driver_test.py
deleted file mode 100644
index 34a8f827a..000000000
--- a/roles/openshift_health_checker/test/docker_storage_driver_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import pytest
-
-
-from openshift_checks.docker_storage_driver import DockerStorageDriver
-
-
-@pytest.mark.parametrize('is_containerized,is_active', [
- (False, False),
- (True, True),
-])
-def test_is_active(is_containerized, is_active):
- task_vars = dict(
- openshift=dict(common=dict(is_containerized=is_containerized)),
- )
- assert DockerStorageDriver.is_active(task_vars=task_vars) == is_active
-
-
-@pytest.mark.parametrize('info,failed,extra_words', [
- (
- {
- "Driver": "devicemapper",
- "DriverStatus": [("Pool Name", "docker-docker--pool")],
- },
- False,
- [],
- ),
- (
- {
- "Driver": "devicemapper",
- "DriverStatus": [("Data loop file", "true")],
- },
- True,
- ["Use of loopback devices is discouraged"],
- ),
- (
- {
- "Driver": "overlay2",
- "DriverStatus": []
- },
- False,
- [],
- ),
- (
- {
- "Driver": "overlay",
- },
- True,
- ["Unsupported Docker storage driver"],
- ),
- (
- {
- "Driver": "unsupported",
- },
- True,
- ["Unsupported Docker storage driver"],
- ),
-])
-def test_check_storage_driver(info, failed, extra_words):
- def execute_module(module_name, args, tmp=None, task_vars=None):
- if module_name != "docker_info":
- return {
- "changed": False,
- }
-
- return {
- "info": info
- }
-
- task_vars = dict(
- openshift=dict(common=dict(is_containerized=True))
- )
-
- check = DockerStorageDriver(execute_module=execute_module).run(tmp=None, task_vars=task_vars)
-
- if failed:
- assert check["failed"]
- else:
- assert not check.get("failed", False)
-
- for word in extra_words:
- assert word in check["msg"]
diff --git a/roles/openshift_health_checker/test/docker_storage_test.py b/roles/openshift_health_checker/test/docker_storage_test.py
index 73c433383..292a323db 100644
--- a/roles/openshift_health_checker/test/docker_storage_test.py
+++ b/roles/openshift_health_checker/test/docker_storage_test.py
@@ -1,243 +1,224 @@
import pytest
-import json
+from openshift_checks import OpenShiftCheckException
+from openshift_checks.docker_storage import DockerStorage
-from openshift_checks.docker_storage import DockerStorage, OpenShiftCheckException
+def dummy_check(execute_module=None):
+ def dummy_exec(self, status, task_vars):
+ raise Exception("dummy executor called")
+ return DockerStorage(execute_module=execute_module or dummy_exec)
-@pytest.mark.parametrize('is_containerized,is_active', [
- (False, False),
- (True, True),
+
+@pytest.mark.parametrize('is_containerized, group_names, is_active', [
+ (False, ["masters", "etcd"], False),
+ (False, ["masters", "nodes"], True),
+ (True, ["etcd"], True),
])
-def test_is_active(is_containerized, is_active):
+def test_is_active(is_containerized, group_names, is_active):
task_vars = dict(
openshift=dict(common=dict(is_containerized=is_containerized)),
+ group_names=group_names,
)
assert DockerStorage.is_active(task_vars=task_vars) == is_active
-@pytest.mark.parametrize('stdout,message,failed,extra_words', [
- (None, "", True, ["no thinpool usage data"]),
- ("", "", False, ["Invalid JSON value returned by lvs command"]),
- (None, "invalid response", True, ["invalid response"]),
- ("invalid", "invalid response", False, ["Invalid JSON value"]),
-])
-def test_get_lvs_data_with_failed_response(stdout, message, failed, extra_words):
- def execute_module(module_name, args, tmp=None, task_vars=None):
- if module_name != "command":
- return {
- "changed": False,
- }
-
- response = {
- "stdout": stdout,
- "msg": message,
- "failed": failed,
- }
-
- if stdout is None:
- response.pop("stdout")
-
- return response
+non_atomic_task_vars = {"openshift": {"common": {"is_atomic": False}}}
- task_vars = dict(
- max_thinpool_data_usage_percent=90.0
- )
-
- check = DockerStorage(execute_module=execute_module)
- with pytest.raises(OpenShiftCheckException) as excinfo:
- check.run(tmp=None, task_vars=task_vars)
-
- for word in extra_words:
- assert word in str(excinfo.value)
-
-@pytest.mark.parametrize('limit_percent,failed,extra_words', [
- ("90.0", False, []),
- (80.0, False, []),
- ("invalid percent", True, ["Unable to convert", "to float", "invalid percent"]),
- ("90%", True, ["Unable to convert", "to float", "90%"]),
+@pytest.mark.parametrize('docker_info, failed, expect_msg', [
+ (
+ dict(failed=True, msg="Error connecting: Error while fetching server API version"),
+ True,
+ ["Is docker running on this host?"],
+ ),
+ (
+ dict(msg="I have no info"),
+ True,
+ ["missing info"],
+ ),
+ (
+ dict(info={
+ "Driver": "devicemapper",
+ "DriverStatus": [("Pool Name", "docker-docker--pool")],
+ }),
+ False,
+ [],
+ ),
+ (
+ dict(info={
+ "Driver": "devicemapper",
+ "DriverStatus": [("Data loop file", "true")],
+ }),
+ True,
+ ["loopback devices with the Docker devicemapper storage driver"],
+ ),
+ (
+ dict(info={
+ "Driver": "overlay2",
+ "DriverStatus": []
+ }),
+ False,
+ [],
+ ),
+ (
+ dict(info={
+ "Driver": "overlay",
+ }),
+ True,
+ ["unsupported Docker storage driver"],
+ ),
+ (
+ dict(info={
+ "Driver": "unsupported",
+ }),
+ True,
+ ["unsupported Docker storage driver"],
+ ),
])
-def test_invalid_value_for_thinpool_usage_limit(limit_percent, failed, extra_words):
+def test_check_storage_driver(docker_info, failed, expect_msg):
def execute_module(module_name, args, tmp=None, task_vars=None):
- if module_name != "command":
- return {
- "changed": False,
- }
-
- return {
- "stdout": json.dumps({
- "report": [
- {
- "lv": [
- {"lv_name": "docker-pool", "vg_name": "docker", "lv_attr": "twi-aot---", "lv_size": "6.95g",
- "pool_lv": "", "origin": "", "data_percent": "58.96", "metadata_percent": "4.77",
- "move_pv": "", "mirror_log": "", "copy_percent": "", "convert_lv": ""},
- ]
- }
- ]
- }),
- "failed": False,
- }
-
- task_vars = dict(
- max_thinpool_data_usage_percent=limit_percent
- )
+ if module_name == "yum":
+ return {}
+ if module_name != "docker_info":
+ raise ValueError("not expecting module " + module_name)
+ return docker_info
- check = DockerStorage(execute_module=execute_module).run(tmp=None, task_vars=task_vars)
+ check = dummy_check(execute_module=execute_module)
+ check._check_dm_usage = lambda status, task_vars: dict() # stub out for this test
+ result = check.run(tmp=None, task_vars=non_atomic_task_vars)
if failed:
- assert check["failed"]
-
- for word in extra_words:
- assert word in check["msg"]
+ assert result["failed"]
else:
- assert not check.get("failed", False)
+ assert not result.get("failed", False)
+ for word in expect_msg:
+ assert word in result["msg"]
-def test_get_lvs_data_with_valid_response():
- def execute_module(module_name, args, tmp=None, task_vars=None):
- if module_name != "command":
- return {
- "changed": False,
- }
-
- return {
- "stdout": json.dumps({
- "report": [
- {
- "lv": [
- {"lv_name": "docker-pool", "vg_name": "docker", "lv_attr": "twi-aot---", "lv_size": "6.95g",
- "pool_lv": "", "origin": "", "data_percent": "58.96", "metadata_percent": "4.77",
- "move_pv": "", "mirror_log": "", "copy_percent": "", "convert_lv": ""}
- ]
- }
- ]
- })
- }
- task_vars = dict(
- max_thinpool_data_usage_percent="90"
- )
+enough_space = {
+ "Pool Name": "docker--vg-docker--pool",
+ "Data Space Used": "19.92 MB",
+ "Data Space Total": "8.535 GB",
+ "Metadata Space Used": "40.96 kB",
+ "Metadata Space Total": "25.17 MB",
+}
- check = DockerStorage(execute_module=execute_module).run(tmp=None, task_vars=task_vars)
- assert not check.get("failed", False)
+not_enough_space = {
+ "Pool Name": "docker--vg-docker--pool",
+ "Data Space Used": "10 GB",
+ "Data Space Total": "10 GB",
+ "Metadata Space Used": "42 kB",
+ "Metadata Space Total": "43 kB",
+}
-@pytest.mark.parametrize('response,extra_words', [
+@pytest.mark.parametrize('task_vars, driver_status, vg_free, success, expect_msg', [
(
- {
- "report": [{}],
- },
- ["no thinpool usage data"],
+ {"max_thinpool_data_usage_percent": "not a float"},
+ enough_space,
+ "12g",
+ False,
+ ["is not a percentage"],
),
(
- {
- "report": [
- {
- "lv": [
- {"vg_name": "docker", "lv_attr": "twi-aot---", "lv_size": "6.95g",
- "move_pv": "", "mirror_log": "", "copy_percent": "", "convert_lv": ""}
- ]
- }
- ],
- },
- ["no thinpool usage data"],
+ {},
+ {}, # empty values from driver status
+ "bogus", # also does not parse as bytes
+ False,
+ ["Could not interpret", "as bytes"],
),
(
- {
- "report": [
- {
- "lv": [],
- }
- ],
- },
- ["no thinpool usage data"],
+ {},
+ enough_space,
+ "12.00g",
+ True,
+ [],
),
(
- {
- "report": [
- {
- "lv": [
- {"lv_name": "docker-pool", "vg_name": "docker", "lv_attr": "twi-aot---", "lv_size": "6.95g",
- "pool_lv": "", "origin": "", "data_percent": "58.96",
- "move_pv": "", "mirror_log": "", "copy_percent": "", "convert_lv": ""}
- ]
- }
- ],
- },
- ["no thinpool usage data"],
+ {},
+ not_enough_space,
+ "0.00",
+ False,
+ ["data usage", "metadata usage", "higher than threshold"],
),
])
-def test_get_lvs_data_with_incomplete_response(response, extra_words):
- def execute_module(module_name, args, tmp=None, task_vars=None):
- if module_name != "command":
- return {
- "changed": False,
- }
-
- return {
- "stdout": json.dumps(response)
- }
+def test_dm_usage(task_vars, driver_status, vg_free, success, expect_msg):
+ check = dummy_check()
+ check._get_vg_free = lambda pool, task_vars: vg_free
+ result = check._check_dm_usage(driver_status, task_vars)
+ result_success = not result.get("failed")
- task_vars = dict(
- max_thinpool_data_usage_percent=90.0
- )
-
- check = DockerStorage(execute_module=execute_module)
- with pytest.raises(OpenShiftCheckException) as excinfo:
- check.run(tmp=None, task_vars=task_vars)
-
- assert "no thinpool usage data" in str(excinfo.value)
+ assert result_success is success
+ for msg in expect_msg:
+ assert msg in result["msg"]
-@pytest.mark.parametrize('response,extra_words', [
+@pytest.mark.parametrize('pool, command_returns, raises, returns', [
(
- {
- "report": [
- {
- "lv": [
- {"lv_name": "docker-pool", "vg_name": "docker", "lv_attr": "twi-aot---", "lv_size": "6.95g",
- "pool_lv": "", "origin": "", "data_percent": "100.0", "metadata_percent": "90.0",
- "move_pv": "", "mirror_log": "", "copy_percent": "", "convert_lv": ""}
- ]
- }
- ],
+ "foo-bar",
+ { # vgs missing
+ "msg": "[Errno 2] No such file or directory",
+ "failed": True,
+ "cmd": "/sbin/vgs",
+ "rc": 2,
},
- ["thinpool data usage above maximum threshold"],
+ "Failed to run /sbin/vgs",
+ None,
),
(
- {
- "report": [
- {
- "lv": [
- {"lv_name": "docker-pool", "vg_name": "docker", "lv_attr": "twi-aot---", "lv_size": "6.95g",
- "pool_lv": "", "origin": "", "data_percent": "10.0", "metadata_percent": "91.0",
- "move_pv": "", "mirror_log": "", "copy_percent": "", "convert_lv": ""}
- ]
- }
- ],
- },
- ["thinpool metadata usage above maximum threshold"],
+ "foo", # no hyphen in name - should not happen
+ {},
+ "name does not have the expected format",
+ None,
+ ),
+ (
+ "foo-bar",
+ dict(stdout=" 4.00g\n"),
+ None,
+ "4.00g",
),
+ (
+ "foo-bar",
+ dict(stdout="\n"), # no matching VG
+ "vgs did not find this VG",
+ None,
+ )
])
-def test_get_lvs_data_with_high_thinpool_usage(response, extra_words):
+def test_vg_free(pool, command_returns, raises, returns):
def execute_module(module_name, args, tmp=None, task_vars=None):
if module_name != "command":
- return {
- "changed": False,
- }
+ raise ValueError("not expecting module " + module_name)
+ return command_returns
+
+ check = dummy_check(execute_module=execute_module)
+ if raises:
+ with pytest.raises(OpenShiftCheckException) as err:
+ check._get_vg_free(pool, {})
+ assert raises in str(err.value)
+ else:
+ ret = check._get_vg_free(pool, {})
+ assert ret == returns
- return {
- "stdout": json.dumps(response),
- }
- task_vars = dict(
- max_thinpool_data_usage_percent="90"
- )
+@pytest.mark.parametrize('string, expect_bytes', [
+ ("12", 12.0),
+ ("12 k", 12.0 * 1024),
+ ("42.42 MB", 42.42 * 1024**2),
+ ("12g", 12.0 * 1024**3),
+])
+def test_convert_to_bytes(string, expect_bytes):
+ got = DockerStorage._convert_to_bytes(string)
+ assert got == expect_bytes
- check = DockerStorage(execute_module=execute_module).run(tmp=None, task_vars=task_vars)
- assert check["failed"]
- for word in extra_words:
- assert word in check["msg"]
+@pytest.mark.parametrize('string', [
+ "bork",
+ "42 Qs",
+])
+def test_convert_to_bytes_error(string):
+ with pytest.raises(ValueError) as err:
+ DockerStorage._convert_to_bytes(string)
+ assert "Cannot convert" in str(err.value)
+ assert string in str(err.value)