 roles/openshift_health_checker/openshift_checks/logging/logging.py |  4 ++--
 roles/openshift_health_checker/test/logging_check_test.py          | 30 ++++++++++++++++++++++++++
 2 files changed, 32 insertions(+), 2 deletions(-)
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging.py b/roles/openshift_health_checker/openshift_checks/logging/logging.py
index e8821f0ba..6e951e82c 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/logging.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging.py
@@ -54,12 +54,12 @@ class LoggingCheck(OpenShiftCheck):
"""Returns: list of pods not in a ready and running state"""
return [
pod for pod in pods
- if any(
+ if not pod.get("status", {}).get("containerStatuses") or any(
container['ready'] is False
for container in pod['status']['containerStatuses']
) or not any(
condition['type'] == 'Ready' and condition['status'] == 'True'
- for condition in pod['status']['conditions']
+ for condition in pod['status'].get('conditions', [])
)
]
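
The change above makes LoggingCheck.not_running_pods tolerate pods that have been created but not yet scheduled, which lack a status.containerStatuses field (and may lack status.conditions); previously the comprehension raised a KeyError on such pods instead of reporting them as not running. Below is a minimal standalone sketch of the patched predicate with illustrative pod dicts (the example data is an assumption for demonstration, not taken from the repository); the real method lives on LoggingCheck and operates on pods as returned by the oc client:

    def not_running_pods(pods):
        """Return pods that are not in a ready and running state."""
        return [
            pod for pod in pods
            # A missing or empty containerStatuses list now counts as "not running"
            # instead of raising a KeyError for unscheduled pods.
            if not pod.get("status", {}).get("containerStatuses") or any(
                container['ready'] is False
                for container in pod['status']['containerStatuses']
            ) or not any(
                condition['type'] == 'Ready' and condition['status'] == 'True'
                for condition in pod['status'].get('conditions', [])
            )
        ]

    ready_pod = {"status": {
        "containerStatuses": [{"ready": True}],
        "conditions": [{"type": "Ready", "status": "True"}],
    }}
    unscheduled_pod = {"status": {"conditions": [{"type": "Ready", "status": "True"}]}}

    print(not_running_pods([ready_pod, unscheduled_pod]))  # reports only unscheduled_pod
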
diff --git a/roles/openshift_health_checker/test/logging_check_test.py b/roles/openshift_health_checker/test/logging_check_test.py
index 3b9e3fa8d..128b76b12 100644
--- a/roles/openshift_health_checker/test/logging_check_test.py
+++ b/roles/openshift_health_checker/test/logging_check_test.py
@@ -50,6 +50,16 @@ plain_kibana_pod = {
     }
 }
 
+plain_kibana_pod_no_containerstatus = {
+    "metadata": {
+        "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+        "name": "logging-kibana-1",
+    },
+    "status": {
+        "conditions": [{"status": "True", "type": "Ready"}],
+    }
+}
+
 fluentd_pod_node1 = {
     "metadata": {
         "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
@@ -135,3 +145,23 @@ def test_get_pods_for_component(pod_output, expect_pods, expect_error):
         {}
     )
     assert_error(error, expect_error)
+
+
+@pytest.mark.parametrize('name, pods, expected_pods', [
+    (
+        'test single pod found, scheduled, but no containerStatuses field',
+        [plain_kibana_pod_no_containerstatus],
+        [plain_kibana_pod_no_containerstatus],
+    ),
+    (
+        'set of pods has at least one pod with containerStatuses (scheduled); should still fail',
+        [plain_kibana_pod_no_containerstatus, plain_kibana_pod],
+        [plain_kibana_pod_no_containerstatus],
+    ),
+
+], ids=lambda argvals: argvals[0])
+def test_get_not_running_pods_no_container_status(name, pods, expected_pods):
+    check = canned_loggingcheck(lambda exec_module, namespace, cmd, args, task_vars: '')
+    result = check.not_running_pods(pods)
+
+    assert result == expected_pods
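
The added parametrized test covers both a lone pod that is missing containerStatuses and a mixed set in which only the unscheduled pod should be reported. As an illustrative invocation (the path and -k expression are assumptions about how the suite is run, not part of the commit), the new cases can be selected with:

    pytest roles/openshift_health_checker/test/logging_check_test.py -k no_container_status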