summaryrefslogtreecommitdiffstats
path: root/roles/openshift_health_checker/test/logging_index_time_test.py
blob: 22566b295510fe06558f1c43ae9550bc947d433b (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
import json

import pytest

from openshift_checks.logging.logging_index_time import LoggingIndexTime, OpenShiftCheckException


# Fixed stand-in UUID; tests stub check.generate_uuid to return this value.
SAMPLE_UUID = "unique-test-uuid"


def canned_loggingindextime(exec_oc=None):
    """Build a LoggingIndexTime check, optionally stubbing its exec_oc method."""
    # Constructed without task context, so any attempt to run a real
    # Ansible module from the check would fail loudly — which is what
    # these unit tests want.
    canned_check = LoggingIndexTime()
    if exec_oc:
        canned_check.exec_oc = exec_oc
    return canned_check


def _canned_pod(component, deploymentconfig, name, readiness, phase, **extra_status):
    """Return a minimal pod API object for these tests.

    readiness is an iterable of per-container ready flags; any extra
    keyword arguments are merged into the pod's status mapping.
    """
    status = {
        "containerStatuses": [{"ready": ready} for ready in readiness],
        "phase": phase,
    }
    status.update(extra_status)
    return {
        "metadata": {
            "labels": {"component": component, "deploymentconfig": deploymentconfig},
            "name": name,
        },
        "status": status,
    }


# An Elasticsearch pod with every container ready and phase Running.
plain_running_elasticsearch_pod = _canned_pod(
    "es", "logging-es-data-master", "logging-es-data-master-1",
    (True, True), "Running",
)
# A Kibana pod with every container ready and phase Running.
plain_running_kibana_pod = _canned_pod(
    "kibana", "logging-kibana", "logging-kibana-1",
    (True, True), "Running",
)
# A Kibana pod that must be filtered out: one container not ready, phase pending.
not_running_kibana_pod = _canned_pod(
    "kibana", "logging-kibana", "logging-kibana-2",
    (True, False), "pending",
    conditions=[{"status": "True", "type": "Ready"}],
)


@pytest.mark.parametrize('pods, expect_pods', [
    ([not_running_kibana_pod], []),
    ([plain_running_kibana_pod], [plain_running_kibana_pod]),
    ([], []),
])
def test_check_running_pods(pods, expect_pods):
    """running_pods() should keep only pods that are ready and Running."""
    check = canned_loggingindextime()
    assert check.running_pods(pods) == expect_pods


def test_bad_config_param():
    """A non-numeric timeout setting must surface as an InvalidTimeout error."""
    task_vars = dict(openshift_check_logging_index_timeout_seconds="foo")
    with pytest.raises(OpenShiftCheckException) as excinfo:
        LoggingIndexTime(task_vars=task_vars).run()
    assert excinfo.value.name == 'InvalidTimeout'


def test_no_running_pods():
    """run() must fail with kibanaNoRunningPods when no Kibana pod is running."""
    check = LoggingIndexTime()
    # Every component lookup yields only a not-running pod.
    check.get_pods_for_component = lambda *_: [not_running_kibana_pod]
    with pytest.raises(OpenShiftCheckException) as excinfo:
        check.run()
    assert excinfo.value.name == 'kibanaNoRunningPods'


def test_with_running_pods():
    """With running pods and stubbed round-trip helpers, run() reports no failure."""
    check = LoggingIndexTime()
    check.get_pods_for_component = lambda *_: [plain_running_kibana_pod, plain_running_elasticsearch_pod]
    check.curl_kibana_with_uuid = lambda *_: SAMPLE_UUID
    check.wait_until_cmd_or_err = lambda *_: None
    result = check.run()
    assert not result.get("failed")


@pytest.mark.parametrize('name, json_response, uuid, timeout', [
    (
        'valid count in response',
        {
            "count": 1,
        },
        SAMPLE_UUID,
        0.001,
    ),
    # NOTE: ids must be an explicit list. A callable passed as `ids` is invoked
    # once per individual parameter value — not per tuple — so the previous
    # `lambda argval: argval[0]` indexed into dicts/floats and never produced
    # the intended test id.
], ids=['valid count in response'])
def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout):
    """wait_until_cmd_or_err returns without raising when ES reports a match."""
    check = canned_loggingindextime(lambda *_: json.dumps(json_response))
    check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout)


@pytest.mark.parametrize('name, json_response, timeout, expect_error', [
    (
        'invalid json response',
        {
            "invalid_field": 1,
        },
        0.001,
        'esInvalidResponse',
    ),
    (
        'empty response',
        {},
        0.001,
        'esInvalidResponse',
    ),
    (
        'valid response but invalid match count',
        {
            "count": 0,
        },
        0.005,
        'NoMatchFound',
    )
    # NOTE: ids must be an explicit list. A callable passed as `ids` is invoked
    # once per individual parameter value — not per tuple — so the previous
    # `lambda argval: argval[0]` indexed into dicts/floats and never produced
    # the intended test ids.
], ids=['invalid json response', 'empty response', 'valid response but invalid match count'])
def test_wait_until_cmd_or_err(name, json_response, timeout, expect_error):
    """wait_until_cmd_or_err raises the named error for bad/empty ES responses."""
    check = canned_loggingindextime(lambda *_: json.dumps(json_response))
    with pytest.raises(OpenShiftCheckException) as error:
        check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, SAMPLE_UUID, timeout)

    assert expect_error == error.value.name


def test_curl_kibana_with_uuid():
    """A 404 from Kibana (UUID not yet indexed) yields the generated UUID."""
    check = canned_loggingindextime(lambda *_: json.dumps({"statusCode": 404}))
    check.generate_uuid = lambda: SAMPLE_UUID
    returned_uuid = check.curl_kibana_with_uuid(plain_running_kibana_pod)
    assert returned_uuid == SAMPLE_UUID


@pytest.mark.parametrize('name, json_response, expect_error', [
    (
        'invalid json response',
        {
            "invalid_field": "invalid",
        },
        'kibanaInvalidResponse',
    ),
    (
        'wrong error code in response',
        {
            "statusCode": 500,
        },
        'kibanaInvalidReturnCode',
    ),
    # NOTE: ids must be an explicit list. A callable passed as `ids` is invoked
    # once per individual parameter value — not per tuple — so the previous
    # `lambda argval: argval[0]` indexed into dicts and never produced the
    # intended test ids.
], ids=['invalid json response', 'wrong error code in response'])
def test_failed_curl_kibana_with_uuid(name, json_response, expect_error):
    """curl_kibana_with_uuid raises the named error for unexpected responses."""
    check = canned_loggingindextime(lambda *_: json.dumps(json_response))
    check.generate_uuid = lambda: SAMPLE_UUID

    with pytest.raises(OpenShiftCheckException) as error:
        check.curl_kibana_with_uuid(plain_running_kibana_pod)

    assert expect_error == error.value.name