author     Peter Portante <peter.portante@redhat.com>    2017-10-13 11:04:49 -0400
committer  Peter Portante <peter.portante@redhat.com>    2017-10-19 11:02:14 -0400
commit     578ac5b348fa3e9c7d0d05e3a0f579839ecd79dd (patch)
tree       72796eb1a2f540e9c179ced0bf3d23649b43b11e /roles/openshift_logging_curator/templates/curator.j2
parent     70d7173aef356f834c1d4c7cd533170f13f9f665 (diff)
Use "requests" for CPU resources instead of limits
We now use a CPU request to ensure logging infrastructure pods have a guaranteed minimum amount of CPU, instead of capping their CPU usage by default. The existing *_cpu_limit variables are still honored, so prior behavior is preserved for anyone who sets them. We do not want to cap an infra pod's CPU usage by default, since it should be free to use whatever resources it needs to complete its tasks. Bug 1501960 (https://bugzilla.redhat.com/show_bug.cgi?id=1501960)
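
For illustration (the values below are hypothetical, not part of this change): with curator_cpu_request and curator_memory_limit set and curator_cpu_limit left unset, the updated template renders a resources block along these lines, so memory stays bounded while CPU gets a guaranteed request rather than a cap:

    resources:
      limits:
        memory: "256Mi"
      requests:
        cpu: "100m"
        memory: "256Mi"
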
Diffstat (limited to 'roles/openshift_logging_curator/templates/curator.j2')
-rw-r--r--  roles/openshift_logging_curator/templates/curator.j2  15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/roles/openshift_logging_curator/templates/curator.j2 b/roles/openshift_logging_curator/templates/curator.j2
index e74918a40..e71393643 100644
--- a/roles/openshift_logging_curator/templates/curator.j2
+++ b/roles/openshift_logging_curator/templates/curator.j2
@@ -39,13 +39,26 @@ spec:
name: "curator"
image: {{image}}
imagePullPolicy: Always
+{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "") or (curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "") %}
resources:
+{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "") %}
limits:
+{% if curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "" %}
cpu: "{{curator_cpu_limit}}"
-{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
+{% endif %}
+{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
memory: "{{curator_memory_limit}}"
+{% endif %}
+{% endif %}
+{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "") %}
requests:
+{% if curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "" %}
+ cpu: "{{curator_cpu_request}}"
+{% endif %}
+{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
memory: "{{curator_memory_limit}}"
+{% endif %}
+{% endif %}
{% endif %}
env:
-
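
As a rough sketch of how these template variables might be fed from the role's settings (the openshift_logging_curator_* names and values here are assumptions for illustration, not taken from this diff):

    # Hypothetical inventory/vars sketch; names and values are assumptions
    openshift_logging_curator_cpu_request: 100m      # guaranteed minimum CPU for the curator pod
    openshift_logging_curator_memory_limit: 256Mi    # memory limit, also rendered as the memory request
    # openshift_logging_curator_cpu_limit left unset so CPU usage is not capped by default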