# Source: roles/calico_master/templates/calico-policy-controller.yml.j2
# blob: 811884473b5c02a2c0e6b796b28953f8002d4cca
# (cgit web-export header and line-number gutter removed)
---
# ServiceAccount used by the calico-policy-controller Deployment below.
# It is bound to the "calico" ClusterRole (next documents) so the controller
# can list/watch pods, namespaces, and network policies.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico
  namespace: kube-system
---
# Read-only ClusterRole for the policy controller: it watches pods and
# namespaces (core API group) plus NetworkPolicy objects (extensions group).
# NOTE(review): "apiVersion: v1" here is the legacy (pre-RBAC/OpenShift)
# authorization API rather than rbac.authorization.k8s.io — confirm this
# matches the API versions supported by the target cluster.
kind: ClusterRole
apiVersion: v1
metadata:
  name: calico
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources:
      - pods
      - namespaces
    verbs:
      - list
      - get
      - watch
  - apiGroups: ["extensions"]
    resources:
      - networkpolicies
    verbs:
      - list
      - get
      - watch
---
# Binds the "calico" ClusterRole to the "calico" ServiceAccount in
# kube-system (listed both as a subject and in the legacy userNames field).
# NOTE(review): SystemUser subjects and the userNames list are OpenShift
# legacy-authorization constructs, consistent with "apiVersion: v1" above —
# verify against the target cluster's supported APIs.
apiVersion: v1
kind: ClusterRoleBinding
metadata:
  name: calico
roleRef:
  name: calico
subjects:
- kind: SystemUser
  name: kube-system:calico
- kind: ServiceAccount
  name: calico
  namespace: kube-system
userNames:
  - system:serviceaccount:kube-system:calico
---
# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-policy-controller
  namespace: kube-system
  labels:
    k8s-app: calico-policy
  annotations:
    # Pre-1.6 alpha annotations: mark the pod critical and tolerate the
    # master / CriticalAddonsOnly taints so it can schedule on masters.
    scheduler.alpha.kubernetes.io/critical-pod: ''
    scheduler.alpha.kubernetes.io/tolerations: |
      [{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
       {"key":"CriticalAddonsOnly", "operator":"Exists"}]
spec:
  # The policy controller can only have a single active instance.
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-policy-controller
      namespace: kube-system
      labels:
        k8s-app: calico-policy
    spec:
      # The policy controller must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      serviceAccountName: calico
      containers:
        - name: calico-policy-controller
          # All Jinja expansions below are quoted so that an empty variable
          # (-> null), or a value containing ": ", "#", or a leading YAML
          # indicator, cannot break the parse of the rendered manifest.
          image: "{{ calico_url_policy_controller }}"
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              value: "{{ calico_etcd_endpoints }}"
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              value: "{{ calico_etcd_ca_cert_file }}"
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              value: "{{ calico_etcd_key_file }}"
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              value: "{{ calico_etcd_cert_file }}"
            # Since we're running in the host namespace and might not have KubeDNS
            # access, configure the container's /etc/hosts to resolve
            # kubernetes.default to the correct service clusterIP.
            - name: CONFIGURE_ETC_HOSTS
              value: "true"
          volumeMounts:
            # Mount in the etcd TLS secrets.
            - name: certs
              mountPath: "{{ calico_etcd_cert_dir }}"
      volumes:
        # Mount in the etcd TLS secrets.
        - name: certs
          hostPath:
            path: "{{ calico_etcd_cert_dir }}"