From 92c95645afa582d4e90223698dd2806e04cbc849 Mon Sep 17 00:00:00 2001 From: Philip Haupt <“der.mad.mob@gmail.com”> Date: Fri, 24 Oct 2025 19:24:16 +0200 Subject: [PATCH] cilium 1.17.8 --- cilium/cilium-preflight.yaml | 293 +++ cilium/main.yaml | 40 +- cilium/src/values.yaml | 3850 +++++++++++++++++++++++++++++++++- cilium/src/values1.17.8.yaml | 65 +- cilium/src/values1.18.2.yaml | 2 +- 5 files changed, 4128 insertions(+), 122 deletions(-) create mode 100644 cilium/cilium-preflight.yaml diff --git a/cilium/cilium-preflight.yaml b/cilium/cilium-preflight.yaml new file mode 100644 index 0000000..82e0544 --- /dev/null +++ b/cilium/cilium-preflight.yaml @@ -0,0 +1,293 @@ +--- +# Source: cilium/templates/cilium-preflight/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "cilium-pre-flight" + namespace: kube-system +--- +# Source: cilium/templates/cilium-preflight/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium-pre-flight + labels: + app.kubernetes.io/part-of: cilium +rules: +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - namespaces + - services + - pods + - endpoints + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list + - watch + # This is used when validating policies in preflight. This will need to stay + # until we figure out how to avoid "get" inside the preflight, and then + # should be removed ideally. + - get +- apiGroups: + - "" + resources: + - secrets + verbs: + - get +- apiGroups: + - cilium.io + resources: + - ciliumloadbalancerippools + - ciliumbgppeeringpolicies + - ciliumbgpnodeconfigs + - ciliumbgpadvertisements + - ciliumbgppeerconfigs + - ciliumclusterwideenvoyconfigs + - ciliumclusterwidenetworkpolicies + - ciliumegressgatewaypolicies + - ciliumendpoints + - ciliumendpointslices + - ciliumenvoyconfigs + - ciliumidentities + - ciliumlocalredirectpolicies + - ciliumnetworkpolicies + - ciliumnodes + - ciliumnodeconfigs + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools + verbs: + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumidentities + - ciliumendpoints + - ciliumnodes + verbs: + - create +- apiGroups: + - cilium.io + # To synchronize garbage collection of such resources + resources: + - ciliumidentities + verbs: + - update +- apiGroups: + - cilium.io + resources: + - ciliumendpoints + verbs: + - delete + - get +- apiGroups: + - cilium.io + resources: + - ciliumnodes + - ciliumnodes/status + verbs: + - get + - update +- apiGroups: + - cilium.io + resources: + - ciliumendpoints/status + - ciliumendpoints + - ciliuml2announcementpolicies/status + - ciliumbgpnodeconfigs/status + verbs: + - patch +--- +# Source: cilium/templates/cilium-preflight/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium-pre-flight + labels: + app.kubernetes.io/part-of: cilium +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium-pre-flight +subjects: +- kind: ServiceAccount + name: "cilium-pre-flight" + namespace: kube-system +--- +# Source: cilium/templates/cilium-preflight/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cilium-pre-flight-check + namespace: kube-system 
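+# This pre-flight DaemonSet essentially verifies that the target v1.17.8 image
+# can be pulled and started on every node before the actual upgrade. A typical
+# flow (a sketch, not part of the rendered manifest) is roughly:
+#   kubectl apply -f cilium/cilium-preflight.yaml
+#   kubectl -n kube-system rollout status daemonset/cilium-pre-flight-check
+#   kubectl delete -f cilium/cilium-preflight.yaml   # remove before upgrading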
+spec: + selector: + matchLabels: + k8s-app: cilium-pre-flight-check + kubernetes.io/cluster-service: "true" + template: + metadata: + labels: + app.kubernetes.io/part-of: cilium + k8s-app: cilium-pre-flight-check + app.kubernetes.io/name: cilium-pre-flight-check + kubernetes.io/cluster-service: "true" + spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: cilium + topologyKey: kubernetes.io/hostname + initContainers: + - name: clean-cilium-state + image: "quay.io/cilium/cilium:v1.17.8@sha256:6d7ea72ed311eeca4c75a1f17617a3d596fb6038d30d00799090679f82a01636" + imagePullPolicy: IfNotPresent + command: ["/bin/echo"] + args: + - "hello" + terminationMessagePolicy: FallbackToLogsOnError + containers: + - name: cilium-pre-flight-check + image: "quay.io/cilium/cilium:v1.17.8@sha256:6d7ea72ed311eeca4c75a1f17617a3d596fb6038d30d00799090679f82a01636" + imagePullPolicy: IfNotPresent + command: ["/bin/sh"] + args: + - -c + - "touch /tmp/ready; sleep 1h" + livenessProbe: + exec: + command: + - cat + - /tmp/ready + initialDelaySeconds: 5 + periodSeconds: 5 + readinessProbe: + exec: + command: + - cat + - /tmp/ready + initialDelaySeconds: 5 + periodSeconds: 5 + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + volumeMounts: + - name: cilium-run + mountPath: /var/run/cilium + terminationMessagePolicy: FallbackToLogsOnError + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + restartPolicy: Always + priorityClassName: system-node-critical + serviceAccountName: "cilium-pre-flight" + automountServiceAccountToken: true + terminationGracePeriodSeconds: 1 + nodeSelector: + kubernetes.io/os: linux + tolerations: + - operator: Exists + volumes: + # To keep state between restarts / upgrades + - name: cilium-run + hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + - name: bpf-maps + hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate +--- +# Source: cilium/templates/cilium-preflight/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cilium-pre-flight-check + namespace: kube-system + labels: + app.kubernetes.io/part-of: cilium + app.kubernetes.io/name: cilium-pre-flight-check +spec: + selector: + matchLabels: + k8s-app: cilium-pre-flight-check-deployment + kubernetes.io/cluster-service: "true" + template: + metadata: + labels: + app.kubernetes.io/part-of: cilium + k8s-app: cilium-pre-flight-check-deployment + kubernetes.io/cluster-service: "true" + app.kubernetes.io/name: cilium-pre-flight-check + spec: + containers: + - name: cnp-validator + image: "quay.io/cilium/cilium:v1.17.8@sha256:6d7ea72ed311eeca4c75a1f17617a3d596fb6038d30d00799090679f82a01636" + imagePullPolicy: IfNotPresent + command: ["/bin/sh"] + args: + - -ec + - | + cilium-dbg preflight validate-cnp; + touch /tmp/ready-validate-cnp; + sleep 1h; + readinessProbe: + exec: + command: + - cat + - /tmp/ready-validate-cnp + initialDelaySeconds: 5 + periodSeconds: 5 + env: + - name: KUBERNETES_SERVICE_HOST + value: "localhost" + - name: KUBERNETES_SERVICE_PORT + value: "7445" + terminationMessagePolicy: FallbackToLogsOnError + hostNetwork: true + restartPolicy: Always + priorityClassName: system-cluster-critical + serviceAccountName: "cilium-pre-flight" + automountServiceAccountToken: true + terminationGracePeriodSeconds: 1 + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: cilium + topologyKey: kubernetes.io/hostname + 
nodeSelector: + kubernetes.io/os: linux + tolerations: + - operator: Exists diff --git a/cilium/main.yaml b/cilium/main.yaml index e572492..da2394a 100644 --- a/cilium/main.yaml +++ b/cilium/main.yaml @@ -768,8 +768,6 @@ data: bpf-map-dynamic-size-ratio: "0.0025" bpf-policy-map-max: "16384" bpf-root: /sys/fs/bpf - ces-rate-limits: '[{"burst":20,"limit":10,"nodes":0},{"burst":100,"limit":50,"nodes":100}]' - ces-slice-mode: identity cgroup-root: /sys/fs/cgroup cilium-endpoint-gc-interval: 5m0s cluster-id: "1" @@ -781,7 +779,6 @@ data: custom-cni-conf: "false" datapath-mode: veth debug: "false" - debug-verbose: "" default-lb-service-ipam: lbipam devices: eth+ direct-routing-skip-unreachable: "false" @@ -790,7 +787,6 @@ data: egress-gateway-reconciliation-trigger-interval: 1s enable-auto-protect-node-port-range: "true" enable-bpf-clock-probe: "false" - enable-cilium-endpoint-slice: "true" enable-endpoint-health-checking: "true" enable-endpoint-lockdown-on-policy-overflow: "false" enable-envoy-config: "true" @@ -889,11 +885,9 @@ data: nat-map-stats-entries: "32" nat-map-stats-interval: 30s node-port-bind-protection: "true" - nodeport-addresses: "" nodes-gc-interval: 5m0s operator-api-serve-addr: 127.0.0.1:9234 operator-prometheus-serve-addr: :9963 - policy-cidr-match-mode: "" policy-secrets-namespace: cilium-secrets policy-secrets-only-from-secrets-namespace: "true" preallocate-bpf-maps: "false" @@ -971,8 +965,8 @@ metadata: --- apiVersion: v1 data: - ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFekNDQWZ1Z0F3SUJBZ0lRSUZ3eThzeXlxQ2loRTFRQUZPWkdqVEFOQmdrcWhraUc5dzBCQVFzRkFEQVUKTVJJd0VBWURWUVFERXdsRGFXeHBkVzBnUTBFd0hoY05NalV4TURJeU1qSXlPVE0zV2hjTk1qZ3hNREl4TWpJeQpPVE0zV2pBVU1SSXdFQVlEVlFRREV3bERhV3hwZFcwZ1EwRXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCCkR3QXdnZ0VLQW9JQkFRREY1YjJsR2tZcjJjNm53MVVlS3UvOWkxazU2ZzNITUZnSXZ4MS9TSks1L2VIek1zT1oKQ1FTdUg1QlhoNnpGbDlQTktFaXNNdE1LRjdsSmF1TWFpajFxY1lqcURkVlFQM1F3V2tOa1hPNndtNU02YlQ5agpPUEt5bXljTHNtTFEzLzE5NUtZME5KbkJMNWtLYit2UDdBeHllOGpGeUoxQWFLNHhqZHp0REMwZjM5VHJKaysvCjMxdjZqUnpZczRwL29rN1pyRDd3Yi9Td2dCdzJocGVuLzdhN0Z3akQzVDhxMUF2bDdESkQ5bzBwMm1QZFA0OGgKNnMxUHB3eUUvcWZ4QVovRm85YTBHdWs0dVlMTmJvRjBEVVNZS3dVOVU2SzZLMEp6OXF5Mjh3NUtKaGRDUnRJVgpWdGRGNDJMeUhNaVZmT3pBQzFKQllLSTgvWTdjUndrc3hIOS9BZ01CQUFHallUQmZNQTRHQTFVZER3RUIvd1FFCkF3SUNwREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFIL0JBVXcKQXdFQi96QWRCZ05WSFE0RUZnUVV2dHJyVWhScHBuOTE3ZlBLc2JBLzVnZExFVUF3RFFZSktvWklodmNOQVFFTApCUUFEZ2dFQkFKS09WRVpDSzlnQkxLaUxVZ1dTd2dybms4cWp6cHR5YUFlSEtsUUhIS1FNYXBycWdUZkVPRmQ4CkYzRUxwQmZYeHBRNTJBa3N4c0JjZ2ZJRnphclRrRU8yOHNCR0pheEhxbENaQVpBY2ZhSWVJdVR4TlNnQXNtV0UKV2J1bDduWGZmL2FsNXJsRWJnZXdiMVpzSVVaNGF4L3FJZzJFd3l5VGhNQ0lpdi9HZFBmY2NWZ2ZLVlJOdGJ4WQpaOWxXdTlNOVRSU0JsOUVtQTNuWFVsRnBnMmZVYSsxNzNpeWpGa05pRXh0RldzaXVlcUxOd3F4czY5R0pEVFh5CnpUVklWWUFWeWdmcFM1cTJ0eERKdzNlcWNDZUlvREljelI0eU1nSUdkcG1nWTdYZmZYOVI2aTd2UFNEcDYwRjUKZTRHWG54NUhtYjBYK2tueXRxZ1pTNWphV25ZVzNxST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= - ca.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBeGVXOXBScEdLOW5PcDhOVkhpcnYvWXRaT2VvTnh6QllDTDhkZjBpU3VmM2g4ekxECm1Ra0VyaCtRVjRlc3haZlR6U2hJckRMVENoZTVTV3JqR29vOWFuR0k2ZzNWVUQ5ME1GcERaRnp1c0p1VE9tMC8KWXpqeXNwc25DN0ppME4vOWZlU21ORFNad1MrWkNtL3J6K3dNY252SXhjaWRRR2l1TVkzYzdRd3RIOS9VNnlaUAp2OTliK28wYzJMT0tmNkpPMmF3KzhHLzBzSUFjTm9hWHAvKzJ1eGNJdzkwL0t0UUw1ZXd5US9hTktkcGozVCtQCkllck5UNmNNaFA2bjhRR2Z4YVBXdEJycE9MbUN6VzZCZEExRW1Dc0ZQVk9pdWl0Q2MvYXN0dk1PU2lZWFFrYlMKRlZiWFJlTmk4aHpJbFh6c3dBdFNRV0NpUFAyTzNFY0pMTVIvZndJREFRQUJBb0lCQUFtVlhkODZkcXJyVExQMQpBTkM3a3N6ayttRjMvZWExZFNDWmgyUEUramtuZUh3WmxmWlBHbFBBa0J5QmVSMWloQ1p0VUVPaWlxekoxaG56CmxPTlkzQ0RkNE1hTlViaXorUkZuN29JYlZxemxJRVRXSDZmaG0wcFFMSnljZEJIVEYvTWs5WEg1YmxzcDZteFUKUDRleTRCRnY1Q2o5bkh2a1o2VmJQL3JzeURINXRiOFdtbGpxbExQZlo0eENoQWlkRVE4L1N1UksvYy8vT2h5dgpITTN0RndyNmhlclRjTW5BZ0piS0JNR0NBMTlCOWxZRFNJY2NmTUJ2Z2tvZWVGNEVTWlJrUksyRVZtTVRBOHlFClFQRnB5YnZyYTZtNG1nVEdYL3NBMFBIczZkMXRkODVhTU1zbmFoeHdpSS9DdFYzVzVSOEpqd1lZeFR3VWllUWIKRkcyek5qMENnWUVBK2hBdlYvQWhndnl6VjBWaWtVUmZlMUk1U0ZXSXhEK2licDZweUgxN3dVbHdDMmFBRzZGSwpRWVFLWFAwOUhYZGcvMUYvODdYYldEZWlZTU1DWFBZREJMaVorNkRXN1RHUkJPeWdJS1RwT2l5UTNnTE0zdmhXClgwVHE5MEJJT1cyMjRnSFVsUVJodFExTDFyYjZPU3o0eXRuTjFZQjVRSjB4MWFLaFNpRjVQMlVDZ1lFQXlwaUIKcjRTOWhqczBKWjRlczZMbEtLZ0gydGRrTUlKUURaUmsxRkQrMWtNdExoTVYwZEN4NUlDZWlnUWhHZ1ZWRWJkeQo2cFFONUJ3Q0IvK1JkelBSVkJBRFMwb2JGVHpURWs3T3Q2NjVCcE9iY1JCSVpkWDAyOEpjVVRDT1pIclZQRzFoClEraC9YMEltK3BXMHZ2dFUrcjVGV2ZpVSt2cmNDSGtwbHpTb2J4TUNnWUFXc0FVN20wUFBHSkpFNXJ5c3BXRnMKVk9keGtxRVgvUllGcDB1TUx5WmRRUVp6WWxkWkttY01UcWlLQzBmU05wU01WWjZnUSsya3NWZUFJL0ZBOGVrUQpNcGQxNXYvWkFWaEE5QjhMRnlzNTVmVTJ6MVk3SEpqcGg5MHhsWmwzN0daZFZ3RXNpL3M1YS8xbmU2NTBKWXBsCkRTSEpyeUJSSm5IeENoSk1JWUZZcVFLQmdENlpaT2p3NFFiSzRGQUhUWUZBQWdMUCtLMWNCRzhndE0rZFhKSmwKL0hnRUJiL0kwZU1rMHZMQjh6cStEK1JVWlhhMThtSWRJVFdzbWN3UzRjV3RVeTdnUzhSRnY1eU9VTUhGYkFaQgpqSlpGb2JBTDBmTDFhMVZEOXZaM1JHZ3p0Qk9aMElZRkh1WTJNSWtaSE9JdUVwSHZuTW95RHdhRjROc1ZaUTU5CmpOWFZBb0dCQUtxOGcvcnNQVysraFd5Yy9URHZmMEEyL1N0UFhaemVKV1RsQWVHZHVYRjgrQlhYVWhLU2J6U2MKNFQrSDgybkVBRVR0UWpmK04vVjlwZ2dVOEszQW42NjBJVnY0OXBndWJRMGpuVHBGK05TdmpMc0EyRDNTUURBdQpvNVNST3M3NnFQaWRueEszSHlRRk1qQkhKbGk0OWVINFp2bkVaK21iWG11amdkSkxPbGhlCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== + ca.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFekNDQWZ1Z0F3SUJBZ0lRSVl0VE1lazQyektUb2pzV0I5Wm05ekFOQmdrcWhraUc5dzBCQVFzRkFEQVUKTVJJd0VBWURWUVFERXdsRGFXeHBkVzBnUTBFd0hoY05NalV4TURJek1qQTFOek0xV2hjTk1qZ3hNREl5TWpBMQpOek0xV2pBVU1SSXdFQVlEVlFRREV3bERhV3hwZFcwZ1EwRXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCCkR3QXdnZ0VLQW9JQkFRQzh2am5vbVJDL3M1VUlQQ0lrV29tcC9nWTJ3UFUwU0hIVjBzYytNRXk5TndqSy92czQKMHhkQkpTblhyai92aFcxY0RqUFZHN2hNeHZONUJtZEZZTUtIYitqREl3d0JmYkR3SVQ0SktVV0dDVi9FMVFkMwp4dHlrYUF0V2doMHFOd05Wb2VHZmZ5UDFnNHhhYStJcGRyTk03K2lSbGZ4ekQyUGx3L2RsVzZLQzFaY2ErK0VyCjdhTS82bnV3cm50ZWM5dmpJWnUxbTZvN3UxNUF0NW9GcmxVUEoyUUloS09sMUdtdUprV0NEVUtaZmRSZXdLYTQKOXpJKzBvVS8xV1RZVFJVV3k4bGFSTXNGL1FlNjZRc3BkM0JOZGxLVlhQWlgvNlgyUjdqZWNsRzdJT2FveVNLTgp2WHZwVkpBbFJFU3A2U2ZFNy9HVzJhbm4zaVBVMWkvYlhXSUJBZ01CQUFHallUQmZNQTRHQTFVZER3RUIvd1FFCkF3SUNwREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFIL0JBVXcKQXdFQi96QWRCZ05WSFE0RUZnUVVQYU1uTDdVOEEza0RtYlVOay91OTBua3dIZWN3RFFZSktvWklodmNOQVFFTApCUUFEZ2dFQkFKSlNXd0c3S0VzNzZjSFNkQVJpb2Y5VFVZK1VFK0U0TWJSMytPYmFFWlJ2bXZmMGZ4UlVIQS9RCm1VSW1RTGYvTHRlK2RDRGx5RmxFK0ZSazM0RUErT3V0eDRYczEwRElKRzY4bEFsbGRtTk1xNXRKQU5zV25DVW4KVVZTR0U0eTZwRkFxU3RzVE5neDBNeUQ3Y3cvMnQ1NlpzT2FxSW5TVXNKUXZZZWhYWGwwV1BORGpwZVhFcGNrSgpkb0k5Z3VReERHRTc3SUs5QmNvSXY2d1p2elVrSFRBUndGSWNIaC8zUjJvQ1lzTXZvN29Jd3Z0cHcybFMxU1JwCkFqcTRjejdtRzVxaC9MUE5vd1FZc0QwUDhaUG9XN0UxRTBGU3BCK0pVOTUzVTJNOW16ME5pUzJQa1VPeXl6dXQKYTdWUDZBb09VMjRHb3J3SEVCSWduaWpQMk0rYnNRVT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + ca.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdkw0NTZKa1F2N09WQ0R3aUpGcUpxZjRHTnNEMU5FaHgxZExIUGpCTXZUY0l5djc3Ck9OTVhRU1VwMTY0Lzc0VnRYQTR6MVJ1NFRNYnplUVpuUldEQ2gyL293eU1NQVgydzhDRStDU2xGaGdsZnhOVUgKZDhiY3BHZ0xWb0lkS2pjRFZhSGhuMzhqOVlPTVdtdmlLWGF6VE8vb2taWDhjdzlqNWNQM1pWdWlndFdYR3Z2aApLKzJqUCtwN3NLNTdYblBiNHlHYnRadXFPN3RlUUxlYUJhNVZEeWRrQ0lTanBkUnByaVpGZ2cxQ21YM1VYc0NtCnVQY3lQdEtGUDlWazJFMFZGc3ZKV2tUTEJmMEh1dWtMS1hkd1RYWlNsVnoyVi8rbDlrZTQzbkpSdXlEbXFNa2kKamIxNzZWU1FKVVJFcWVrbnhPL3hsdG1wNTk0ajFOWXYyMTFpQVFJREFRQUJBb0lCQUJMdURVYkwyMGN1enJxMQp0VUxxSmJaNytNbVREc1RBbzFJcndybjFCQzFTRUxxeldpVDMzdlEwc3Y2anhsdlNpUVpia2sxRWEzYWYyWm1BCnorakFiS0pDMGhpSU1sTVA5U2dRWFhWend6cFBSR0NzY1FSTldLSFFvWnA2a0V4RW1qd0RjV3FheU5OOTd4eSsKaHo5YlhWNVJEWVZVZ3E1VGs5UmVsRGlCT1VwZno1SWJFbmJpRHJkZWhvU0MxVXVsS3JLbzlvTDBoSDBFNVpqOQp1bDMzRlMxWkdrQ3VXTm10NzB5OEtPaXBWcWtab2I5UVYxNE5kY094cU9pQjVwZGZnREJqS21tSTBaL0JuYVZkCjlObkU1WXFKci9NQWhXbnNablptMVpTY0xLMXNQN2dVY0NER21nM1lPWDBqUytxSTRUNlF1NEhnaHVJTGZ6QUwKUEo5T0tyVUNnWUVBME1IU3AzV202M3VoY3BMV2xQL0RreDVwWmh3NjRQNEV5YmU5Y0k2SlRML0EwSU5ydGFsbwpHWHRxdElSRDBxYk9ETDE5c3dwWVhZMFNJVTJJMGJsWHZUaUxqaGNrOU1sRlFwS1o1NjlTM0VCNUVabUtLbjhUCkNFMnAzc2JET201Vmc2Sm1RbmJIQ0d2UjB2L0FiN2hQQ2xhdG81WGorTjJGR2t4SU1rUUM3ajhDZ1lFQTUzVG4KeFp5MDRFdE9URUtZaWxHcGlIUXdvS0VXcHZ4T1MyV0Z4aFdHZ3dRYTF1Z3gxY3g4eWV2S3V3K3haUEtpNU91Ygp1Wml6S3h5NUZYWWFtZ0VCSDFJNHpPWENSNDBZZGR5U2dqOFIzeC9Bdi9heitRdzVtWFhIYU1jY21YQXpTdDFvClc5c0lrWFpWWkMxUUVhRk5BaEtLQ3VtMCtIeDh2aVpHT1ZDc0g3OENnWUFocjFUZGZxZUxrZXh3UDI5dXBZS2EKWjZyY05pdnVDQzhmbVkxdzd4OEtpbHFEaXVGRGMrMS9SeUhsdFAzNHJML000SHE2L0MxY0V4cGMwMVVEZW1QRQovYTNQSkw3cTNOdFhMYTYxNnQxMCt0Wk9WN2NxdWt5STUzZEVvay80U3J0enZTM0JCY2VCL1Z2akx5K1BGMjl4Cm5LRHlKNHFjcXFvQ24xSjdBZXh6SXdLQmdRQy9tUk8vanRCZ0Y1YVNKRkszdFkvOGlBbzAvZ1NOYUxDN1V5Z2cKNkhLNEErN2YxY2hqTG1waWtGRDY5cXVuWC8rZU5yZHJOTStrTVp1NCsxSmNCOWJSQWJSSlVTeTVKRUNLV0hQagpZNmc4cHNGZFp6Qm1Ta3RvRUlwbzN1SjYxZGx2OE1aUnU0aGxPYXJJU1laTmdkUUlraVk1a0dzaGc1T1RxSVZiCkxyKzlhd0tCZ0VJaXNHQkxSOTAvOWJ3cmx2NjI4bTlqblR3anRqbE9rODAvbzh4S2IyV0VQd2g0T0JKR0R4WUMKbG9T
V2ljaTZqT0NKSE9QSkVYcjR5eDZmLzhnQzE1UHBqbXFVWTR0NFdSeTVMWDByaWpLNGxVd0dONkNBaFJNNgozNmN5cGZBK21pRzgrcXNuZkJQSXRHNDd1c2ZOQWlTdW54bzg2bG1KRHB5VXF1WU5ZNkNOCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== kind: Secret metadata: name: cilium-ca @@ -980,9 +974,9 @@ metadata: --- apiVersion: v1 data: - ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFekNDQWZ1Z0F3SUJBZ0lRSUZ3eThzeXlxQ2loRTFRQUZPWkdqVEFOQmdrcWhraUc5dzBCQVFzRkFEQVUKTVJJd0VBWURWUVFERXdsRGFXeHBkVzBnUTBFd0hoY05NalV4TURJeU1qSXlPVE0zV2hjTk1qZ3hNREl4TWpJeQpPVE0zV2pBVU1SSXdFQVlEVlFRREV3bERhV3hwZFcwZ1EwRXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCCkR3QXdnZ0VLQW9JQkFRREY1YjJsR2tZcjJjNm53MVVlS3UvOWkxazU2ZzNITUZnSXZ4MS9TSks1L2VIek1zT1oKQ1FTdUg1QlhoNnpGbDlQTktFaXNNdE1LRjdsSmF1TWFpajFxY1lqcURkVlFQM1F3V2tOa1hPNndtNU02YlQ5agpPUEt5bXljTHNtTFEzLzE5NUtZME5KbkJMNWtLYit2UDdBeHllOGpGeUoxQWFLNHhqZHp0REMwZjM5VHJKaysvCjMxdjZqUnpZczRwL29rN1pyRDd3Yi9Td2dCdzJocGVuLzdhN0Z3akQzVDhxMUF2bDdESkQ5bzBwMm1QZFA0OGgKNnMxUHB3eUUvcWZ4QVovRm85YTBHdWs0dVlMTmJvRjBEVVNZS3dVOVU2SzZLMEp6OXF5Mjh3NUtKaGRDUnRJVgpWdGRGNDJMeUhNaVZmT3pBQzFKQllLSTgvWTdjUndrc3hIOS9BZ01CQUFHallUQmZNQTRHQTFVZER3RUIvd1FFCkF3SUNwREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFIL0JBVXcKQXdFQi96QWRCZ05WSFE0RUZnUVV2dHJyVWhScHBuOTE3ZlBLc2JBLzVnZExFVUF3RFFZSktvWklodmNOQVFFTApCUUFEZ2dFQkFKS09WRVpDSzlnQkxLaUxVZ1dTd2dybms4cWp6cHR5YUFlSEtsUUhIS1FNYXBycWdUZkVPRmQ4CkYzRUxwQmZYeHBRNTJBa3N4c0JjZ2ZJRnphclRrRU8yOHNCR0pheEhxbENaQVpBY2ZhSWVJdVR4TlNnQXNtV0UKV2J1bDduWGZmL2FsNXJsRWJnZXdiMVpzSVVaNGF4L3FJZzJFd3l5VGhNQ0lpdi9HZFBmY2NWZ2ZLVlJOdGJ4WQpaOWxXdTlNOVRSU0JsOUVtQTNuWFVsRnBnMmZVYSsxNzNpeWpGa05pRXh0RldzaXVlcUxOd3F4czY5R0pEVFh5CnpUVklWWUFWeWdmcFM1cTJ0eERKdzNlcWNDZUlvREljelI0eU1nSUdkcG1nWTdYZmZYOVI2aTd2UFNEcDYwRjUKZTRHWG54NUhtYjBYK2tueXRxZ1pTNWphV25ZVzNxST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= - tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURTRENDQWpDZ0F3SUJBZ0lRRDVhcGM4NThYdDNzM1ZvcU9WcjEwekFOQmdrcWhraUc5dzBCQVFzRkFEQVUKTVJJd0VBWURWUVFERXdsRGFXeHBkVzBnUTBFd0hoY05NalV4TURJeU1qSXlPVE0zV2hjTk1qWXhNREl5TWpJeQpPVE0zV2pBak1TRXdId1lEVlFRRERCZ3FMbWgxWW1Kc1pTMXlaV3hoZVM1amFXeHBkVzB1YVc4d2dnRWlNQTBHCkNTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFEdjhBQVdXNVlpS0plVm5CaHVIV1kyRFRSUE9BVUQKVnVTL1dRcHk3dW0rWUE2T1ZzanJrcUh3RVhpbTNyUkhGM0J5czlGcmRMemlvb2ZiTk1JZXoyakdyTkFIeGdLMgppV3JPdmE0bkdaVVNiaVRNRUh0cHczN2xabkdxMzhJRCtjYUNGaHBLWHk1UGlnQUVMWE9JOXMzakxqUVA1dUhxClVVN0ZDOWVjL1FHalVMMW5WVnJNdTA0bkgwaHQvNEtxd2RxNzgwVUliMEVua1YzOGhyVlkvRVZZcGE1VmVBK3UKWk05eVZZeVhHdXdZZ1RBZHdIYUhiTEhsdjdrdkdiYWQ2RTRDSDR1V1hvMFB1OCtVZFpRV2dGRWdhSDVCVGVaMApON1ptdHpGVDdzeGF6ZjZEMVRUeElnelBaNlNheFMxYkNEUGFOS1JBMmlReUY1SnNhUGlwRDhjSkFnTUJBQUdqCmdZWXdnWU13RGdZRFZSMFBBUUgvQkFRREFnV2dNQjBHQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CQmdnckJnRUYKQlFjREFqQU1CZ05WSFJNQkFmOEVBakFBTUI4R0ExVWRJd1FZTUJhQUZMN2E2MUlVYWFaL2RlM3p5ckd3UCtZSApTeEZBTUNNR0ExVWRFUVFjTUJxQ0dDb3VhSFZpWW14bExYSmxiR0Y1TG1OcGJHbDFiUzVwYnpBTkJna3Foa2lHCjl3MEJBUXNGQUFPQ0FRRUFrZWs0bWJnM0dHb3VHVjJhUk5pRVR1RmRFcEptaHRjWEJPRW02VXdqS2dKc01EOGsKTWRzSFNCQ0J5TGJIc0VYaTlvVVpoYTB2SHhHdE0rYnRFL1gvK2pKNWFVelNUZC9MZ3lNSHBhWkN3MHpFRFp5RQozMFlHRjUrUlc5MTN3SWM2MDRENldBc0V0aDVPNHd4cWpXSExLNnNaS1BWbzVja0pVaFJUY3pKR25vOHdTbWZhCjNuMDYzYVA5OTRzNVJzWUQyL2JHWlZpWDlLRXdPRVJia1JIRzNRcGttajJYSHg1WStINWFueGVMQ2dJcmlJdVkKaDBUcmh3bm45Z1dXajUzdU1aakMrTTNHZ3hEcExqYXp4Tlg4WkNGaFJzN0dqWTNia1Rwc1RSYS9WL213ZXNvLwpBRklJaXY2cW50amFYQ2d4ZGNRVFQ1eUl3QWZRVHFXZXM2Q0JaUT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K - tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBNy9BQUZsdVdJaWlYbFp3WWJoMW1OZzAwVHpnRkExYmt2MWtLY3U3cHZtQU9qbGJJCjY1S2g4QkY0cHQ2MFJ4ZHdjclBSYTNTODRxS0gyelRDSHM5b3hxelFCOFlDdG9scXpyMnVKeG1WRW00a3pCQjcKYWNOKzVXWnhxdC9DQS9uR2doWWFTbDh1VDRvQUJDMXppUGJONHk0MEQrYmg2bEZPeFF2WG5QMEJvMUM5WjFWYQp6THRPSng5SWJmK0Nxc0hhdS9ORkNHOUJKNUZkL0lhMVdQeEZXS1d1VlhnUHJtVFBjbFdNbHhyc0dJRXdIY0IyCmgyeXg1Yis1THhtMm5laE9BaCtMbGw2TkQ3dlBsSFdVRm9CUklHaCtRVTNtZERlMlpyY3hVKzdNV3MzK2c5VTAKOFNJTXoyZWttc1V0V3dnejJqU2tRTm9rTWhlU2JHajRxUS9IQ1FJREFRQUJBb0lCQUR4TGJtb3Y1OEY3dStQLwp6dS9VK1h0NXcveG9Vbkl0WXR4bTdzWkZIWDRXdTNHdmcxd1hoQlJFLzdISFl6dytPcWJJWTBjQ2xmako2U1AvCjFReFRDTHhzYnZhVkh0ZHFIdW9ISm4xeTQxQTNWNDNrVW04WWpvbnRQWUU0SzVRK0wxRS9acW5yMDBKdnBtTm0KWEpNOW1pMk56YzExTXE1a2NrdjRGdDIvZ3FyY1FhMFBCQ3NCUnBIRTJBZEIwNzI2NXlHTU1ITWcvQTlTK2lWMQptTldjN0JUZTFkNDBBWnhuNk1XU3BZTDFRdDRHMWcwTnVjWkd0MldxSFBPN0taWm1PVkdRa08ycU5ISzBrSnVmCjdqNUNGL1JuV2NHYXEzUzlhZFZtendJTHV1bm9DSXpOWThiZHIzcURJMlFyK20rNmZEaWlHSERwMkQxdFpPbXgKZzZpdWRTVUNnWUVBOER4MU1Ddng3bytkU3JUUTJWV0pHZU96Z1kvaDZabURjR0RmbzBHeGg0NkxwcEdWcHp0MAphbTUzUWJiT0xPcGRXOE5VNUxZVWlLU2hSRFplYS8yK2VHSEhTWWExOEVzTXkydXN1ODFwK3d0ZU1EMGRHUmNwCktjanorcEducnY1STlPYlUvTUJtSFVHRHN6enlnU1lBZk5XWDBaWjFIbEgrSzFvYlJQeU5wY2NDZ1lFQS82NkcKakw1a1I0OC9UeGkwWDhDM2FTN2ZiMnJwdUJ5VWZRTnlQeEsxek5MMmwrQlZJTlJvM0UrQUs0dCtTaFNERFpMZApmeklSdkJpU1JMRjVlTVNpSnZVc0RzclRmMXdaM1FBSXo5dUdnRU1JQUc5b3N1S1Z1c2t1K2YrT3hsbUpOajVpCkg5Z0w0QUcvbWdTWHRCZTY4UzdqYUo4eXdBZkdMRUFUTS9WWTdLOENnWUVBdWxGcFllN3pOOTVTVTNwZzcrNFMKNHUxaWJ2MjBiSVFQR0wxSk5Vamg4d3h1NnNUR21HallKU04vZ292UTQ2endzeGFQWk1LQ3NtZGNXNWIvTENvago2eUdLZWsvc0UyMWhndk1EaUxRTG1oUjYvRmFwbHFnTDJNaHQwTXZlalMzU1QzNG5lYytPWXVwSXhsNWloaDkvCkNWSnU2TElVSzJ2VUFMTmxxdDlrekVrQ2dZRUFsT3YyZXF1NmhPT3VvRWRuYUE0N3hnd0RJRVpKYzI5U0hhNlEKTUtTWk4zLzVHV1VVc0ZhSDdTcHhJT3ZTMXhzemFnaU04clViRjAvSWdqSk02a2E5bTZ0WnloUGhOR0ZVdnlCeQpBWDkveTkwL0grSEVQYzAwSU9ObGRUK2d6VzhCT0pNRlhEN2VTODg0NW1DZWE4c1dtN3dQd0xSWlB2aElPU1hwCkNXNXlLUWtDZ1lFQXc5MmhVR1pRZy94MWNja3VaRTZBQ1c2ZW0yZm5MWnVFVzlvMHBjLy9KSm5SZTc2NjZwb2wKYnZXOXlqZm81bnk0UTlzWjRXRXVldHYvbmRmRGZQOStSTlY5ZksvS0c1UThDbG1LZHVFWll2NHBoSEN5emE1ZwoycmY2RFBWamlvSi9HRmtWNzVjYWxldVE0TkxuVFk2akZCU0x2QUNBREFUcEt2UzVKaG5KOUt3PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= + ca.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFekNDQWZ1Z0F3SUJBZ0lRSVl0VE1lazQyektUb2pzV0I5Wm05ekFOQmdrcWhraUc5dzBCQVFzRkFEQVUKTVJJd0VBWURWUVFERXdsRGFXeHBkVzBnUTBFd0hoY05NalV4TURJek1qQTFOek0xV2hjTk1qZ3hNREl5TWpBMQpOek0xV2pBVU1SSXdFQVlEVlFRREV3bERhV3hwZFcwZ1EwRXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCCkR3QXdnZ0VLQW9JQkFRQzh2am5vbVJDL3M1VUlQQ0lrV29tcC9nWTJ3UFUwU0hIVjBzYytNRXk5TndqSy92czQKMHhkQkpTblhyai92aFcxY0RqUFZHN2hNeHZONUJtZEZZTUtIYitqREl3d0JmYkR3SVQ0SktVV0dDVi9FMVFkMwp4dHlrYUF0V2doMHFOd05Wb2VHZmZ5UDFnNHhhYStJcGRyTk03K2lSbGZ4ekQyUGx3L2RsVzZLQzFaY2ErK0VyCjdhTS82bnV3cm50ZWM5dmpJWnUxbTZvN3UxNUF0NW9GcmxVUEoyUUloS09sMUdtdUprV0NEVUtaZmRSZXdLYTQKOXpJKzBvVS8xV1RZVFJVV3k4bGFSTXNGL1FlNjZRc3BkM0JOZGxLVlhQWlgvNlgyUjdqZWNsRzdJT2FveVNLTgp2WHZwVkpBbFJFU3A2U2ZFNy9HVzJhbm4zaVBVMWkvYlhXSUJBZ01CQUFHallUQmZNQTRHQTFVZER3RUIvd1FFCkF3SUNwREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFIL0JBVXcKQXdFQi96QWRCZ05WSFE0RUZnUVVQYU1uTDdVOEEza0RtYlVOay91OTBua3dIZWN3RFFZSktvWklodmNOQVFFTApCUUFEZ2dFQkFKSlNXd0c3S0VzNzZjSFNkQVJpb2Y5VFVZK1VFK0U0TWJSMytPYmFFWlJ2bXZmMGZ4UlVIQS9RCm1VSW1RTGYvTHRlK2RDRGx5RmxFK0ZSazM0RUErT3V0eDRYczEwRElKRzY4bEFsbGRtTk1xNXRKQU5zV25DVW4KVVZTR0U0eTZwRkFxU3RzVE5neDBNeUQ3Y3cvMnQ1NlpzT2FxSW5TVXNKUXZZZWhYWGwwV1BORGpwZVhFcGNrSgpkb0k5Z3VReERHRTc3SUs5QmNvSXY2d1p2elVrSFRBUndGSWNIaC8zUjJvQ1lzTXZvN29Jd3Z0cHcybFMxU1JwCkFqcTRjejdtRzVxaC9MUE5vd1FZc0QwUDhaUG9XN0UxRTBGU3BCK0pVOTUzVTJNOW16ME5pUzJQa1VPeXl6dXQKYTdWUDZBb09VMjRHb3J3SEVCSWduaWpQMk0rYnNRVT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURTRENDQWpDZ0F3SUJBZ0lRWmM1b1RMUnJHNVBwYTZNN3AvSzc5ekFOQmdrcWhraUc5dzBCQVFzRkFEQVUKTVJJd0VBWURWUVFERXdsRGFXeHBkVzBnUTBFd0hoY05NalV4TURJek1qQTFOek0yV2hjTk1qWXhNREl6TWpBMQpOek0yV2pBak1TRXdId1lEVlFRRERCZ3FMbWgxWW1Kc1pTMXlaV3hoZVM1amFXeHBkVzB1YVc4d2dnRWlNQTBHCkNTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDYUk3RG5BMTlGZCtmRmtEdEE5MGpEN2M2ZG1GKzEKTFNvblM4VU5lRlNHUlpBK3pmT3BSM3RKSGhlTGM4QjhtQVNwOFNKS09UbURFaC9Kdkk2QTJZTE8yZ1BlM0lzYwpOZDdYeitmL0tpV3NTZmc3aG4zM0ZkOThGNG9Kc3BwRk9uRE9DMFAxYnRweU1GdUxuYkNtM3ZHZ2FNV3VaM045CnNmVmtRU2dMa2hUQkhaYXlJc2ZSWTlrYVE0dW9NV2luaFNPdzR0bi9sSDYxcEo1U1RjUkY0aDNWa1A0QlVGYVUKaHRrRXRFQVhwZUlHeWR4aDZXWWY3WWU5WTVQeWFaR3M5aFVYbHNkNzIxYm9IcE5qU3JlMFY5RFhlb0F2RDlRSworUzFZWWZjY1kxTWp2dlIyZEpIN3AxRm5LODR3OG8rK0RxQ3lvMVYzbHNyRWh1NnB3UFVxMHBoUkFnTUJBQUdqCmdZWXdnWU13RGdZRFZSMFBBUUgvQkFRREFnV2dNQjBHQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CQmdnckJnRUYKQlFjREFqQU1CZ05WSFJNQkFmOEVBakFBTUI4R0ExVWRJd1FZTUJhQUZEMmpKeSsxUEFONUE1bTFEWlA3dmRKNQpNQjNuTUNNR0ExVWRFUVFjTUJxQ0dDb3VhSFZpWW14bExYSmxiR0Y1TG1OcGJHbDFiUzVwYnpBTkJna3Foa2lHCjl3MEJBUXNGQUFPQ0FRRUFuakNmeGl0RlBONG5FSGZVS3dFelg2SWJpQ0xuenpSb0FxbXFTd0tsSDZDNzU3a1UKR2hrUXBYTC9OYWJnSEtQNm8vUnFlVlg4QkFXUGRyS21iRDEyMHZVZFZQWjFRbjJVY2lGYzRWUXJVRWhpN3J4aAoxazBwT081dFU5OTBlaWc2SGlEeE9hckVJZmI5cWpyS212MENBcFBiRXlpckNDczMyaEV4RzRFTVRLSFRBRTd5CnIybUFYWXA2QzdjZEgvRk1ETVpEcXpkdUpOT01nZGxzcHZtS0hEZXJLZlBuMEV4ZFkyaG5pY3U4TGZNUEJJU2QKVFF5bXNvWmJZcjJQbnFtaUZnckVpWnlWOEpHUkRyNnFJU0Y5VUhQOCs5WTI5UmhNT0ErUkVHaDREdGpVZmVJUApaTnlFZzYyUll1NnpjM3h3d2dyZXRteVJnazN6aW1EOFZYQzZrZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBbWlPdzV3TmZSWGZueFpBN1FQZEl3KzNPblpoZnRTMHFKMHZGRFhoVWhrV1FQczN6CnFVZDdTUjRYaTNQQWZKZ0VxZkVpU2prNWd4SWZ5YnlPZ05tQ3p0b0QzdHlMSERYZTE4L24veW9sckVuNE80WjkKOXhYZmZCZUtDYkthUlRwd3pndEQ5VzdhY2pCYmk1MndwdDd4b0dqRnJtZHpmYkgxWkVFb0M1SVV3UjJXc2lMSAowV1BaR2tPTHFERm9wNFVqc09MWi81Uit0YVNlVWszRVJlSWQxWkQrQVZCV2xJYlpCTFJBRjZYaUJzbmNZZWxtCkgrMkh2V09UOG1tUnJQWVZGNWJIZTl0VzZCNlRZMHEzdEZmUTEzcUFMdy9VQ3ZrdFdHSDNIR05USTc3MGRuU1IKKzZkUlp5dk9NUEtQdmc2Z3NxTlZkNWJLeElidXFjRDFLdEtZVVFJREFRQUJBb0lCQUVoWklrcDhTYmFJbmxBYgo3OFFCamJUeEpQN1JxM0tWdldodVp4SjRmV3Y4c01UWjFGbUxSTEhBSHhzRWZMKytkc254ZGIvUzlreXdNQjEzCmxJZWtFSUxZeitKR01FRlFnOFZXUDdXRDZlVGhYMU5KUUV5K2hvUnZaQXBETzZmczAwL1M5OVgyYnpBVHhKUlcKc3JqUWdpZGhwZy9EWFBFTm0zbXdINFMyejJrSXFObGdVMnhrWTFzMFQzYkszTG9NOWpsWEczekovZm1HbXlGagpqUndleDZhZTIvYWl2MlpHakp5M1V6bVdVNUdsRmEvZzM4OVdvSEw5Q2xGRUh4NDNwWXJZTHVaTGNzaUovSEpmCk5ucFg5RFZrUFpzdmpiZnhjaDVmNG5laWJzWjFUUjlPM2JweGNCM1V2NE5RcitWcE1LS1JLT0ZMSTdzOUxKRVYKZFo5dS9FVUNnWUVBeVZEbWsvRTRYNjVVbTYrcnRqNU1WVzlsN2w0cythSGxtZTd4RjhFME5rTFFZblhBdXVxUwpjd3dhTk9PMmdTa1FKSW11cDJZb3lZZ3JMT2NZRnBJM2xWUEZvKzVobk96dk4xUUtWZDQwQll5cSt1dndRQ3kyCjdIeDVZOE9BUVo1UytYZ25MSG5iZlN1dTVUUllGOGJJTEdtMHRsSkx3U2p4dXhLUFBIVEhnRjhDZ1lFQXhBSTYKS0RPeGVQbkJVM1pQVzFZWGVROEU3SGFza0IvdThweWxnNkpJU3NkNExLVzZlOTl2Q1J3SHJFV2ZvQXgzU1pFdgpyL2YwOXJnUkRXK3VRUHpQb05NMC85aW4xTHFiUDhMU3NDdEJobHM2QlNWMENtb0hlelVuOFR2MzFEaDNMV1ZoCnEwMk9ZOE41WjFGT0t5OHFwQmI3MjgxMzc0dzJZMVZEN0xlejVVOENnWUFYUUJteFFtM0JWeS84WHhpVXpyTWcKTnVKdDJrQ1ZnQ1kxVmd4UU13Y2xzU2ZDQjFsLy9QRjNDaEhJdy9makNPd05YRDFvcGZyVjg3MFF6WXZScDRkMQozcHYzNStNc0xZMGZOYnlQMkQ3bjJTd3lHS2ZCc2FoYXZiR0RYU1BsOERRakRjWndjNDV1OFdtY25TUURjdmpSCmNFL25zcTkvR294NGo1Y3RZMHRiUHdLQmdESWZlNjNDSWxVVVVScTl1MmRZNmFHOGRIN2ZqaHRBLzBLdFNpb1MKT3JoY3I5K0M5ZUF0YitCOWVYelZSUStSZ0lPZ2kvaE15U2k2UXlvVGNCVVFVU3dJTnBwaVZmQ3hVM2dIb3djSAoyTGE1NTJsZmZZQVlReGQvclZva0Fxa1RVMjZaYVZONy9yaUdTU3BoZ1VMTHlmU3lKKys2YUR3cXQ5SEpBUmlzCmxXVkRBb0dBR0pHUWZwcjNVVHhwelMydmg0NjFUTS9DcnhhZkRWTGtIRFZwUHJrL2VoRU82dzhhRlBIZ1FFRW4KbTc4L2VLS290NUowdnVhaDA1S0tiaFVIS0I3Zm5zaGNqbmhCOVNTUVVERG1nOUp6aDVoblNIUjBvT1lScTcyYwpnYUVvZSs5M1JibEVjZjNXaWlBU1NnWVRqTXVjWFc2enZraHNHQWtVblR6aGJKNWk1Mnc9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== kind: Secret metadata: name: hubble-relay-client-certs @@ -991,9 +985,9 @@ type: kubernetes.io/tls --- apiVersion: v1 data: - ca.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFekNDQWZ1Z0F3SUJBZ0lRSUZ3eThzeXlxQ2loRTFRQUZPWkdqVEFOQmdrcWhraUc5dzBCQVFzRkFEQVUKTVJJd0VBWURWUVFERXdsRGFXeHBkVzBnUTBFd0hoY05NalV4TURJeU1qSXlPVE0zV2hjTk1qZ3hNREl4TWpJeQpPVE0zV2pBVU1SSXdFQVlEVlFRREV3bERhV3hwZFcwZ1EwRXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCCkR3QXdnZ0VLQW9JQkFRREY1YjJsR2tZcjJjNm53MVVlS3UvOWkxazU2ZzNITUZnSXZ4MS9TSks1L2VIek1zT1oKQ1FTdUg1QlhoNnpGbDlQTktFaXNNdE1LRjdsSmF1TWFpajFxY1lqcURkVlFQM1F3V2tOa1hPNndtNU02YlQ5agpPUEt5bXljTHNtTFEzLzE5NUtZME5KbkJMNWtLYit2UDdBeHllOGpGeUoxQWFLNHhqZHp0REMwZjM5VHJKaysvCjMxdjZqUnpZczRwL29rN1pyRDd3Yi9Td2dCdzJocGVuLzdhN0Z3akQzVDhxMUF2bDdESkQ5bzBwMm1QZFA0OGgKNnMxUHB3eUUvcWZ4QVovRm85YTBHdWs0dVlMTmJvRjBEVVNZS3dVOVU2SzZLMEp6OXF5Mjh3NUtKaGRDUnRJVgpWdGRGNDJMeUhNaVZmT3pBQzFKQllLSTgvWTdjUndrc3hIOS9BZ01CQUFHallUQmZNQTRHQTFVZER3RUIvd1FFCkF3SUNwREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFIL0JBVXcKQXdFQi96QWRCZ05WSFE0RUZnUVV2dHJyVWhScHBuOTE3ZlBLc2JBLzVnZExFVUF3RFFZSktvWklodmNOQVFFTApCUUFEZ2dFQkFKS09WRVpDSzlnQkxLaUxVZ1dTd2dybms4cWp6cHR5YUFlSEtsUUhIS1FNYXBycWdUZkVPRmQ4CkYzRUxwQmZYeHBRNTJBa3N4c0JjZ2ZJRnphclRrRU8yOHNCR0pheEhxbENaQVpBY2ZhSWVJdVR4TlNnQXNtV0UKV2J1bDduWGZmL2FsNXJsRWJnZXdiMVpzSVVaNGF4L3FJZzJFd3l5VGhNQ0lpdi9HZFBmY2NWZ2ZLVlJOdGJ4WQpaOWxXdTlNOVRSU0JsOUVtQTNuWFVsRnBnMmZVYSsxNzNpeWpGa05pRXh0RldzaXVlcUxOd3F4czY5R0pEVFh5CnpUVklWWUFWeWdmcFM1cTJ0eERKdzNlcWNDZUlvREljelI0eU1nSUdkcG1nWTdYZmZYOVI2aTd2UFNEcDYwRjUKZTRHWG54NUhtYjBYK2tueXRxZ1pTNWphV25ZVzNxST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= - tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURVakNDQWpxZ0F3SUJBZ0lRWGNyL1l0cFFVQS9uTC80Vlc4RjVYakFOQmdrcWhraUc5dzBCQVFzRkFEQVUKTVJJd0VBWURWUVFERXdsRGFXeHBkVzBnUTBFd0hoY05NalV4TURJeU1qSXlPVE0zV2hjTk1qWXhNREl5TWpJeQpPVE0zV2pBb01TWXdKQVlEVlFRRERCMHFMblJoYkc5ekxtaDFZbUpzWlMxbmNuQmpMbU5wYkdsMWJTNXBiekNDCkFTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTjJ5RTZuWVNyV2NUTXU4NUpPaEljZzIKSW9FNTZqNmV3MFJOT2F4U0I0VFJhL1NyUW93Vk9jVVUxaEk1NmpzeWVTVWcxOFdUSlJwVWNpd3V6WFNOVEVLUQo3ODNPOFNiczVXcFFDaWJVWUVOTGJQWnUvVDZSelIxaGo4M2g5NWVyODM2Zm4vUWRKOVhuVmZPTkZIaG1USkhGCmJEaFhpME0xdEZoYVBSWmRLc3h2RThXMExkWWIwMTNrVVp6ZWFUWEk1cmhsSzFJZXdTbitiVUJFTEV1dGloNEkKY0xTQzhMd2hqNUdDTFdjR01FNnVRR2F3TnNCdHhaQ21MeXZaL0cvNlRzV0xEOXRka2VpK2JBWWJSVzJ5WGpjdgo0UUtzcDYwKzBhV0ErazBUWW9EZEpFdFowNkovWitUTE9HZ3pES3haaWx6bExpTkl4Y3ArWmdBT2hCVGNHU2tDCkF3RUFBYU9CaXpDQmlEQU9CZ05WSFE4QkFmOEVCQU1DQmFBd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUcKQ0NzR0FRVUZCd01DTUF3R0ExVWRFd0VCL3dRQ01BQXdId1lEVlIwakJCZ3dGb0FVdnRyclVoUnBwbjkxN2ZQSwpzYkEvNWdkTEVVQXdLQVlEVlIwUkJDRXdINElkS2k1MFlXeHZjeTVvZFdKaWJHVXRaM0p3WXk1amFXeHBkVzB1CmFXOHdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQTl5OFhXdDl5WHFUb2hMZHBVckV1T3hHN1N5Vmlhdm1tQmcKcEhZUi91WlYxa1pNdElJVFVOMXlvNWlwZmgxeVZxTElqSldXcjVjUHRHVFBsTk15cVFCck1BQmpuZzBNYlE1QwowMjd5elg5eDJ6ZTBIYkFnKzdXWWh3WFVIeGNQdGdrK3VmYmt3d3YyaEhYYzgwQUxaN2k3aWh4clBlSHcvcWg0CkJycS9rMEZZY1dDWW9WMk81STNYTHZydE1qQTFSRER6ZWJqY08ydzJJcVBzR3R1TzFUQTdzZUlkZ1pxd1V1aUcKQmhLdkczY1ZrMXhaUit5L3lNNkRVUUtiWmNTSnJTc2JhNnhpdVdNeTYrK0VDb3hLTE9ydmdIYmthd1RlVk9LdQozOTJkMnRSblh5aTVOc0ovRCtVR1ppMDhZZFpyeGErTGNKMG0yNWRucnRQdTJqaHlTUU09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K - tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBM2JJVHFkaEt0WnhNeTd6a2s2RWh5RFlpZ1RucVBwN0RSRTA1ckZJSGhORnI5S3RDCmpCVTV4UlRXRWpucU96SjVKU0RYeFpNbEdsUnlMQzdOZEkxTVFwRHZ6Yzd4SnV6bGFsQUtKdFJnUTB0czltNzkKUHBITkhXR1B6ZUgzbDZ2emZwK2Y5QjBuMWVkVjg0MFVlR1pNa2NWc09GZUxRelcwV0ZvOUZsMHF6RzhUeGJRdAoxaHZUWGVSUm5ONXBOY2ptdUdVclVoN0JLZjV0UUVRc1M2MktIZ2h3dElMd3ZDR1BrWUl0WndZd1RxNUFackEyCndHM0ZrS1l2SzluOGIvcE94WXNQMjEyUjZMNXNCaHRGYmJKZU55L2hBcXluclQ3UnBZRDZUUk5pZ04wa1MxblQKb245bjVNczRhRE1NckZtS1hPVXVJMGpGeW41bUFBNkVGTndaS1FJREFRQUJBb0lCQUFOS0dUZ1gycE5EY0lhYQpYMzJLOG04ZjBkY1VocWtBcytRdmVDZXZmT2M5TUpWTUN1YW5YdmpSWkdsc3AyWW9TM3RmOTNaUFJyUStEMXYxClRYenovQm5Gd0VVbEU2ekY3dWtyQUZ6a29hYXdvbVhZQTVVYUsrUUtkcUdHLzQ4N2U0SUVEaWNjc3FLemRoeWIKeElpOUl5MkxpTWhLTzVSVzk1TFRDSjBzYXpqbXl2UzE4UFNWUWNiZzUwdTRKUWJSVjNDazBoRTVkcSsvNGZUTQp6bVFWQS9DbEtnRDhmRjdPbTlSVGFBV05UVllHTVFFcDI3Wm8yQktiOVdieG9DZkRqMHpwL2Y5c1N0MURBVE16CjJzM1BTWUw4VkpQZVl4SGhUL0NiVzlGckZLVUtPQkNXV1R0K04rbVJUTjVyMEVoN1VuNTZBVDFYeUw5WW42MnoKZWxPZFNPRUNnWUVBN3ZTQmpZSXZHTURvK3ZhTUNicTRTQlRiQ0RGemFjY3lHUG9vOWRmcU1tZ2grQTcxZEZ5ZAo5YlhJTy9oLy8vWGdLaGV3TGFSQ0J5SWRuUnlkZ2ZvYnV6UUxWWHZXcXBqRXphM1REYVVnVXFIVjhlWWt2b3l0Ckp3bk5BUXEyOVFsRm1pTTdBRDl4MjU1ZjBQYWJZUy9vVVl5aVJmVHgwQWRTTTI0UGtDa1dYemtDZ1lFQTdZSm0KWVJoREVtQVR0Y2xVVGZ2VnBhUWJjaWo1MFJhY0RlUWFsNDh3V1Z1SHZ1QkRRd2xlQWFUK1hZNDBzZXJYbWRMegp5RjFzUlFQQUxud0xNMHlYSmxmM0RYbjVZLzhVRzd3Rm5HcHowVEw2RnhNbWlyNVEwZUNlOUZhQld2d3p0c2xrCkZzMDU0M2lVcWthY1lISFJlTzFEcVdsSU55dW4xaFIzZWJpcm1YRUNnWUJnUlpvOGpRcTBpbVJUV0NtcmNkTDEKRjVIM2h4VGN0U2gxNzdPNCtMaWVmbDNSTnhXbFAvdFVGMTBJN0dHRm1tWXFEMGtuMmxuUHplcm9kYkx2dnZpVgpQNjZoVGpkNm0vb09jODNvWFhMY1Y5OStpaDZidHNtOFJMS1hqTjJSYzU3Ykx2NkNadzFVVWthQVJocnIyUVZjCmRiQ2JSc2gxNjcwdWcxVTUyNVByNFFLQmdRRHBVOFMrT0RhVUU3enBScXRDblZmTERVaEZRNG9BWmVUVEpMY1gKRTMxZEc3Vmt5QzVyNmdUbk9xOEZPOUg2dzRvaFBRTnZZSkFmME1wNUlEVmtwU2FNYmJod0RkYjU3THBWY01aVwpYcUgrNzhTTzdLcGRmUkFxenk5NVBJNkIwdWYrOXlRVDNuT3pTMTlKVkdzRnVHc2dFSDc3TEVuY29VK1YyYTl4CjdnRHVrUUtCZ0QxWEpEckdacjgveGtJa0w4WXJVNmNtS051MjlCZmUxNlR1azdCSGhqeDZHVmhzaHVreXlZV2wKTHEzbWJUcGdWeTF4aXFSVUhaQWxGcThPdEl4ZlNuYk4zanEyZTZ6SW4vM1lMeWpRd1hHSlpaWFdqZkJORU9DZgoyajdreGs4RGxyV1pIYmFJM1J0YlVoTXVpeDN1NFdxVTBCaFdndWJ6ZXc1K0xnSDBHODFqCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== + ca.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFekNDQWZ1Z0F3SUJBZ0lRSVl0VE1lazQyektUb2pzV0I5Wm05ekFOQmdrcWhraUc5dzBCQVFzRkFEQVUKTVJJd0VBWURWUVFERXdsRGFXeHBkVzBnUTBFd0hoY05NalV4TURJek1qQTFOek0xV2hjTk1qZ3hNREl5TWpBMQpOek0xV2pBVU1SSXdFQVlEVlFRREV3bERhV3hwZFcwZ1EwRXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCCkR3QXdnZ0VLQW9JQkFRQzh2am5vbVJDL3M1VUlQQ0lrV29tcC9nWTJ3UFUwU0hIVjBzYytNRXk5TndqSy92czQKMHhkQkpTblhyai92aFcxY0RqUFZHN2hNeHZONUJtZEZZTUtIYitqREl3d0JmYkR3SVQ0SktVV0dDVi9FMVFkMwp4dHlrYUF0V2doMHFOd05Wb2VHZmZ5UDFnNHhhYStJcGRyTk03K2lSbGZ4ekQyUGx3L2RsVzZLQzFaY2ErK0VyCjdhTS82bnV3cm50ZWM5dmpJWnUxbTZvN3UxNUF0NW9GcmxVUEoyUUloS09sMUdtdUprV0NEVUtaZmRSZXdLYTQKOXpJKzBvVS8xV1RZVFJVV3k4bGFSTXNGL1FlNjZRc3BkM0JOZGxLVlhQWlgvNlgyUjdqZWNsRzdJT2FveVNLTgp2WHZwVkpBbFJFU3A2U2ZFNy9HVzJhbm4zaVBVMWkvYlhXSUJBZ01CQUFHallUQmZNQTRHQTFVZER3RUIvd1FFCkF3SUNwREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFIL0JBVXcKQXdFQi96QWRCZ05WSFE0RUZnUVVQYU1uTDdVOEEza0RtYlVOay91OTBua3dIZWN3RFFZSktvWklodmNOQVFFTApCUUFEZ2dFQkFKSlNXd0c3S0VzNzZjSFNkQVJpb2Y5VFVZK1VFK0U0TWJSMytPYmFFWlJ2bXZmMGZ4UlVIQS9RCm1VSW1RTGYvTHRlK2RDRGx5RmxFK0ZSazM0RUErT3V0eDRYczEwRElKRzY4bEFsbGRtTk1xNXRKQU5zV25DVW4KVVZTR0U0eTZwRkFxU3RzVE5neDBNeUQ3Y3cvMnQ1NlpzT2FxSW5TVXNKUXZZZWhYWGwwV1BORGpwZVhFcGNrSgpkb0k5Z3VReERHRTc3SUs5QmNvSXY2d1p2elVrSFRBUndGSWNIaC8zUjJvQ1lzTXZvN29Jd3Z0cHcybFMxU1JwCkFqcTRjejdtRzVxaC9MUE5vd1FZc0QwUDhaUG9XN0UxRTBGU3BCK0pVOTUzVTJNOW16ME5pUzJQa1VPeXl6dXQKYTdWUDZBb09VMjRHb3J3SEVCSWduaWpQMk0rYnNRVT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURVakNDQWpxZ0F3SUJBZ0lRTzN5WUZQenNlajFVNEVMZjZhK2JZekFOQmdrcWhraUc5dzBCQVFzRkFEQVUKTVJJd0VBWURWUVFERXdsRGFXeHBkVzBnUTBFd0hoY05NalV4TURJek1qQTFOek0yV2hjTk1qWXhNREl6TWpBMQpOek0yV2pBb01TWXdKQVlEVlFRRERCMHFMblJoYkc5ekxtaDFZbUpzWlMxbmNuQmpMbU5wYkdsMWJTNXBiekNDCkFTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBUFNublFOckN2YWJCQ253blVyWWQ0U2UKcnVnTThrZVlrVkFFT04yRXMrMXp0Yzg5RDBETm9HdW9sN0NlZ3NROWo2OUR5Y1owZTRWZDJJZXRtbjZJRDlSZApaakIyVzlvOGV3NTJIRFByMTFKN2RVRjBXUjBUYThOWEcwNzZJbDZlanNRaHp6M1pqZmdDR0t3UDNzOEdvMkdBCm5YejBxNUZZMU1tcm9ULzVFVEZPMFRROVZCTHU0cjZReEk1dk1QTnFJZVAvSjJ1MGVrWlhDNXVpRS9uVzFjbCsKK1NxYnVJNEZPRkhBeHE4SlRuNWlSQnJzN3hWL09HZ0twdjVIcC9TZWE0ZzdJYlhXOWRkWmt2YnFXZWkrUWRPUwpycW9odmpzdGtNUHpYdWJ3V0I5YkxTdXl3T3gwQWw3ZGVPT3J5Qmd1eEpsN0ErTzhoVGlLNEg1bkRNUytXMmtDCkF3RUFBYU9CaXpDQmlEQU9CZ05WSFE4QkFmOEVCQU1DQmFBd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUcKQ0NzR0FRVUZCd01DTUF3R0ExVWRFd0VCL3dRQ01BQXdId1lEVlIwakJCZ3dGb0FVUGFNbkw3VThBM2tEbWJVTgprL3U5MG5rd0hlY3dLQVlEVlIwUkJDRXdINElkS2k1MFlXeHZjeTVvZFdKaWJHVXRaM0p3WXk1amFXeHBkVzB1CmFXOHdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBSDdZSFJobkZ2T0paWEZTUGlRMGVPSzVLZEFSU0ZsQi9OQjUKc0tvZWROUHowMEk3bUdvNnl4bktqYTA2ZHRvRmVHVHJIUWVWOHNqRFRnWkQrZzhmU3BMSjhrZ0FVSk4zZ0tlawo5dDdCYnVKUHNWRzEwSHBkeStUeDBVMnN0TTcyM3d0NWRSaDkxN2xzL0R2Tkl5Z0FKMndKZm1kK1Arc2x3UFJDCmU5dHZkd0lnTTMvalRZRGxIOWh2bW5EYnpnamZ5LzNlUEJJSDVRWW9JcDQwb2orOEhSN3M2anp3OG9RZWN4OFcKWStLaXY0d1FZK3c3OTdDYUNOSGJkVXlRZGtQcEpsdHBKOXQrSVU4YlZBQnU2dDlmK3NvRUFkdVMyR2FEYXlsdgoxWlpoWE55THhVdUdRSklGTWx5dGdSRU9OdEFwZXp5U0ltNFZPc3hicGkzaGt0c0U2SWM9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBOUtlZEEyc0s5cHNFS2ZDZFN0aDNoSjZ1NkF6eVI1aVJVQVE0M1lTejdYTzF6ejBQClFNMmdhNmlYc0o2Q3hEMlByMFBKeG5SN2hWM1loNjJhZm9nUDFGMW1NSFpiMmp4N0RuWWNNK3ZYVW50MVFYUloKSFJOcncxY2JUdm9pWHA2T3hDSFBQZG1OK0FJWXJBL2V6d2FqWVlDZGZQU3JrVmpVeWF1aFAva1JNVTdSTkQxVQpFdTdpdnBERWptOHc4Mm9oNC84bmE3UjZSbGNMbTZJVCtkYlZ5WDc1S3B1NGpnVTRVY0RHcndsT2ZtSkVHdXp2CkZYODRhQXFtL2tlbjlKNXJpRHNodGRiMTExbVM5dXBaNkw1QjA1S3VxaUcrT3kyUXcvTmU1dkJZSDFzdEs3TEEKN0hRQ1h0MTQ0NnZJR0M3RW1Yc0Q0N3lGT0lyZ2ZtY014TDViYVFJREFRQUJBb0lCQUFGTjJNQnBQdjIyMnliVAphWU01OUNramVoQ0g2ODFiT3RXNldyTStQeXljMlVpVUxXaWpCVXFObml3SzF1enZoeEhnRi9YcGc3UmZubHFGCmRRcXlTYU5RSnMzMS9qZFk2eThBak5YdldUa2txZjRCL2hlY1FEbDNxcnJ1WDdzeFlvRnVkMTcvc2Q5ZDQzNXgKOXpZVFN2dW5ab1RXcVFqS294ZTZ4U1B2ZlVWT0FPbXlkNG16Vk5uOGRpS3lQVzhYcVFwVk9selI4RHdmbUNKOApvYWFwN2NwbTBFclA5MEFaMXRFZEltUXZmaXdmdFMrSDVTdHlha2ZBRUlLalhiclZtd1NiZmI3YVNuMGRsL2wzCklKUkRCemtCcU9vdHkzeTBEVTUzVTdKeUl3YzNHRjY2VlVWblBLZnArTXgzVG44d1REZ1cvbUxHY2NpYnEyWjIKc2VCTkdxRUNnWUVBL3BGVjhBUnpDS1grNTR3NzlUclVFOWZIYUFlN1hqclFtcXB2bkRCOVVsZG9obld3bFhLUgpBRC9DK3BwTDhDaDI3UHFEZVByM3lmZVNnSlY0TTI4VGtUUnFza0UrZFI2eFlvWEpzR2lPZjdjQWR3UDZIaGdWCllTN1E0RWUxTFhIaWFVdTRIZDEwWHZrcDJ2QnM3WlJESzJYMWdONktrdzcwMWxDS3Exd1NicWNDZ1lFQTlnZi8KNCs4UmJxQm15Q0V4THdUNW55YmtMenV4QWh0SXNkd2QyUWtyUWJ6aHFpL1lDYWRibTRaTlRndUt5UjdORVZWYwpQdVFkbmJVYUtGWldCbk4yN2pPdmNhVnNod1RQYzVNQjJkT0hHUXhLZFZNUjRyUUsvT0hjZ3k5NW1STjgranZNCnRzczAyTWhxeWhaWUNvVE52Z3dKajJkR0RqY3RlK25oL25zQnQyOENnWUVBK0tQbGh6QWhWMElpemZBWG8vZFkKcXlUbjNFRXBEZWx1VkpMQnZwRlI1bm4vV3MxYXN5MmFXTjd1Unkwcm5KODVRVStSMkRsd2luRG9RL1U1MlNyVgp0czYyby9LNzJWTW9PSzJxQ0VhbllERFRJR3ZmQVVQT3BnUSsrY0N3UW5ub2hhWHlhQ0VrQjV0ZitUczVlWGVGCmY3N3ZZblo2YlJaL3AxQTFrUmUxM1NNQ2dZQjRLd3dYUG91Ykp3bE1zcm1kSjZic3owZ2dzZDBSRkZaN1dQSFQKTWlGSXFJTG9aeG4wRFNRM1lHK2RzUkdHamw3SEdwUkZ1NUdqbC9OQXhIcWNDQVdNN3YzQVduWVp0SVhIQTRTaAo2aWxRV0twOWg2ZHl2VkdvbG9Eb0ttWVFHRzZ3b2tpbzk3UEdObzNDU3d1Umg3QWVKUzcrTmdrSHBIZTQwRkdyCnhGVGdUUUtCZ0ZxVHhBMzczbm05VFFleG9IRHRWVjBRS1ZYd21KL25RR3dRUW51MXB4cnl6aGVpWWhxWUlJVXMKQ1NJMkRwOExsdHNtTUpJcG9ib28vQVIvemtTWjNGUTMrSnJSd1hYdXNDZHNHdnVkd25MYXZid2xQejNEdytnNgo3T0FoUHFrdTRLa0tCbjVwZXNmeVMxTlRSQjhjVk5wMlRBdW92aGoxMGJRVUoxeWZUUjVLCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== kind: Secret metadata: name: hubble-server-certs @@ -1140,7 +1134,7 @@ spec: template: metadata: annotations: - cilium.io/cilium-configmap-checksum: f162e92ee476cd3ae4b3a7fe5a589516559c51f2708f29181b1dead6b1d2381b + cilium.io/cilium-configmap-checksum: 4f10952e95a86affe22cdebe32cd6f453b5a2a05d34475f5ada1d7ad133fc193 prometheus.io/port: "9963" prometheus.io/scrape: "true" labels: @@ -1211,13 +1205,6 @@ spec: initialDelaySeconds: 0 periodSeconds: 5 timeoutSeconds: 3 - resources: - limits: - cpu: 500m - memory: 256Mi - requests: - cpu: 50m - memory: 128Mi terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /tmp/cilium/config-map @@ -1438,7 +1425,7 @@ spec: template: metadata: annotations: - cilium.io/cilium-configmap-checksum: f162e92ee476cd3ae4b3a7fe5a589516559c51f2708f29181b1dead6b1d2381b + cilium.io/cilium-configmap-checksum: 4f10952e95a86affe22cdebe32cd6f453b5a2a05d34475f5ada1d7ad133fc193 labels: app.kubernetes.io/name: cilium-agent app.kubernetes.io/part-of: cilium @@ -1540,13 +1527,6 @@ spec: periodSeconds: 30 successThreshold: 1 timeoutSeconds: 5 - resources: - limits: - cpu: 1000m - memory: 1Gi - requests: - cpu: 200m - memory: 512Mi securityContext: capabilities: add: diff --git a/cilium/src/values.yaml b/cilium/src/values.yaml index 3483d74..6142cbf 100644 --- 
a/cilium/src/values.yaml +++ b/cilium/src/values.yaml @@ -1,91 +1,3823 @@ +# File generated by install/kubernetes/Makefile; DO NOT EDIT. +# This file is based on install/kubernetes/cilium/*values.yaml.tmpl. -bpf: - hostLegacyRouting: true -kubeProxyReplacement: true +# @schema +# type: [null, string] +# @schema +# -- namespaceOverride allows to override the destination namespace for Cilium resources. +# This property allows to use Cilium as part of an Umbrella Chart with different targets. +namespaceOverride: "" +# @schema +# type: [null, object] +# @schema +# -- commonLabels allows users to add common labels for all Cilium resources. +commonLabels: {} +# @schema +# type: [null, string] +# @schema +# -- upgradeCompatibility helps users upgrading to ensure that the configMap for +# Cilium will not change critical values to ensure continued operation +# This flag is not required for new installations. +# For example: '1.7', '1.8', '1.9' +upgradeCompatibility: 1.17.1 +debug: + # -- Enable debug logging + enabled: false + # @schema + # type: [null, string] + # @schema + # -- Configure verbosity levels for debug logging + # This option is used to enable debug messages for operations related to such + # sub-system such as (e.g. kvstore, envoy, datapath or policy), and flow is + # for enabling debug messages emitted per request, message and connection. + # Multiple values can be set via a space-separated string (e.g. "datapath envoy"). + # + # Applicable values: + # - flow + # - kvstore + # - envoy + # - datapath + # - policy + verbose: ~ +rbac: + # -- Enable creation of Resource-Based Access Control configuration. + create: true +# -- Configure image pull secrets for pulling container images +imagePullSecrets: [] +# - name: "image-pull-secret" -# Talos specific +# -- Configure iptables--random-fully. Disabled by default. View https://github.com/cilium/cilium/issues/13037 for more information. +iptablesRandomFully: false +# -- (string) Kubernetes config path +# @default -- `"~/.kube/config"` +kubeConfigPath: "" +# -- (string) Kubernetes service host - use "auto" for automatic lookup from the cluster-info ConfigMap k8sServiceHost: localhost +# @schema +# type: [string, integer] +# @schema +# -- (string) Kubernetes service port k8sServicePort: 7445 -securityContext: - capabilities: - ciliumAgent: [ CHOWN, KILL, NET_ADMIN, NET_RAW, IPC_LOCK, SYS_ADMIN, SYS_RESOURCE, DAC_OVERRIDE, FOWNER, SETGID, SETUID ] - cleanCiliumState: [ NET_ADMIN, SYS_ADMIN, SYS_RESOURCE ] - -cgroup: - autoMount: - enabled: false - hostRoot: /sys/fs/cgroup - -# https://docs.cilium.io/en/stable/network/concepts/ipam/ -ipam: - mode: kubernetes - -operator: - rollOutPods: true - resources: - limits: - cpu: 500m - memory: 256Mi - requests: - cpu: 50m - memory: 128Mi - -# Roll out cilium agent pods automatically when ConfigMap is updated. -rollOutCiliumPods: true -resources: - limits: - cpu: 1000m - memory: 1Gi - requests: - cpu: 200m - memory: 512Mi - -#debug: -# enabled: true - -# Increase rate limit when doing L2 announcements +# @schema +# type: [null, string] +# @schema +# -- (string) When `k8sServiceHost=auto`, allows to customize the configMap name. It defaults to `cluster-info`. +k8sServiceLookupConfigMapName: "" +# @schema +# type: [null, string] +# @schema +# -- (string) When `k8sServiceHost=auto`, allows to customize the namespace that contains `k8sServiceLookupConfigMapName`. It defaults to `kube-public`. 
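+# For example, instead of the fixed Talos KubePrism endpoint configured above
+# (localhost:7445), the API server endpoint could be discovered from the
+# cluster-info ConfigMap (a sketch, not used by this rendering):
+# k8sServiceHost: auto
+# k8sServiceLookupConfigMapName: cluster-info
+# k8sServiceLookupNamespace: kube-public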
+k8sServiceLookupNamespace: "" +# -- Configure the client side rate limit for the agent +# +# If the amount of requests to the Kubernetes API server exceeds the configured +# rate limit, the agent will start to throttle requests by delaying +# them until there is budget or the request times out. k8sClientRateLimit: + # @schema + # type: [null, integer] + # @schema + # -- (int) The sustained request rate in requests per second. + # @default -- 10 qps: 20 + # @schema + # type: [null, integer] + # @schema + # -- (int) The burst request rate in requests per second. + # The rate limiter will allow short bursts with a higher rate. + # @default -- 20 burst: 100 + # -- Configure the client side rate limit for the Cilium Operator + operator: + # @schema + # type: [null, integer] + # @schema + # -- (int) The sustained request rate in requests per second. + # @default -- 100 + qps: + # @schema + # type: [null, integer] + # @schema + # -- (int) The burst request rate in requests per second. + # The rate limiter will allow short bursts with a higher rate. + # @default -- 200 + burst: +cluster: + # -- Name of the cluster. Only required for Cluster Mesh and mutual authentication with SPIRE. + # It must respect the following constraints: + # * It must contain at most 32 characters; + # * It must begin and end with a lower case alphanumeric character; + # * It may contain lower case alphanumeric characters and dashes between. + # The "default" name cannot be used if the Cluster ID is different from 0. + name: talos + # -- (int) Unique ID of the cluster. Must be unique across all connected + # clusters and in the range of 1 to 255. Only required for Cluster Mesh, + # may be 0 if Cluster Mesh is not used. + id: 1 +# -- Define serviceAccount names for components. +# @default -- Component's fully qualified name. +serviceAccounts: + cilium: + create: true + name: cilium + automount: true + annotations: {} + nodeinit: + create: true + # -- Enabled is temporary until https://github.com/cilium/cilium-cli/issues/1396 is implemented. + # Cilium CLI doesn't create the SAs for node-init, thus the workaround. Helm is not affected by + # this issue. Name and automount can be configured, if enabled is set to true. + # Otherwise, they are ignored. Enabled can be removed once the issue is fixed. + # Cilium-nodeinit DS must also be fixed. + enabled: false + name: cilium-nodeinit + automount: true + annotations: {} + envoy: + create: true + name: cilium-envoy + automount: true + annotations: {} + operator: + create: true + name: cilium-operator + automount: true + annotations: {} + preflight: + create: true + name: cilium-pre-flight + automount: true + annotations: {} + relay: + create: true + name: hubble-relay + automount: false + annotations: {} + ui: + create: true + name: hubble-ui + automount: true + annotations: {} + clustermeshApiserver: + create: true + name: clustermesh-apiserver + automount: true + annotations: {} + # -- Clustermeshcertgen is used if clustermesh.apiserver.tls.auto.method=cronJob + clustermeshcertgen: + create: true + name: clustermesh-apiserver-generate-certs + automount: true + annotations: {} + # -- Hubblecertgen is used if hubble.tls.auto.method=cronJob + hubblecertgen: + create: true + name: hubble-generate-certs + automount: true + annotations: {} +# -- Configure termination grace period for cilium-agent DaemonSet. +terminationGracePeriodSeconds: 1 +# -- Install the cilium agent resources. +agent: true +# -- Agent container name. 
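+# The client rate limit above (qps: 20, burst: 100) is raised from the chart
+# defaults because L2 announcements add API-server traffic for lease renewals;
+# the defaults could be restored with (a sketch):
+# k8sClientRateLimit:
+#   qps: 10
+#   burst: 20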
+name: cilium +# -- Roll out cilium agent pods automatically when configmap is updated. +rollOutCiliumPods: true +# -- Agent container image. +image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/cilium" + tag: "v1.17.8" + pullPolicy: "IfNotPresent" + # cilium-digest + digest: "sha256:6d7ea72ed311eeca4c75a1f17617a3d596fb6038d30d00799090679f82a01636" + useDigest: true +# -- Scheduling configurations for cilium pods +scheduling: + # @schema + # enum: ["anti-affinity", "kube-scheduler"] + # @schema + # -- Mode specifies how Cilium daemonset pods should be scheduled to Nodes. + # `anti-affinity` mode applies a pod anti-affinity rule to the cilium daemonset. + # Pod anti-affinity may significantly impact scheduling throughput for large clusters. + # See: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + # `kube-scheduler` mode forgoes the anti-affinity rule for full scheduling throughput. + # Kube-scheduler avoids host port conflict when scheduling pods. + # @default -- Defaults to apply a pod anti-affinity rule to the agent pod - `anti-affinity` + mode: anti-affinity +# -- Affinity for cilium-agent. +affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium +# -- Node selector for cilium-agent. +nodeSelector: + kubernetes.io/os: linux +# -- Node tolerations for agent scheduling to nodes with taints +# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ +tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" +# -- The priority class to use for cilium-agent. +priorityClassName: "" +# -- DNS policy for Cilium agent pods. +# Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy +dnsPolicy: "" +# -- Additional containers added to the cilium DaemonSet. +extraContainers: [] +# -- Additional initContainers added to the cilium Daemonset. +extraInitContainers: [] +# -- Additional agent container arguments. +extraArgs: [] +# -- Additional agent container environment variables. +extraEnv: [] +# -- Additional agent hostPath mounts. +extraHostPathMounts: [] +# - name: host-mnt-data +# mountPath: /host/mnt/data +# hostPath: /mnt/data +# hostPathType: Directory +# readOnly: true +# mountPropagation: HostToContainer +# -- Additional agent volumes. +extraVolumes: [] +# -- Additional agent volumeMounts. +extraVolumeMounts: [] +# -- extraConfig allows you to specify additional configuration parameters to be +# included in the cilium-config configmap. +extraConfig: {} +# my-config-a: "1234" +# my-config-b: |- +# test 1 +# test 2 +# test 3 + +# -- Annotations to be added to all top-level cilium-agent objects (resources under templates/cilium-agent) +annotations: {} +# -- Security Context for cilium-agent pods. 
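+# For example, to track the image tag without the pinned digest (a sketch;
+# this rendering keeps useDigest: true with the v1.17.8 digest):
+# image:
+#   repository: "quay.io/cilium/cilium"
+#   tag: "v1.17.8"
+#   useDigest: false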
+podSecurityContext: + # -- AppArmorProfile options for the `cilium-agent` and init containers + appArmorProfile: + type: "Unconfined" + seccompProfile: + type: "Unconfined" +# -- Annotations to be added to agent pods +podAnnotations: {} +# -- Labels to be added to agent pods +podLabels: {} +# -- Agent resource limits & requests +# ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +resources: {} +# limits: +# cpu: 4000m +# memory: 4Gi +# requests: +# cpu: 100m +# memory: 512Mi + +# -- resources & limits for the agent init containers +initResources: {} +securityContext: + # -- User to run the pod with + # runAsUser: 0 + # -- Run the pod with elevated privileges + privileged: false + # -- SELinux options for the `cilium-agent` and init containers + seLinuxOptions: + level: 's0' + # Running with spc_t since we have removed the privileged mode. + # Users can change it to a different type as long as they have the + # type available on the system. + type: 'spc_t' + capabilities: + # -- Capabilities for the `cilium-agent` container + ciliumAgent: + # Use to set socket permission + - CHOWN + # Used to terminate envoy child process + - KILL + # Used since cilium modifies routing tables, etc... + - NET_ADMIN + # Used since cilium creates raw sockets, etc... + - NET_RAW + # Used since cilium monitor uses mmap + - IPC_LOCK + # Used in iptables. Consider removing once we are iptables-free + #- SYS_MODULE + # Needed to switch network namespaces (used for health endpoint, socket-LB). + # We need it for now but might not need it for >= 5.11 specially + # for the 'SYS_RESOURCE'. + # In >= 5.8 there's already BPF and PERMON capabilities + - SYS_ADMIN + # Could be an alternative for the SYS_ADMIN for the RLIMIT_NPROC + - SYS_RESOURCE + # Both PERFMON and BPF requires kernel 5.8, container runtime + # cri-o >= v1.22.0 or containerd >= v1.5.0. + # If available, SYS_ADMIN can be removed. + #- PERFMON + #- BPF + # Allow discretionary access control (e.g. required for package installation) + - DAC_OVERRIDE + # Allow to set Access Control Lists (ACLs) on arbitrary files (e.g. required for package installation) + - FOWNER + # Allow to execute program that changes GID (e.g. required for package installation) + - SETGID + # Allow to execute program that changes UID (e.g. required for package installation) + - SETUID + # -- Capabilities for the `mount-cgroup` init container + mountCgroup: + # Only used for 'mount' cgroup + - SYS_ADMIN + # Used for nsenter + - SYS_CHROOT + - SYS_PTRACE + # -- capabilities for the `apply-sysctl-overwrites` init container + applySysctlOverwrites: + # Required in order to access host's /etc/sysctl.d dir + - SYS_ADMIN + # Used for nsenter + - SYS_CHROOT + - SYS_PTRACE + # -- Capabilities for the `clean-cilium-state` init container + cleanCiliumState: + # Most of the capabilities here are the same ones used in the + # cilium-agent's container because this container can be used to + # uninstall all Cilium resources, and therefore it is likely that + # will need the same capabilities. + # Used since cilium modifies routing tables, etc... + - NET_ADMIN + # Used in iptables. Consider removing once we are iptables-free + #- SYS_MODULE + # We need it for now but might not need it for >= 5.11 specially + # for the 'SYS_RESOURCE'. 
+ # In >= 5.8 there's already BPF and PERMON capabilities + - SYS_ADMIN + # Could be an alternative for the SYS_ADMIN for the RLIMIT_NPROC + - SYS_RESOURCE + # Both PERFMON and BPF requires kernel 5.8, container runtime + # cri-o >= v1.22.0 or containerd >= v1.5.0. + # If available, SYS_ADMIN can be removed. + #- PERFMON + #- BPF +# -- Cilium agent update strategy +updateStrategy: + type: RollingUpdate + rollingUpdate: + # @schema + # type: [integer, string] + # @schema + maxUnavailable: 2 +# Configuration Values for cilium-agent +aksbyocni: + # -- Enable AKS BYOCNI integration. + # Note that this is incompatible with AKS clusters not created in BYOCNI mode: + # use Azure integration (`azure.enabled`) instead. + enabled: false +# @schema +# type: [boolean, string] +# @schema +# -- Enable installation of PodCIDR routes between worker +# nodes if worker nodes share a common L2 network segment. +autoDirectNodeRoutes: false +# -- Enable skipping of PodCIDR routes between worker +# nodes if the worker nodes are in a different L2 network segment. +directRoutingSkipUnreachable: false +# -- Annotate k8s node upon initialization with Cilium's metadata. +annotateK8sNode: false +azure: + # -- Enable Azure integration. + # Note that this is incompatible with AKS clusters created in BYOCNI mode: use + # AKS BYOCNI integration (`aksbyocni.enabled`) instead. + enabled: false + # usePrimaryAddress: false + # resourceGroup: group1 + # subscriptionID: 00000000-0000-0000-0000-000000000000 + # tenantID: 00000000-0000-0000-0000-000000000000 + # clientID: 00000000-0000-0000-0000-000000000000 + # clientSecret: 00000000-0000-0000-0000-000000000000 + # userAssignedIdentityID: 00000000-0000-0000-0000-000000000000 +alibabacloud: + # -- Enable AlibabaCloud ENI integration + enabled: false +# -- Enable bandwidth manager to optimize TCP and UDP workloads and allow +# for rate-limiting traffic from individual Pods with EDT (Earliest Departure +# Time) through the "kubernetes.io/egress-bandwidth" Pod annotation. +bandwidthManager: + # -- Enable bandwidth manager infrastructure (also prerequirement for BBR) + enabled: false + # -- Activate BBR TCP congestion control for Pods + bbr: false +# -- Configure standalone NAT46/NAT64 gateway +nat46x64Gateway: + # -- Enable RFC8215-prefixed translation + enabled: false +# -- EnableHighScaleIPcache enables the special ipcache mode for high scale +# clusters. The ipcache content will be reduced to the strict minimum and +# traffic will be encapsulated to carry security identities. +highScaleIPcache: + # -- Enable the high scale mode for the ipcache. + enabled: false +# -- Configure L2 announcements l2announcements: + # -- Enable L2 announcements enabled: true + # -- If a lease is not renewed for X duration, the current leader is considered dead, a new leader is picked + # leaseDuration: 15s + # -- The interval at which the leader will renew the lease + # leaseRenewDeadline: 5s + # -- The timeout between retries if renewal fails + # leaseRetryPeriod: 2s +# -- Configure L2 pod announcements +l2podAnnouncements: + # -- Enable L2 pod announcements + enabled: false + # -- Interface used for sending Gratuitous ARP pod announcements + interface: "eth0" +# -- This feature set enables virtual BGP routers to be created via +# CiliumBGPPeeringPolicy CRDs. +bgpControlPlane: + # -- Enables the BGP control plane. + enabled: false + # -- SecretsNamespace is the namespace which BGP support will retrieve secrets from. + secretsNamespace: + # -- Create secrets namespace for BGP secrets. 
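+    # A minimal sketch of enabling the BGP control plane (not enabled in this
+    # rendering); peering itself would still be configured separately via
+    # CiliumBGPPeeringPolicy or the newer BGPv2 CRDs:
+    # bgpControlPlane:
+    #   enabled: true
+    #   secretsNamespace:
+    #     name: kube-system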
+ create: false + # -- The name of the secret namespace to which Cilium agents are given read access + name: kube-system + # -- Status reporting settings (BGPv2 only) + statusReport: + # -- Enable/Disable BGPv2 status reporting + # It is recommended to enable status reporting in general, but if you have any issue + # such as high API server load, you can disable it by setting this to false. + enabled: true +pmtuDiscovery: + # -- Enable path MTU discovery to send ICMP fragmentation-needed replies to + # the client. + enabled: false +bpf: + autoMount: + # -- Enable automatic mount of BPF filesystem + # When `autoMount` is enabled, the BPF filesystem is mounted at + # `bpf.root` path on the underlying host and inside the cilium agent pod. + # If users disable `autoMount`, it's expected that users have mounted + # bpffs filesystem at the specified `bpf.root` volume, and then the + # volume will be mounted inside the cilium agent pod at the same path. + enabled: true + # -- Configure the mount point for the BPF filesystem + root: /sys/fs/bpf + # -- Enables pre-allocation of eBPF map values. This increases + # memory usage but can reduce latency. + preallocateMaps: false + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of entries in auth map. + # @default -- `524288` + authMapMax: ~ + # -- Enable CT accounting for packets and bytes + ctAccounting: false + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of entries in the TCP connection tracking + # table. + # @default -- `524288` + ctTcpMax: ~ + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of entries for the non-TCP connection + # tracking table. + # @default -- `262144` + ctAnyMax: ~ + # -- Control to use a distributed per-CPU backend memory for the core BPF LRU maps + # which Cilium uses. This improves performance significantly, but it is also + # recommended to increase BPF map sizing along with that. + distributedLRU: + # -- Enable distributed LRU backend memory. For compatibility with existing + # installations it is off by default. + enabled: false + # -- Control events generated by the Cilium datapath exposed to Cilium monitor and Hubble. + # Helm configuration for BPF events map rate limiting is experimental and might change + # in upcoming releases. + events: + # -- Default settings for all types of events except dbg and pcap. + default: + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the limit of messages per second that can be written to + # BPF events map. The number of messages is averaged, meaning that if no messages + # were written to the map over 5 seconds, it's possible to write more events + # in the 6th second. If rateLimit is greater than 0, non-zero value for burstLimit must + # also be provided lest the configuration is considered invalid. Setting both burstLimit + # and rateLimit to 0 disables BPF events rate limiting. + # @default -- `0` + rateLimit: ~ + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of messages that can be written to BPF events + # map in 1 second. If burstLimit is greater than 0, non-zero value for rateLimit must + # also be provided lest the configuration is considered invalid. Setting both burstLimit + # and rateLimit to 0 disables BPF events rate limiting. + # @default -- `0` + burstLimit: ~ + drop: + # -- Enable drop events. + enabled: true + policyVerdict: + # -- Enable policy verdict events. 
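+      # For example, a valid events rate limit per the rule above (rateLimit
+      # and burstLimit must be non-zero together; a sketch, not set in this
+      # rendering):
+      # events:
+      #   default:
+      #     rateLimit: 100
+      #     burstLimit: 200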
+ enabled: true + trace: + # -- Enable trace events. + enabled: true + # @schema + # type: [null, integer] + # @schema + # -- Configure the maximum number of service entries in the + # load balancer maps. + lbMapMax: 65536 + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of entries for the NAT table. + # @default -- `524288` + natMax: ~ + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of entries for the neighbor table. + # @default -- `524288` + neighMax: ~ + # @schema + # type: [null, integer] + # @schema + # @default -- `16384` + # -- (int) Configures the maximum number of entries for the node table. + nodeMapMax: ~ + # -- Configure the maximum number of entries in endpoint policy map (per endpoint). + # @schema + # type: [null, integer] + # @schema + policyMapMax: 16384 + # @schema + # type: [null, number, string] + # @schema + # -- (float64) Configure auto-sizing for all BPF maps based on available memory. + # ref: https://docs.cilium.io/en/stable/network/ebpf/maps/ + # @default -- `0.0025` + mapDynamicSizeRatio: ~ + # -- Configure the level of aggregation for monitor notifications. + # Valid options are none, low, medium, maximum. + monitorAggregation: medium + # -- Configure the typical time between monitor notifications for + # active connections. + monitorInterval: "5s" + # -- Configure which TCP flags trigger notifications when seen for the + # first time in a connection. + monitorFlags: "all" + # -- (bool) Allow cluster external access to ClusterIP services. + # @default -- `false` + lbExternalClusterIP: false + # -- (bool) Enable loadBalancerSourceRanges CIDR filtering for all service + # types, not just LoadBalancer services. The corresponding NodePort and + # ClusterIP (if enabled for cluster-external traffic) will also apply the + # CIDR filter. + # @default -- `false` + lbSourceRangeAllTypes: false + # -- (bool) Enable the option to define the load balancing algorithm on + # a per-service basis through service.cilium.io/lb-algorithm annotation. + # @default -- `false` + lbAlgorithmAnnotation: false + # -- (bool) Enable the option to define the load balancing mode (SNAT or DSR) + # on a per-service basis through service.cilium.io/forwarding-mode annotation. + # @default -- `false` + lbModeAnnotation: false + # @schema + # type: [null, boolean] + # @schema + # -- (bool) Enable native IP masquerade support in eBPF + # @default -- `false` + masquerade: ~ + # @schema + # type: [null, boolean] + # @schema + # -- (bool) Configure whether direct routing mode should route traffic via + # host stack (true) or directly and more efficiently out of BPF (false) if + # the kernel supports it. The latter has the implication that it will also + # bypass netfilter in the host namespace. + # @default -- `false` + hostLegacyRouting: true + # @schema + # type: [null, boolean] + # @schema + # -- (bool) Configure the eBPF-based TPROXY (beta) to reduce reliance on iptables rules + # for implementing Layer 7 policy. + # @default -- `false` + tproxy: ~ + # @schema + # type: [null, array] + # @schema + # -- (list) Configure explicitly allowed VLAN id's for bpf logic bypass. + # [0] will allow all VLAN id's without any filtering. + # @default -- `[]` + vlanBypass: ~ + # -- (bool) Disable ExternalIP mitigation (CVE-2020-8554) + # @default -- `false` + disableExternalIPMitigation: false + # -- (bool) Attach endpoint programs using tcx instead of legacy tc hooks on + # supported kernels. 
+ # @default -- `true` + enableTCX: true + # -- (string) Mode for Pod devices for the core datapath (veth, netkit, netkit-l2, lb-only) + # @default -- `veth` + datapathMode: veth +# -- Enable BPF clock source probing for more efficient tick retrieval. +bpfClockProbe: false +# -- Clean all eBPF datapath state from the initContainer of the cilium-agent +# DaemonSet. +# +# WARNING: Use with care! +cleanBpfState: false +# -- Clean all local Cilium state from the initContainer of the cilium-agent +# DaemonSet. Implies cleanBpfState: true. +# +# WARNING: Use with care! +cleanState: false +# -- Wait for KUBE-PROXY-CANARY iptables rule to appear in "wait-for-kube-proxy" +# init container before launching cilium-agent. +# More context can be found in the commit message of below PR +# https://github.com/cilium/cilium/pull/20123 +waitForKubeProxy: false +cni: + # -- Install the CNI configuration and binary files into the filesystem. + install: true + # -- Remove the CNI configuration and binary files on agent shutdown. Enable this + # if you're removing Cilium from the cluster. Disable this to prevent the CNI + # configuration file from being removed during agent upgrade, which can cause + # nodes to go unmanageable. + uninstall: false + # @schema + # type: [null, string] + # @schema + # -- Configure chaining on top of other CNI plugins. Possible values: + # - none + # - aws-cni + # - flannel + # - generic-veth + # - portmap + chainingMode: ~ + # @schema + # type: [null, string] + # @schema + # -- A CNI network name in to which the Cilium plugin should be added as a chained plugin. + # This will cause the agent to watch for a CNI network with this network name. When it is + # found, this will be used as the basis for Cilium's CNI configuration file. If this is + # set, it assumes a chaining mode of generic-veth. As a special case, a chaining mode + # of aws-cni implies a chainingTarget of aws-cni. + chainingTarget: ~ + # -- Make Cilium take ownership over the `/etc/cni/net.d` directory on the + # node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. + # This ensures no Pods can be scheduled using other CNI plugins during Cilium + # agent downtime. + exclusive: true + # -- Configure the log file for CNI logging with retention policy of 7 days. + # Disable CNI file logging by setting this field to empty explicitly. + logFile: /var/run/cilium/cilium-cni.log + # -- Skip writing of the CNI configuration. This can be used if + # writing of the CNI configuration is performed by external automation. + customConf: false + # -- Configure the path to the CNI configuration directory on the host. + confPath: /etc/cni/net.d + # -- Configure the path to the CNI binary directory on the host. + binPath: /opt/cni/bin + # -- Specify the path to a CNI config to read from on agent start. + # This can be useful if you want to manage your CNI + # configuration outside of a Kubernetes environment. This parameter is + # mutually exclusive with the 'cni.configMap' parameter. The agent will + # write this to 05-cilium.conflist on startup. + # readCniConf: /host/etc/cni/net.d/05-sample.conflist.input -externalIPs: - enabled: true + # -- When defined, configMap will mount the provided value as ConfigMap and + # interpret the cniConf variable as CNI configuration file and write it + # when the agent starts up + # configMap: cni-configuration + # -- Configure the key in the CNI ConfigMap to read the contents of + # the CNI configuration from. 
+ configMapKey: cni-config + # -- Configure the path to where to mount the ConfigMap inside the agent pod. + confFileMountPath: /tmp/cni-configuration + # -- Configure the path to where the CNI configuration directory is mounted + # inside the agent pod. + hostConfDirMountPath: /host/etc/cni/net.d + # -- Specifies the resources for the cni initContainer + resources: + requests: + cpu: 100m + memory: 10Mi + # -- Enable route MTU for pod netns when CNI chaining is used + enableRouteMTUForCNIChaining: false +# -- (string) Configure how frequently garbage collection should occur for the datapath +# connection tracking table. +# @default -- `"0s"` +conntrackGCInterval: "" +# -- (string) Configure the maximum frequency for the garbage collection of the +# connection tracking table. Only affects the automatic computation for the frequency +# and has no effect when 'conntrackGCInterval' is set. This can be set to more frequently +# clean up unused identities created from ToFQDN policies. +conntrackGCMaxInterval: "" +# -- (string) Configure timeout in which Cilium will exit if CRDs are not available +# @default -- `"5m"` +crdWaitTimeout: "" +# -- Tail call hooks for custom eBPF programs. +customCalls: + # -- Enable tail call hooks for custom eBPF programs. + enabled: false +daemon: + # -- Configure where Cilium runtime state should be stored. + runPath: "/var/run/cilium" + # @schema + # type: [null, string] + # @schema + # -- Configure a custom list of possible configuration override sources + # The default is "config-map:cilium-config,cilium-node-config". For supported + # values, see the help text for the build-config subcommand. + # Note that this value should be a comma-separated string. + configSources: ~ + # @schema + # type: [null, string] + # @schema + # -- allowedConfigOverrides is a list of config-map keys that can be overridden. + # That is to say, if this value is set, config sources (excepting the first one) can + # only override keys in this list. + # + # This takes precedence over blockedConfigOverrides. + # + # By default, all keys may be overridden. To disable overrides, set this to "none" or + # change the configSources variable. + allowedConfigOverrides: ~ + # @schema + # type: [null, string] + # @schema + # -- blockedConfigOverrides is a list of config-map keys that may not be overridden. + # In other words, if any of these keys appear in a configuration source excepting the + # first one, they will be ignored + # + # This is ignored if allowedConfigOverrides is set. + # + # By default, all keys may be overridden. + blockedConfigOverrides: ~ + # @schema + # type: [null, boolean] + # @schema + # -- enableSourceIPVerification is a boolean flag to enable or disable the Source IP verification + # of endpoints. This flag is useful when Cilium is chained with other CNIs. + # + # By default, this functionality is enabled + enableSourceIPVerification: true +# -- Specify which network interfaces can run the eBPF datapath. This means +# that a packet sent from a pod to a destination outside the cluster will be +# masqueraded (to an output device IPv4 address), if the output device runs the +# program. When not specified, probing will automatically detect devices that have +# a non-local route. This should be used only when autodetection is not suitable. devices: eth+ -enableCiliumEndpointSlice: true +# -- Enables experimental support for the detection of new and removed datapath +# devices. When devices change the eBPF datapath is reloaded and services updated. 
+# If "devices" is set then only those devices, or devices matching a wildcard will +# be considered. +# +# This option has been deprecated and is a no-op. +enableRuntimeDeviceDetection: true +# -- Forces the auto-detection of devices, even if specific devices are explicitly listed +forceDeviceDetection: false +# -- Chains to ignore when installing feeder rules. +# disableIptablesFeederRules: "" -loadBalancer: - # https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/#maglev-consistent-hashing - algorithm: maglev +# -- Limit iptables-based egress masquerading to interface selector. +# egressMasqueradeInterfaces: "" -gatewayAPI: - enabled: true -envoy: - securityContext: - capabilities: - keepCapNetBindService: true - envoy: [ NET_ADMIN, PERFMON, BPF ] +# -- Enable setting identity mark for local traffic. +# enableIdentityMark: true +# -- Enable Kubernetes EndpointSlice feature in Cilium if the cluster supports it. +# enableK8sEndpointSlice: true + +# -- Enable CiliumEndpointSlice feature (deprecated, please use `ciliumEndpointSlice.enabled` instead). +enableCiliumEndpointSlice: false +ciliumEndpointSlice: + # -- Enable Cilium EndpointSlice feature. + enabled: false + # -- List of rate limit options to be used for the CiliumEndpointSlice controller. + # Each object in the list must have the following fields: + # nodes: Count of nodes at which to apply the rate limit. + # limit: The sustained request rate in requests per second. The maximum rate that can be configured is 50. + # burst: The burst request rate in requests per second. The maximum burst that can be configured is 100. + rateLimits: + - nodes: 0 + limit: 10 + burst: 20 + - nodes: 100 + limit: 50 + burst: 100 + # @schema + # enum: ["identity", "fcfs"] + # @schema + # -- The slicing mode to use for CiliumEndpointSlices. + # identity groups together CiliumEndpoints that share the same identity. + # fcfs groups together CiliumEndpoints in a first-come-first-serve basis, filling in the largest non-full slice first. + sliceMode: identity +envoyConfig: + # -- Enable CiliumEnvoyConfig CRD + # CiliumEnvoyConfig CRD can also be implicitly enabled by other options. + enabled: false + # -- SecretsNamespace is the namespace in which envoy SDS will retrieve secrets from. + secretsNamespace: + # -- Create secrets namespace for CiliumEnvoyConfig CRDs. + create: true + # -- The name of the secret namespace to which Cilium agents are given read access. + name: cilium-secrets + # -- Interval in which an attempt is made to reconcile failed EnvoyConfigs. If the duration is zero, the retry is deactivated. + retryInterval: 15s ingressController: + # -- Enable cilium ingress controller + # This will automatically set enable-envoy-config as well. enabled: true + # -- Set cilium ingress controller to be the default ingress controller + # This will let cilium ingress controller route entries without ingress class set default: true + # -- Default ingress load balancer mode + # Supported values: shared, dedicated + # For granular control, use the following annotations on the ingress resource: + # "ingress.cilium.io/loadbalancer-mode: dedicated" (or "shared"). loadbalancerMode: shared + # -- Enforce https for host having matching TLS host in Ingress. + # Incoming traffic to http listener will return 308 http error code with respective location in header. + enforceHttps: true + # -- Enable proxy protocol for all Ingress listeners. Note that _only_ Proxy protocol traffic will be accepted once this is enabled. 
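+  # Illustrative setting (not an upstream default): only switch this to true
+  # when the load balancer in front of the ingress actually emits PROXY
+  # protocol headers, since plain traffic is no longer accepted once enabled.
+  # enableProxyProtocol: true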
+ enableProxyProtocol: false + # -- IngressLBAnnotations are the annotation and label prefixes, which are used to filter annotations and/or labels to propagate from Ingress to the Load Balancer service + ingressLBAnnotationPrefixes: ['lbipam.cilium.io', 'nodeipam.cilium.io', 'service.beta.kubernetes.io', 'service.kubernetes.io', 'cloud.google.com'] + # @schema + # type: [null, string] + # @schema + # -- Default secret namespace for ingresses without .spec.tls[].secretName set. + defaultSecretNamespace: + # @schema + # type: [null, string] + # @schema + # -- Default secret name for ingresses without .spec.tls[].secretName set. + defaultSecretName: + # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from. + secretsNamespace: + # -- Create secrets namespace for Ingress. + create: true + # -- Name of Ingress secret namespace. + name: cilium-secrets + # -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name. + # If disabled, TLS secrets must be maintained externally. + sync: true + # -- Load-balancer service in shared mode. + # This is a single load-balancer service for all Ingress resources. service: + # -- Service name + name: cilium-ingress + # -- Labels to be added for the shared LB service + labels: {} + # -- Annotations to be added for the shared LB service annotations: io.cilium/lb-ipam-ips: 192.168.0.180 - -hubble: - peerService: - clusterDomain: cluster.local + # -- Service type for the shared LB service + type: LoadBalancer + # @schema + # type: [null, integer] + # @schema + # -- Configure a specific nodePort for insecure HTTP traffic on the shared LB service + insecureNodePort: ~ + # @schema + # type: [null, integer] + # @schema + # -- Configure a specific nodePort for secure HTTPS traffic on the shared LB service + secureNodePort: ~ + # @schema + # type: [null, string] + # @schema + # -- Configure a specific loadBalancerClass on the shared LB service (requires Kubernetes 1.24+) + loadBalancerClass: ~ + # @schema + # type: [null, string] + # @schema + # -- Configure a specific loadBalancerIP on the shared LB service + loadBalancerIP: ~ + # @schema + # type: [null, boolean] + # @schema + # -- Configure if node port allocation is required for LB service + # ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation + allocateLoadBalancerNodePorts: ~ + # -- Control how traffic from external sources is routed to the LoadBalancer Kubernetes Service for Cilium Ingress in shared mode. + # Valid values are "Cluster" and "Local". + # ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#external-traffic-policy + externalTrafficPolicy: Cluster + # Host Network related configuration + hostNetwork: + # -- Configure whether the Envoy listeners should be exposed on the host network. + enabled: false + # -- Configure a specific port on the host network that gets used for the shared listener. + sharedListenerPort: 8080 + # Specify the nodes where the Ingress listeners should be exposed + nodes: + # -- Specify the labels of the nodes where the Ingress listeners should be exposed + # + # matchLabels: + # kubernetes.io/os: linux + # kubernetes.io/hostname: kind-worker + matchLabels: {} +gatewayAPI: + # -- Enable support for Gateway API in cilium + # This will automatically set enable-envoy-config as well. enabled: true + # -- Enable proxy protocol for all GatewayAPI listeners. Note that _only_ Proxy protocol traffic will be accepted once this is enabled. 
+ enableProxyProtocol: false + # -- Enable Backend Protocol selection support (GEP-1911) for Gateway API via appProtocol. + enableAppProtocol: false + # -- Enable ALPN for all listeners configured with Gateway API. ALPN will attempt HTTP/2, then HTTP 1.1. + # Note that this will also enable `appProtocol` support, and services that wish to use HTTP/2 will need to indicate that via their `appProtocol`. + enableAlpn: false + # -- The number of additional GatewayAPI proxy hops from the right side of the HTTP header to trust when determining the origin client's IP address. + xffNumTrustedHops: 0 + # -- Control how traffic from external sources is routed to the LoadBalancer Kubernetes Service for all Cilium GatewayAPI Gateway instances. Valid values are "Cluster" and "Local". + # Note that this value will be ignored when `hostNetwork.enabled == true`. + # ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#external-traffic-policy + externalTrafficPolicy: Cluster + gatewayClass: + # -- Enable creation of GatewayClass resource + # The default value is 'auto' which decides according to presence of gateway.networking.k8s.io/v1/GatewayClass in the cluster. + # Other possible values are 'true' and 'false', which will either always or never create the GatewayClass, respectively. + create: auto + # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from. + secretsNamespace: + # -- Create secrets namespace for Gateway API. + create: true + # -- Name of Gateway API secret namespace. + name: cilium-secrets + # -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name. + # If disabled, TLS secrets must be maintained externally. + sync: true + # Host Network related configuration + hostNetwork: + # -- Configure whether the Envoy listeners should be exposed on the host network. + enabled: false + # Specify the nodes where the Ingress listeners should be exposed + nodes: + # -- Specify the labels of the nodes where the Ingress listeners should be exposed + # + # matchLabels: + # kubernetes.io/os: linux + # kubernetes.io/hostname: kind-worker + matchLabels: {} +# -- Enables the fallback compatibility solution for when the xt_socket kernel +# module is missing and it is needed for the datapath L7 redirection to work +# properly. See documentation for details on when this can be disabled: +# https://docs.cilium.io/en/stable/operations/system_requirements/#linux-kernel. +enableXTSocketFallback: true +encryption: + # -- Enable transparent network encryption. + enabled: false + # -- Encryption method. Can be either ipsec or wireguard. + type: ipsec + # -- Enable encryption for pure node to node traffic. + # This option is only effective when encryption.type is set to "wireguard". + nodeEncryption: false + # -- Configure the WireGuard Pod2Pod strict mode. + strictMode: + # -- Enable WireGuard Pod2Pod strict mode. + enabled: false + # -- CIDR for the WireGuard Pod2Pod strict mode. + cidr: "" + # -- Allow dynamic lookup of remote node identities. + # This is required when tunneling is used or direct routing is used and the node CIDR and pod CIDR overlap. + allowRemoteNodeIdentities: false + ipsec: + # -- Name of the key file inside the Kubernetes secret configured via secretName. + keyFile: keys + # -- Path to mount the secret inside the Cilium pod. + mountPath: /etc/ipsec + # -- Name of the Kubernetes secret containing the encryption keys. + secretName: cilium-ipsec-keys + # -- The interface to use for encrypted traffic. 
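+    # Example (hypothetical device name; pick the interface that carries
+    # node-to-node traffic in your environment):
+    # interface: eth0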
+ interface: "" + # -- Enable the key watcher. If disabled, a restart of the agent will be + # necessary on key rotations. + keyWatcher: true + # -- Maximum duration of the IPsec key rotation. The previous key will be + # removed after that delay. + keyRotationDuration: "5m" + # -- Enable IPsec encrypted overlay + encryptedOverlay: false + wireguard: + # -- Controls WireGuard PersistentKeepalive option. Set 0s to disable. + persistentKeepalive: 0s +endpointHealthChecking: + # -- Enable connectivity health checking between virtual endpoints. + enabled: true +endpointRoutes: + # @schema + # type: [boolean, string] + # @schema + # -- Enable use of per endpoint routes instead of routing via + # the cilium_host interface. + enabled: false +k8sNetworkPolicy: + # -- Enable support for K8s NetworkPolicy + enabled: true +# -- Enable endpoint lockdown on policy map overflow. +endpointLockdownOnMapOverflow: false +eni: + # -- Enable Elastic Network Interface (ENI) integration. + enabled: false + # -- Update ENI Adapter limits from the EC2 API + updateEC2AdapterLimitViaAPI: true + # -- Release IPs not used from the ENI + awsReleaseExcessIPs: false + # -- Enable ENI prefix delegation + awsEnablePrefixDelegation: false + # -- EC2 API endpoint to use + ec2APIEndpoint: "" + # -- Tags to apply to the newly created ENIs + eniTags: {} + # -- Interval for garbage collection of unattached ENIs. Set to "0s" to disable. + # @default -- `"5m"` + gcInterval: "" + # -- Additional tags attached to ENIs created by Cilium. + # Dangling ENIs with this tag will be garbage collected + # @default -- `{"io.cilium/cilium-managed":"true,"io.cilium/cluster-name":""}` + gcTags: {} + # -- If using IAM role for Service Accounts will not try to + # inject identity values from cilium-aws kubernetes secret. + # Adds annotation to service account if managed by Helm. + # See https://github.com/aws/amazon-eks-pod-identity-webhook + iamRole: "" + # -- Filter via subnet IDs which will dictate which subnets are going to be used to create new ENIs + # Important note: This requires that each instance has an ENI with a matching subnet attached + # when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium, + # use the CNI configuration file settings (cni.customConf) instead. + subnetIDsFilter: [] + # -- Filter via tags (k=v) which will dictate which subnets are going to be used to create new ENIs + # Important note: This requires that each instance has an ENI with a matching subnet attached + # when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium, + # use the CNI configuration file settings (cni.customConf) instead. + subnetTagsFilter: [] + # -- Filter via AWS EC2 Instance tags (k=v) which will dictate which AWS EC2 Instances + # are going to be used to create new ENIs + instanceTagsFilter: [] +# fragmentTracking enables IPv4 fragment tracking support in the datapath. +# fragmentTracking: true +gke: + # -- Enable Google Kubernetes Engine integration + enabled: false +# -- Enable connectivity health checking. +healthChecking: true +# -- TCP port for the agent health API. This is not the port for cilium-health. +healthPort: 9879 +# -- Number of ICMP requests sent for each health check before marking a node or endpoint unreachable. +healthCheckICMPFailureThreshold: 3 +# -- Configure the host firewall. +hostFirewall: + # -- Enables the enforcement of host policies in the eBPF datapath. + enabled: false +hostPort: + # -- Enable hostPort service support. 
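+  # For reference, a workload requests a host port through the standard
+  # Kubernetes Pod fields shown below (illustrative Pod snippet, not a chart
+  # value); with this feature enabled Cilium handles such mappings in its
+  # datapath:
+  #   ports:
+  #     - containerPort: 8080
+  #       hostPort: 8080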
+ enabled: false +# -- Configure socket LB +socketLB: + # -- Enable socket LB + enabled: false + # -- Disable socket lb for non-root ns. This is used to enable Istio routing rules. + # hostNamespaceOnly: false + # -- Enable terminating pod connections to deleted service backends. + # terminatePodConnections: true + # -- Enables tracing for socket-based load balancing. + # tracing: true +# -- Configure certificate generation for Hubble integration. +# If hubble.tls.auto.method=cronJob, these values are used +# for the Kubernetes CronJob which will be scheduled regularly to +# (re)generate any certificates not provided manually. +certgen: + # -- When set to true the certificate authority secret is created. + generateCA: true + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/certgen" + tag: "v0.2.1" + digest: "sha256:ab6b1928e9c5f424f6b0f51c68065b9fd85e2f8d3e5f21fbd1a3cb27e6fb9321" + useDigest: true + pullPolicy: "IfNotPresent" + # -- Seconds after which the completed job pod will be deleted + ttlSecondsAfterFinished: 1800 + # -- Labels to be added to hubble-certgen pods + podLabels: {} + # -- Annotations to be added to the hubble-certgen initial Job and CronJob + annotations: + job: {} + cronJob: {} + # -- Node selector for certgen + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: {} + # -- Priority class for certgen + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass + priorityClassName: "" + # -- Node tolerations for pod assignment on nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: [] + # -- Additional certgen volumes. + extraVolumes: [] + # -- Additional certgen volumeMounts. + extraVolumeMounts: [] + # -- Affinity for certgen + affinity: {} +hubble: + # -- Enable Hubble (true by default). + enabled: true + # -- Annotations to be added to all top-level hubble objects (resources under templates/hubble) + annotations: {} + # -- Buffer size of the channel Hubble uses to receive monitor events. If this + # value is not set, the queue size is set to the default monitor queue size. + # eventQueueSize: "" + + # -- Number of recent flows for Hubble to cache. Defaults to 4095. + # Possible values are: + # 1, 3, 7, 15, 31, 63, 127, 255, 511, 1023, + # 2047, 4095, 8191, 16383, 32767, 65535 + # eventBufferCapacity: "4095" + + # -- Hubble metrics configuration. + # See https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics + # for more comprehensive documentation about Hubble metrics. + metrics: + # @schema + # type: [null, array] + # @schema + # -- Configures the list of metrics to collect. If empty or null, metrics + # are disabled. + # Example: + # + # enabled: + # - dns:query;ignoreAAAA + # - drop + # - tcp + # - flow + # - icmp + # - http + # + # You can specify the list of metrics from the helm CLI: + # + # --set hubble.metrics.enabled="{dns:query;ignoreAAAA,drop,tcp,flow,icmp,http}" + # + enabled: ~ + # -- Enables exporting hubble metrics in OpenMetrics format. + enableOpenMetrics: false + # -- Configure the port the hubble metric server listens on. + port: 9965 + tls: + # Enable hubble metrics server TLS. + enabled: false + # Configure hubble metrics server TLS. + server: + # -- Name of the Secret containing the certificate and key for the Hubble metrics server. + # If specified, cert and key are ignored. 
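+        # Example (hypothetical Secret name holding the server certificate and
+        # key described above):
+        # existingSecret: hubble-metrics-server-certs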
+ existingSecret: "" + # -- base64 encoded PEM values for the Hubble metrics server certificate (deprecated). + # Use existingSecret instead. + cert: "" + # -- base64 encoded PEM values for the Hubble metrics server key (deprecated). + # Use existingSecret instead. + key: "" + # -- Extra DNS names added to certificate when it's auto generated + extraDnsNames: [] + # -- Extra IP addresses added to certificate when it's auto generated + extraIpAddresses: [] + # -- Configure mTLS for the Hubble metrics server. + mtls: + # When set to true enforces mutual TLS between Hubble Metrics server and its clients. + # False allow non-mutual TLS connections. + # This option has no effect when TLS is disabled. + enabled: false + useSecret: false + # -- Name of the ConfigMap containing the CA to validate client certificates against. + # If mTLS is enabled and this is unspecified, it will default to the + # same CA used for Hubble metrics server certificates. + name: ~ + # -- Entry of the ConfigMap containing the CA. + key: ca.crt + # -- Annotations to be added to hubble-metrics service. + serviceAnnotations: {} + serviceMonitor: + # -- Create ServiceMonitor resources for Prometheus Operator. + # This requires the prometheus CRDs to be available. + # ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + enabled: false + # -- Labels to add to ServiceMonitor hubble + labels: {} + # -- Annotations to add to ServiceMonitor hubble + annotations: {} + # -- jobLabel to add for ServiceMonitor hubble + jobLabel: "" + # -- Interval for scrape metrics. + interval: "10s" + # -- Relabeling configs for the ServiceMonitor hubble + relabelings: + - sourceLabels: + - __meta_kubernetes_pod_node_name + targetLabel: node + replacement: ${1} + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor hubble + metricRelabelings: ~ + # Configure TLS for the ServiceMonitor. + # Note, when using TLS you will either need to specify + # tlsConfig.insecureSkipVerify or specify a CA to use. + tlsConfig: {} + # -- Grafana dashboards for hubble + # grafana can import dashboards based on the label and value + # ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards + dashboards: + enabled: false + label: grafana_dashboard + # @schema + # type: [null, string] + # @schema + namespace: ~ + labelValue: "1" + annotations: {} + # Dynamic metrics may be reconfigured without a need of agent restarts. + dynamic: + enabled: false + config: + # ---- Name of configmap with configuration that may be altered to reconfigure metric handlers within a running agent. + configMapName: cilium-dynamic-metrics-config + # ---- True if helm installer should create config map. + # Switch to false if you want to self maintain the file content. + createConfigMap: true + # ---- Exporters configuration in YAML format. + content: [] + # - name: dns + # contextOptions: [] + # includeFilters: [] + # excludeFilters: [] + # -- Unix domain socket path to listen to when Hubble is enabled. + socketPath: /var/run/cilium/hubble.sock + # -- Enables redacting sensitive information present in Layer 7 flows. + redact: + enabled: false + http: + # -- Enables redacting URL query (GET) parameters. 
+ # Example: + # + # redact: + # enabled: true + # http: + # urlQuery: true + # + # You can specify the options from the helm CLI: + # + # --set hubble.redact.enabled="true" + # --set hubble.redact.http.urlQuery="true" + urlQuery: false + # -- Enables redacting user info, e.g., password when basic auth is used. + # Example: + # + # redact: + # enabled: true + # http: + # userInfo: true + # + # You can specify the options from the helm CLI: + # + # --set hubble.redact.enabled="true" + # --set hubble.redact.http.userInfo="true" + userInfo: true + headers: + # -- List of HTTP headers to allow: headers not matching will be redacted. Note: `allow` and `deny` lists cannot be used both at the same time, only one can be present. + # Example: + # redact: + # enabled: true + # http: + # headers: + # allow: + # - traceparent + # - tracestate + # - Cache-Control + # + # You can specify the options from the helm CLI: + # --set hubble.redact.enabled="true" + # --set hubble.redact.http.headers.allow="traceparent,tracestate,Cache-Control" + allow: [] + # -- List of HTTP headers to deny: matching headers will be redacted. Note: `allow` and `deny` lists cannot be used both at the same time, only one can be present. + # Example: + # redact: + # enabled: true + # http: + # headers: + # deny: + # - Authorization + # - Proxy-Authorization + # + # You can specify the options from the helm CLI: + # --set hubble.redact.enabled="true" + # --set hubble.redact.http.headers.deny="Authorization,Proxy-Authorization" + deny: [] + kafka: + # -- Enables redacting Kafka's API key. + # Example: + # + # redact: + # enabled: true + # kafka: + # apiKey: true + # + # You can specify the options from the helm CLI: + # + # --set hubble.redact.enabled="true" + # --set hubble.redact.kafka.apiKey="true" + apiKey: true + # -- An additional address for Hubble to listen to. + # Set this field ":4244" if you are enabling Hubble Relay, as it assumes that + # Hubble is listening on port 4244. + listenAddress: ":4244" + # -- Whether Hubble should prefer to announce IPv6 or IPv4 addresses if both are available. + preferIpv6: false + # @schema + # type: [null, boolean] + # @schema + # -- (bool) Skip Hubble events with unknown cgroup ids + # @default -- `true` + skipUnknownCGroupIDs: ~ + peerService: + # -- Service Port for the Peer service. + # If not set, it is dynamically assigned to port 443 if TLS is enabled and to + # port 80 if not. + # servicePort: 80 + # -- Target Port for the Peer service, must match the hubble.listenAddress' + # port. + targetPort: 4244 + # -- The cluster domain to use to query the Hubble Peer service. It should + # be the local cluster. + clusterDomain: cluster.local + # -- TLS configuration for Hubble + tls: + # -- Enable mutual TLS for listenAddress. Setting this value to false is + # highly discouraged as the Hubble API provides access to potentially + # sensitive network flow metadata and is exposed on the host network. + enabled: true + # -- Configure automatic TLS certificates generation. + auto: + # -- Auto-generate certificates. + # When set to true, automatically generate a CA and certificates to + # enable mTLS between Hubble server and Hubble Relay instances. If set to + # false, the certs for Hubble server need to be provided by setting + # appropriate values below. + enabled: true + # -- Set the method to auto-generate certificates. Supported values: + # - helm: This method uses Helm to generate all certificates. 
+ # - cronJob: This method uses a Kubernetes CronJob the generate any + # certificates not provided by the user at installation + # time. + # - certmanager: This method use cert-manager to generate & rotate certificates. + method: helm + # -- Generated certificates validity duration in days. + # + # Defaults to 365 days (1 year) because MacOS does not accept + # self-signed certificates with expirations > 825 days. + certValidityDuration: 365 + # -- Schedule for certificates regeneration (regardless of their expiration date). + # Only used if method is "cronJob". If nil, then no recurring job will be created. + # Instead, only the one-shot job is deployed to generate the certificates at + # installation time. + # + # Defaults to midnight of the first day of every fourth month. For syntax, see + # https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax + schedule: "0 0 1 */4 *" + # [Example] + # certManagerIssuerRef: + # group: cert-manager.io + # kind: ClusterIssuer + # name: ca-issuer + # -- certmanager issuer used when hubble.tls.auto.method=certmanager. + certManagerIssuerRef: {} + # -- The Hubble server certificate and private key + server: + # -- Name of the Secret containing the certificate and key for the Hubble server. + # If specified, cert and key are ignored. + existingSecret: "" + # -- base64 encoded PEM values for the Hubble server certificate (deprecated). + # Use existingSecret instead. + cert: "" + # -- base64 encoded PEM values for the Hubble server key (deprecated). + # Use existingSecret instead. + key: "" + # -- Extra DNS names added to certificate when it's auto generated + extraDnsNames: [] + # -- Extra IP addresses added to certificate when it's auto generated + extraIpAddresses: [] relay: + # -- Enable Hubble Relay (requires hubble.enabled=true) enabled: true + # -- Roll out Hubble Relay pods automatically when configmap is updated. rollOutPods: true + # -- Hubble-relay container image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/hubble-relay" + tag: "v1.17.8" + # hubble-relay-digest + digest: "sha256:2e576bf7a02291c07bffbc1ca0a66a6c70f4c3eb155480e5b3ac027bedd2858b" + useDigest: true + pullPolicy: "IfNotPresent" + # -- Specifies the resources for the hubble-relay pods + resources: {} + # -- Number of replicas run for the hubble-relay deployment. + replicas: 1 + # -- Affinity for hubble-replay + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium + # -- Pod topology spread constraints for hubble-relay + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Node labels for pod assignment + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for pod assignment on nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: [] + # -- Additional hubble-relay environment variables. 
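+    # Example (hypothetical variable; entries are assumed to use the standard
+    # Kubernetes EnvVar structure):
+    # extraEnv:
+    #   - name: GODEBUG
+    #     value: "madvdontneed=1"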
+ extraEnv: [] + # -- Annotations to be added to all top-level hubble-relay objects (resources under templates/hubble-relay) + annotations: {} + # -- Annotations to be added to hubble-relay pods + podAnnotations: {} + # -- Labels to be added to hubble-relay pods + podLabels: {} + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # @schema + # type: [null, integer, string] + # @schema + # -- Minimum number/percentage of pods that should remain scheduled. + # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # @schema + # type: [null, integer, string] + # @schema + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + # -- The priority class to use for hubble-relay + priorityClassName: "" + # -- Configure termination grace period for hubble relay Deployment. + terminationGracePeriodSeconds: 1 + # -- hubble-relay update strategy + updateStrategy: + type: RollingUpdate + rollingUpdate: + # @schema + # type: [integer, string] + # @schema + maxUnavailable: 1 + # -- Additional hubble-relay volumes. + extraVolumes: [] + # -- Additional hubble-relay volumeMounts. + extraVolumeMounts: [] + # -- hubble-relay pod security context + podSecurityContext: + fsGroup: 65532 + # -- hubble-relay container security context + securityContext: + # readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 + capabilities: + drop: + - ALL + # -- hubble-relay service configuration. + service: + # --- The type of service used for Hubble Relay access, either ClusterIP, NodePort or LoadBalancer. + type: ClusterIP + # --- The port to use when the service type is set to NodePort. + nodePort: 31234 + # -- Host to listen to. Specify an empty string to bind to all the interfaces. + listenHost: "" + # -- Port to listen to. + listenPort: "4245" + # -- TLS configuration for Hubble Relay + tls: + # -- The hubble-relay client certificate and private key. + # This keypair is presented to Hubble server instances for mTLS + # authentication and is required when hubble.tls.enabled is true. + # These values need to be set manually if hubble.tls.auto.enabled is false. + client: + # -- Name of the Secret containing the certificate and key for the Hubble metrics server. + # If specified, cert and key are ignored. + existingSecret: "" + # -- base64 encoded PEM values for the Hubble relay client certificate (deprecated). + # Use existingSecret instead. + cert: "" + # -- base64 encoded PEM values for the Hubble relay client key (deprecated). + # Use existingSecret instead. + key: "" + # -- The hubble-relay server certificate and private key + server: + # When set to true, enable TLS on for Hubble Relay server + # (ie: for clients connecting to the Hubble Relay API). + enabled: false + # When set to true enforces mutual TLS between Hubble Relay server and its clients. + # False allow non-mutual TLS connections. + # This option has no effect when TLS is disabled. + mtls: false + # -- Name of the Secret containing the certificate and key for the Hubble relay server. + # If specified, cert and key are ignored. + existingSecret: "" + # -- base64 encoded PEM values for the Hubble relay server certificate (deprecated). + # Use existingSecret instead. + cert: "" + # -- base64 encoded PEM values for the Hubble relay server key (deprecated). + # Use existingSecret instead. 
+ key: "" + # -- extra DNS names added to certificate when its auto gen + extraDnsNames: [] + # -- extra IP addresses added to certificate when its auto gen + extraIpAddresses: [] + # DNS name used by the backend to connect to the relay + # This is a simple workaround as the relay certificates are currently hardcoded to + # *.hubble-relay.cilium.io + # See https://github.com/cilium/cilium/pull/28709#discussion_r1371792546 + # For GKE Dataplane V2 this should be set to relay.kube-system.svc.cluster.local + relayName: "ui.hubble-relay.cilium.io" + # @schema + # type: [null, string] + # @schema + # -- Dial timeout to connect to the local hubble instance to receive peer information (e.g. "30s"). + # + # This option has been deprecated and is a no-op. + dialTimeout: ~ + # @schema + # type: [null, string] + # @schema + # -- Backoff duration to retry connecting to the local hubble instance in case of failure (e.g. "30s"). + retryTimeout: ~ + # @schema + # type: [null, integer] + # @schema + # -- (int) Max number of flows that can be buffered for sorting before being sent to the + # client (per request) (e.g. 100). + sortBufferLenMax: ~ + # @schema + # type: [null, string] + # @schema + # -- When the per-request flows sort buffer is not full, a flow is drained every + # time this timeout is reached (only affects requests in follow-mode) (e.g. "1s"). + sortBufferDrainTimeout: ~ + # -- Port to use for the k8s service backed by hubble-relay pods. + # If not set, it is dynamically assigned to port 443 if TLS is enabled and to + # port 80 if not. + # servicePort: 80 + + # -- Enable prometheus metrics for hubble-relay on the configured port at + # /metrics + prometheus: + enabled: false + port: 9966 + serviceMonitor: + # -- Enable service monitors. + # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + enabled: false + # -- Labels to add to ServiceMonitor hubble-relay + labels: {} + # -- Annotations to add to ServiceMonitor hubble-relay + annotations: {} + # -- Interval for scrape metrics. + interval: "10s" + # -- Specify the Kubernetes namespace where Prometheus expects to find + # service monitors configured. + # namespace: "" + # @schema + # type: [null, array] + # @schema + # -- Relabeling configs for the ServiceMonitor hubble-relay + relabelings: ~ + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor hubble-relay + metricRelabelings: ~ + gops: + # -- Enable gops for hubble-relay + enabled: true + # -- Configure gops listen port for hubble-relay + port: 9893 + pprof: + # -- Enable pprof for hubble-relay + enabled: false + # -- Configure pprof listen address for hubble-relay + address: localhost + # -- Configure pprof listen port for hubble-relay + port: 6062 ui: + # -- Whether to enable the Hubble UI. enabled: true + standalone: + # -- When true, it will allow installing the Hubble UI only, without checking dependencies. + # It is useful if a cluster already has cilium and Hubble relay installed and you just + # want Hubble UI to be deployed. + # When installed via helm, installing UI should be done via `helm upgrade` and when installed via the cilium cli, then `cilium hubble enable --ui` + enabled: false + tls: + # -- When deploying Hubble UI in standalone, with tls enabled for Hubble relay, it is required + # to provide a volume for mounting the client certificates. 
+ certsVolume: {} + # projected: + # defaultMode: 0400 + # sources: + # - secret: + # name: hubble-ui-client-certs + # items: + # - key: tls.crt + # path: client.crt + # - key: tls.key + # path: client.key + # - key: ca.crt + # path: hubble-relay-ca.crt + # -- Roll out Hubble-ui pods automatically when configmap is updated. rollOutPods: true + tls: + client: + # -- Name of the Secret containing the client certificate and key for Hubble UI + # If specified, cert and key are ignored. + existingSecret: "" + # -- base64 encoded PEM values for the Hubble UI client certificate (deprecated). + # Use existingSecret instead. + cert: "" + # -- base64 encoded PEM values for the Hubble UI client key (deprecated). + # Use existingSecret instead. + key: "" + backend: + # -- Hubble-ui backend image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/hubble-ui-backend" + tag: "v0.13.3" + digest: "sha256:db1454e45dc39ca41fbf7cad31eec95d99e5b9949c39daaad0fa81ef29d56953" + useDigest: true + pullPolicy: "IfNotPresent" + # -- Hubble-ui backend security context. + securityContext: {} + # -- Additional hubble-ui backend environment variables. + extraEnv: [] + # -- Additional hubble-ui backend volumes. + extraVolumes: [] + # -- Additional hubble-ui backend volumeMounts. + extraVolumeMounts: [] + livenessProbe: + # -- Enable liveness probe for Hubble-ui backend (requires Hubble-ui 0.12+) + enabled: false + readinessProbe: + # -- Enable readiness probe for Hubble-ui backend (requires Hubble-ui 0.12+) + enabled: false + # -- Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment. + resources: {} + # limits: + # cpu: 1000m + # memory: 1024M + # requests: + # cpu: 100m + # memory: 64Mi + frontend: + # -- Hubble-ui frontend image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/hubble-ui" + tag: "v0.13.3" + digest: "sha256:661d5de7050182d495c6497ff0b007a7a1e379648e60830dd68c4d78ae21761d" + useDigest: true + pullPolicy: "IfNotPresent" + # -- Hubble-ui frontend security context. + securityContext: {} + # -- Additional hubble-ui frontend environment variables. + extraEnv: [] + # -- Additional hubble-ui frontend volumes. + extraVolumes: [] + # -- Additional hubble-ui frontend volumeMounts. + extraVolumeMounts: [] + # -- Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment. + resources: {} + # limits: + # cpu: 1000m + # memory: 1024M + # requests: + # cpu: 100m + # memory: 64Mi + server: + # -- Controls server listener for ipv6 + ipv6: + enabled: true + # -- The number of replicas of Hubble UI to deploy. + replicas: 1 + # -- Annotations to be added to all top-level hubble-ui objects (resources under templates/hubble-ui) + annotations: {} + # -- Additional labels to be added to 'hubble-ui' deployment object + labels: {} + # -- Annotations to be added to hubble-ui pods + podAnnotations: {} + # -- Labels to be added to hubble-ui pods + podLabels: {} + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # @schema + # type: [null, integer, string] + # @schema + # -- Minimum number/percentage of pods that should remain scheduled. 
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # @schema + # type: [null, integer, string] + # @schema + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + # -- Affinity for hubble-ui + affinity: {} + # -- Pod topology spread constraints for hubble-ui + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Node labels for pod assignment + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for pod assignment on nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: [] + # -- The priority class to use for hubble-ui + priorityClassName: "" + # -- hubble-ui update strategy. + updateStrategy: + type: RollingUpdate + rollingUpdate: + # @schema + # type: [integer, string] + # @schema + maxUnavailable: 1 + # -- Security context to be added to Hubble UI pods + securityContext: + runAsUser: 1001 + runAsGroup: 1001 + fsGroup: 1001 + # -- hubble-ui service configuration. + service: + # -- Annotations to be added for the Hubble UI service + annotations: {} + # --- The type of service used for Hubble UI access, either ClusterIP or NodePort. + type: ClusterIP + # --- The port to use when the service type is set to NodePort. + nodePort: 31235 + # -- Defines base url prefix for all hubble-ui http requests. + # It needs to be changed in case if ingress for hubble-ui is configured under some sub-path. + # Trailing `/` is required for custom path, ex. `/service-map/` + baseUrl: "/" + # -- hubble-ui ingress configuration. + ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + className: "" + hosts: + - chart-example.local + labels: {} + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + # -- Hubble flows export. + export: + # --- Defines max file size of output file before it gets rotated. + fileMaxSizeMb: 10 + # --- Defines max number of backup/rotated files. + fileMaxBackups: 5 + # --- Static exporter configuration. + # Static exporter is bound to agent lifecycle. + static: + enabled: false + filePath: /var/run/cilium/hubble/events.log + fieldMask: [] + # - time + # - source + # - destination + # - verdict + allowList: [] + # - '{"verdict":["DROPPED","ERROR"]}' + denyList: [] + # - '{"source_pod":["kube-system/"]}' + # - '{"destination_pod":["kube-system/"]}' + # --- Dynamic exporters configuration. + # Dynamic exporters may be reconfigured without a need of agent restarts. + dynamic: + enabled: false + config: + # ---- Name of configmap with configuration that may be altered to reconfigure exporters within a running agents. + configMapName: cilium-flowlog-config + # ---- True if helm installer should create config map. + # Switch to false if you want to self maintain the file content. + createConfigMap: true + # ---- Exporters configuration in YAML format. 
+ content: + - name: all + fieldMask: [] + includeFilters: [] + excludeFilters: [] + filePath: "/var/run/cilium/hubble/events.log" + # - name: "test002" + # filePath: "/var/log/network/flow-log/pa/test002.log" + # fieldMask: ["source.namespace", "source.pod_name", "destination.namespace", "destination.pod_name", "verdict"] + # includeFilters: + # - source_pod: ["default/"] + # event_type: + # - type: 1 + # - destination_pod: ["frontend/nginx-975996d4c-7hhgt"] + # excludeFilters: [] + # end: "2023-10-09T23:59:59-07:00" + # -- Emit v1.Events related to pods on detection of packet drops. + # This feature is alpha, please provide feedback at https://github.com/cilium/cilium/issues/33975. + dropEventEmitter: + enabled: false + # --- Minimum time between emitting same events. + interval: 2m + # --- Drop reasons to emit events for. + # ref: https://docs.cilium.io/en/stable/_api/v1/flow/README/#dropreason + reasons: + - auth_required + - policy_denied +# -- Method to use for identity allocation (`crd`, `kvstore` or `doublewrite-readkvstore` / `doublewrite-readcrd` for migrating between identity backends). +identityAllocationMode: "crd" +# -- (string) Time to wait before using new identity on endpoint identity change. +# @default -- `"5s"` +identityChangeGracePeriod: "" +# -- Install Iptables rules to skip netfilter connection tracking on all pod +# traffic. This option is only effective when Cilium is running in direct +# routing and full KPR mode. Moreover, this option cannot be enabled when Cilium +# is running in a managed Kubernetes environment or in a chained CNI setup. +installNoConntrackIptablesRules: false +ipam: + # -- Configure IP Address Management mode. + # ref: https://docs.cilium.io/en/stable/network/concepts/ipam/ + mode: kubernetes + # -- Maximum rate at which the CiliumNode custom resource is updated. + ciliumNodeUpdateRate: "15s" + # -- Pre-allocation settings for IPAM in Multi-Pool mode + multiPoolPreAllocation: "" + # -- Install ingress/egress routes through uplink on host for Pods when working with delegated IPAM plugin. + installUplinkRoutesForDelegatedIPAM: false + operator: + # @schema + # type: [array, string] + # @schema + # -- IPv4 CIDR list range to delegate to individual nodes for IPAM. + clusterPoolIPv4PodCIDRList: ["10.0.0.0/8"] + # -- IPv4 CIDR mask size to delegate to individual nodes for IPAM. + clusterPoolIPv4MaskSize: 24 + # @schema + # type: [array, string] + # @schema + # -- IPv6 CIDR list range to delegate to individual nodes for IPAM. + clusterPoolIPv6PodCIDRList: ["fd00::/104"] + # -- IPv6 CIDR mask size to delegate to individual nodes for IPAM. + clusterPoolIPv6MaskSize: 120 + # -- IP pools to auto-create in multi-pool IPAM mode. + autoCreateCiliumPodIPPools: {} + # default: + # ipv4: + # cidrs: + # - 10.10.0.0/8 + # maskSize: 24 + # other: + # ipv6: + # cidrs: + # - fd00:100::/80 + # maskSize: 96 + # @schema + # type: [null, integer] + # @schema + # -- (int) The maximum burst size when rate limiting access to external APIs. + # Also known as the token bucket capacity. + # @default -- `20` + externalAPILimitBurstSize: ~ + # @schema + # type: [null, number] + # @schema + # -- (float) The maximum queries per second when rate limiting access to + # external APIs. Also known as the bucket refill rate, which is used to + # refill the bucket up to the burst size capacity. + # @default -- `4.0` + externalAPILimitQPS: ~ +# -- defaultLBServiceIPAM indicates the default LoadBalancer Service IPAM when +# no LoadBalancer class is set. 
Applicable values: lbipam, nodeipam, none +# @schema +# type: [string] +# @schema +defaultLBServiceIPAM: lbipam +nodeIPAM: + # -- Configure Node IPAM + # ref: https://docs.cilium.io/en/stable/network/node-ipam/ + enabled: false +# @schema +# type: [null, string] +# @schema +# -- The api-rate-limit option can be used to overwrite individual settings of the default configuration for rate limiting calls to the Cilium Agent API +apiRateLimit: ~ +# -- Configure the eBPF-based ip-masq-agent +ipMasqAgent: + enabled: false +# the config of nonMasqueradeCIDRs +# config: +# nonMasqueradeCIDRs: [] +# masqLinkLocal: false +# masqLinkLocalIPv6: false + +# iptablesLockTimeout defines the iptables "--wait" option when invoked from Cilium. +# iptablesLockTimeout: "5s" +ipv4: + # -- Enable IPv4 support. + enabled: true +ipv6: + # -- Enable IPv6 support. + enabled: false +# -- Configure Kubernetes specific configuration +k8s: + # -- requireIPv4PodCIDR enables waiting for Kubernetes to provide the PodCIDR + # range via the Kubernetes node resource + requireIPv4PodCIDR: false + # -- requireIPv6PodCIDR enables waiting for Kubernetes to provide the PodCIDR + # range via the Kubernetes node resource + requireIPv6PodCIDR: false +# -- Keep the deprecated selector labels when deploying Cilium DaemonSet. +keepDeprecatedLabels: false +# -- Keep the deprecated probes when deploying Cilium DaemonSet +keepDeprecatedProbes: false +startupProbe: + # -- failure threshold of startup probe. + # 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s) + failureThreshold: 105 + # -- interval between checks of the startup probe + periodSeconds: 2 +livenessProbe: + # -- failure threshold of liveness probe + failureThreshold: 10 + # -- interval between checks of the liveness probe + periodSeconds: 30 + # -- whether to require k8s connectivity as part of the check. + requireK8sConnectivity: false +readinessProbe: + # -- failure threshold of readiness probe + failureThreshold: 3 + # -- interval between checks of the readiness probe + periodSeconds: 30 +# -- Configure the kube-proxy replacement in Cilium BPF datapath +# Valid options are "true" or "false". +# ref: https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/ +kubeProxyReplacement: true + +# -- healthz server bind address for the kube-proxy replacement. +# To enable set the value to '0.0.0.0:10256' for all ipv4 +# addresses and this '[::]:10256' for all ipv6 addresses. +# By default it is disabled. +kubeProxyReplacementHealthzBindAddr: "" +l2NeighDiscovery: + # -- Enable L2 neighbor discovery in the agent + enabled: true + # -- Override the agent's default neighbor resolution refresh period. + refreshPeriod: "30s" +# -- Enable Layer 7 network policy. +l7Proxy: true +# -- Enable Local Redirect Policy. +localRedirectPolicy: false +# To include or exclude matched resources from cilium identity evaluation +# labels: "" + +# logOptions allows you to define logging options. eg: +# logOptions: +# format: json + +# -- Enables periodic logging of system load +logSystemLoad: false +# -- Configure maglev consistent hashing +maglev: {} +# -- tableSize is the size (parameter M) for the backend table of one +# service entry +# tableSize: + +# -- hashSeed is the cluster-wide base64 encoded seed for the hashing +# hashSeed: + +# -- Enables masquerading of IPv4 traffic leaving the node from endpoints. +enableIPv4Masquerade: true +# -- Enables masquerading of IPv6 traffic leaving the node from endpoints. 
+enableIPv6Masquerade: true +# -- Enables masquerading to the source of the route for traffic leaving the node from endpoints. +enableMasqueradeRouteSource: false +# -- Enables IPv4 BIG TCP support which increases maximum IPv4 GSO/GRO limits for nodes and pods +enableIPv4BIGTCP: false +# -- Enables IPv6 BIG TCP support which increases maximum IPv6 GSO/GRO limits for nodes and pods +enableIPv6BIGTCP: false +nat: + # -- Number of the top-k SNAT map connections to track in Cilium statedb. + mapStatsEntries: 32 + # -- Interval between how often SNAT map is counted for stats. + mapStatsInterval: 30s +egressGateway: + # -- Enables egress gateway to redirect and SNAT the traffic that leaves the + # cluster. + enabled: false + # -- Time between triggers of egress gateway state reconciliations + reconciliationTriggerInterval: 1s + # -- Maximum number of entries in egress gateway policy map + # maxPolicyEntries: 16384 +vtep: + # -- Enables VXLAN Tunnel Endpoint (VTEP) Integration (beta) to allow + # Cilium-managed pods to talk to third party VTEP devices over Cilium tunnel. + enabled: false + # -- A space separated list of VTEP device endpoint IPs, for example "1.1.1.1 1.1.2.1" + endpoint: "" + # -- A space separated list of VTEP device CIDRs, for example "1.1.1.0/24 1.1.2.0/24" + cidr: "" + # -- VTEP CIDRs Mask that applies to all VTEP CIDRs, for example "255.255.255.0" + mask: "" + # -- A space separated list of VTEP device MAC addresses (VTEP MAC), for example "x:x:x:x:x:x y:y:y:y:y:y:y" + mac: "" +# -- (string) Allows to explicitly specify the IPv4 CIDR for native routing. +# When specified, Cilium assumes networking for this CIDR is preconfigured and +# hands traffic destined for that range to the Linux network stack without +# applying any SNAT. +# Generally speaking, specifying a native routing CIDR implies that Cilium can +# depend on the underlying networking stack to route packets to their +# destination. To offer a concrete example, if Cilium is configured to use +# direct routing and the Kubernetes CIDR is included in the native routing CIDR, +# the user must configure the routes to reach pods, either manually or by +# setting the auto-direct-node-routes flag. +ipv4NativeRoutingCIDR: "" +# -- (string) Allows to explicitly specify the IPv6 CIDR for native routing. +# When specified, Cilium assumes networking for this CIDR is preconfigured and +# hands traffic destined for that range to the Linux network stack without +# applying any SNAT. +# Generally speaking, specifying a native routing CIDR implies that Cilium can +# depend on the underlying networking stack to route packets to their +# destination. To offer a concrete example, if Cilium is configured to use +# direct routing and the Kubernetes CIDR is included in the native routing CIDR, +# the user must configure the routes to reach pods, either manually or by +# setting the auto-direct-node-routes flag. +ipv6NativeRoutingCIDR: "" +# -- cilium-monitor sidecar. +monitor: + # -- Enable the cilium-monitor sidecar. + enabled: false +# -- Configure service load balancing +loadBalancer: + # -- standalone enables the standalone L4LB which does not connect to + # kube-apiserver. + # standalone: false + + # -- algorithm is the name of the load balancing algorithm for backend + # selection e.g. random or maglev + algorithm: maglev + + # -- mode is the operation mode of load balancing for remote backends + # e.g. 
snat, dsr, hybrid + # mode: snat + + # -- acceleration is the option to accelerate service handling via XDP + # Applicable values can be: disabled (do not use XDP), native (XDP BPF + # program is run directly out of the networking driver's early receive + # path), or best-effort (use native mode XDP acceleration on devices + # that support it). + acceleration: disabled + # -- dsrDispatch configures whether IP option or IPIP encapsulation is + # used to pass a service IP and port to remote backend + # dsrDispatch: opt + + # -- serviceTopology enables K8s Topology Aware Hints -based service + # endpoints filtering + # serviceTopology: false + + # -- experimental enables support for the experimental load-balancing + # control-plane. + experimental: false + # -- L7 LoadBalancer + l7: + # -- Enable L7 service load balancing via envoy proxy. + # The request to a k8s service, which has specific annotation e.g. service.cilium.io/lb-l7, + # will be forwarded to the local backend proxy to be load balanced to the service endpoints. + # Please refer to docs for supported annotations for more configuration. + # + # Applicable values: + # - envoy: Enable L7 load balancing via envoy proxy. This will automatically set enable-envoy-config as well. + # - disabled: Disable L7 load balancing by way of service annotation. + backend: disabled + # -- List of ports from service to be automatically redirected to above backend. + # Any service exposing one of these ports will be automatically redirected. + # Fine-grained control can be achieved by using the service annotation. + ports: [] + # -- Default LB algorithm + # The default LB algorithm to be used for services, which can be overridden by the + # service annotation (e.g. service.cilium.io/lb-l7-algorithm) + # Applicable values: round_robin, least_request, random + algorithm: round_robin +# -- Configure N-S k8s service loadbalancing +nodePort: + # -- Enable the Cilium NodePort service implementation. + enabled: false + # -- Port range to use for NodePort services. + # range: "30000,32767" + + # @schema + # type: [null, string, array] + # @schema + # -- List of CIDRs for choosing which IP addresses assigned to native devices are used for NodePort load-balancing. + # By default this is empty and the first suitable, preferably private, IPv4 and IPv6 address assigned to each device is used. + # + # Example: + # + # addresses: ["192.168.1.0/24", "2001::/64"] + # + addresses: ~ + # -- Set to true to prevent applications binding to service ports. + bindProtection: true + # -- Append NodePort range to ip_local_reserved_ports if clash with ephemeral + # ports is detected. + autoProtectPortRange: true + # -- Enable healthcheck nodePort server for NodePort services + enableHealthCheck: true + # -- Enable access of the healthcheck nodePort on the LoadBalancerIP. Needs + # EnableHealthCheck to be enabled + enableHealthCheckLoadBalancerIP: false +# policyAuditMode: false + +# -- The agent can be put into one of the three policy enforcement modes: +# default, always and never. +# ref: https://docs.cilium.io/en/stable/security/policy/intro/#policy-enforcement-modes +policyEnforcementMode: "default" +# @schema +# type: [null, string, array] +# @schema +# -- policyCIDRMatchMode is a list of entities that may be selected by CIDR selector. +# The possible value is "nodes". 
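+# As an illustration only (not a chart default), allowing CIDR selectors to
+# also match node IPs would look like:
+#   policyCIDRMatchMode: ["nodes"]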
+policyCIDRMatchMode: +pprof: + # -- Enable pprof for cilium-agent + enabled: false + # -- Configure pprof listen address for cilium-agent + address: localhost + # -- Configure pprof listen port for cilium-agent + port: 6060 +# -- Configure prometheus metrics on the configured port at /metrics +prometheus: + metricsService: false + enabled: false + port: 9962 + serviceMonitor: + # -- Enable service monitors. + # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + enabled: false + # -- Labels to add to ServiceMonitor cilium-agent + labels: {} + # -- Annotations to add to ServiceMonitor cilium-agent + annotations: {} + # -- jobLabel to add for ServiceMonitor cilium-agent + jobLabel: "" + # -- Interval for scrape metrics. + interval: "10s" + # -- Specify the Kubernetes namespace where Prometheus expects to find + # service monitors configured. + # namespace: "" + # -- Relabeling configs for the ServiceMonitor cilium-agent + relabelings: + - sourceLabels: + - __meta_kubernetes_pod_node_name + targetLabel: node + replacement: ${1} + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor cilium-agent + metricRelabelings: ~ + # -- Set to `true` and helm will not check for monitoring.coreos.com/v1 CRDs before deploying + trustCRDsExist: false + # @schema + # type: [null, array] + # @schema + # -- Metrics that should be enabled or disabled from the default metric list. + # The list is expected to be separated by a space. (+metric_foo to enable + # metric_foo , -metric_bar to disable metric_bar). + # ref: https://docs.cilium.io/en/stable/observability/metrics/ + metrics: ~ + # --- Enable controller group metrics for monitoring specific Cilium + # subsystems. The list is a list of controller group names. The special + # values of "all" and "none" are supported. The set of controller + # group names is not guaranteed to be stable between Cilium versions. + controllerGroupMetrics: + - write-cni-file + - sync-host-ips + - sync-lb-maps-with-k8s-services +# -- Grafana dashboards for cilium-agent +# grafana can import dashboards based on the label and value +# ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards +dashboards: + enabled: false + label: grafana_dashboard + # @schema + # type: [null, string] + # @schema + namespace: ~ + labelValue: "1" + annotations: {} +# Configure Cilium Envoy options. +envoy: + # @schema + # type: [null, boolean] + # @schema + # -- Enable Envoy Proxy in standalone DaemonSet. + # This field is enabled by default for new installation. + # @default -- `true` for new installation + enabled: ~ + # -- (int) + # Set Envoy'--base-id' to use when allocating shared memory regions. + # Only needs to be changed if multiple Envoy instances will run on the same node and may have conflicts. Supported values: 0 - 4294967295. Defaults to '0' + baseID: 0 + log: + # @schema + # type: [null, string] + # @schema + # -- The format string to use for laying out the log message metadata of Envoy. If specified, Envoy will use text format output. + # This setting is mutually exclusive with envoy.log.format_json. + format: "[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v" + # @schema + # type: [null, object] + # @schema + # -- The JSON logging format to use for Envoy. This setting is mutually exclusive with envoy.log.format. 
+ # ref: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/bootstrap/v3/bootstrap.proto#envoy-v3-api-field-config-bootstrap-v3-bootstrap-applicationlogconfig-logformat-json-format + format_json: null + # date: "%Y-%m-%dT%T.%e" + # thread_id: "%t" + # source_line: "%s:%#" + # level: "%l" + # logger: "%n" + # message: "%j" + # -- Path to a separate Envoy log file, if any. Defaults to /dev/stdout. + path: "" + # @schema + # oneOf: + # - type: [null] + # - enum: [trace,debug,info,warning,error,critical,off] + # @schema + # -- Default log level of Envoy application log that is configured if Cilium debug / verbose logging isn't enabled. + # This option allows to have a different log level than the Cilium Agent - e.g. lower it to `critical`. + # Possible values: trace, debug, info, warning, error, critical, off + # @default -- Defaults to the default log level of the Cilium Agent - `info` + defaultLevel: ~ + # @schema + # type: [null, integer] + # @schema + # -- Size of the Envoy access log buffer created within the agent in bytes. + # Tune this value up if you encounter "Envoy: Discarded truncated access log message" errors. + # Large request/response header sizes (e.g. 16KiB) will require a larger buffer size. + accessLogBufferSize: 4096 + # -- Time in seconds after which a TCP connection attempt times out + connectTimeoutSeconds: 2 + # -- Time in seconds after which the initial fetch on an xDS stream is considered timed out + initialFetchTimeoutSeconds: 30 + # -- Maximum number of concurrent retries on Envoy clusters + maxConcurrentRetries: 128 + # -- Maximum number of retries for each HTTP request + httpRetryCount: 3 + # -- ProxyMaxRequestsPerConnection specifies the max_requests_per_connection setting for Envoy + maxRequestsPerConnection: 0 + # -- Set Envoy HTTP option max_connection_duration seconds. Default 0 (disable) + maxConnectionDurationSeconds: 0 + # -- Set Envoy upstream HTTP idle connection timeout seconds. + # Does not apply to connections with pending requests. Default 60s + idleTimeoutDurationSeconds: 60 + # -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the ingress L7 policy enforcement Envoy listeners. + xffNumTrustedHopsL7PolicyIngress: 0 + # -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the egress L7 policy enforcement Envoy listeners. + xffNumTrustedHopsL7PolicyEgress: 0 + # @schema + # type: [null, string] + # @schema + # -- Max duration to wait for endpoint policies to be restored on restart. Default "3m". + policyRestoreTimeoutDuration: null + # -- Envoy container image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/cilium-envoy" + tag: "v1.33.9-1757932127-3c04e8f2f1027d106b96f8ef4a0215e81dbaaece" + pullPolicy: "IfNotPresent" + digest: "sha256:06fbc4e55d926dd82ff2a0049919248dcc6be5354609b09012b01bc9c5b0ee28" + useDigest: true + # -- Additional containers added to the cilium Envoy DaemonSet. + extraContainers: [] + # -- Additional envoy container arguments. + extraArgs: [] + # -- Additional envoy container environment variables. + extraEnv: [] + # -- Additional envoy hostPath mounts. + extraHostPathMounts: [] + # - name: host-mnt-data + # mountPath: /host/mnt/data + # hostPath: /mnt/data + # hostPathType: Directory + # readOnly: true + # mountPropagation: HostToContainer + + # -- Additional envoy volumes. + extraVolumes: [] + # -- Additional envoy volumeMounts. 
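+  # Illustrative entry only (not a default); it assumes a matching volume of
+  # the same name is declared under extraVolumes above:
+  #   extraVolumeMounts:
+  #   - name: host-mnt-data
+  #     mountPath: /host/mnt/data
+  #     readOnly: true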
+ extraVolumeMounts: [] + # -- Configure termination grace period for cilium-envoy DaemonSet. + terminationGracePeriodSeconds: 1 + # -- TCP port for the health API. + healthPort: 9878 + # -- cilium-envoy update strategy + # ref: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/#updating-a-daemonset + updateStrategy: + type: RollingUpdate + rollingUpdate: + # @schema + # type: [integer, string] + # @schema + maxUnavailable: 2 + # -- Roll out cilium envoy pods automatically when configmap is updated. + rollOutPods: false + # -- ADVANCED OPTION: Bring your own custom Envoy bootstrap ConfigMap. Provide the name of a ConfigMap with a `bootstrap-config.json` key. + # When specified, Envoy will use this ConfigMap instead of the default provided by the chart. + # WARNING: Use of this setting has the potential to prevent cilium-envoy from starting up, and can cause unexpected behavior (e.g. due to + # syntax error or semantically incorrect configuration). Before submitting an issue, please ensure you have disabled this feature, as support + # cannot be provided for custom Envoy bootstrap configs. + # @schema + # type: [null, string] + # @schema + bootstrapConfigMap: ~ + # -- Annotations to be added to all top-level cilium-envoy objects (resources under templates/cilium-envoy) + annotations: {} + # -- Security Context for cilium-envoy pods. + podSecurityContext: + # -- AppArmorProfile options for the `cilium-agent` and init containers + appArmorProfile: + type: "Unconfined" + # -- Annotations to be added to envoy pods + podAnnotations: {} + # -- Labels to be added to envoy pods + podLabels: {} + # -- Envoy resource limits & requests + # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: {} + # limits: + # cpu: 4000m + # memory: 4Gi + # requests: + # cpu: 100m + # memory: 512Mi + + startupProbe: + # -- failure threshold of startup probe. + # 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s) + failureThreshold: 105 + # -- interval between checks of the startup probe + periodSeconds: 2 + livenessProbe: + # -- failure threshold of liveness probe + failureThreshold: 10 + # -- interval between checks of the liveness probe + periodSeconds: 30 + readinessProbe: + # -- failure threshold of readiness probe + failureThreshold: 3 + # -- interval between checks of the readiness probe + periodSeconds: 30 + securityContext: + # -- User to run the pod with + # runAsUser: 0 + # -- Run the pod with elevated privileges + privileged: false + # -- SELinux options for the `cilium-envoy` container + seLinuxOptions: + level: 's0' + # Running with spc_t since we have removed the privileged mode. + # Users can change it to a different type as long as they have the + # type available on the system. + type: 'spc_t' + capabilities: + # -- Capabilities for the `cilium-envoy` container. + # Even though granted to the container, the cilium-envoy-starter wrapper drops + # all capabilities after forking the actual Envoy process. + # `NET_BIND_SERVICE` is the only capability that can be passed to the Envoy process by + # setting `envoy.securityContext.capabilities.keepNetBindService=true` (in addition to granting the + # capability to the container). + # Note: In case of embedded envoy, the capability must be granted to the cilium-agent container. + envoy: + # Used since cilium proxy uses setting IPPROTO_IP/IP_TRANSPARENT + - NET_ADMIN + # We need it for now but might not need it for >= 5.11 specially + # for the 'SYS_RESOURCE'. 
+ # In >= 5.8 there's already BPF and PERMON capabilities + #- SYS_ADMIN + # Both PERFMON and BPF requires kernel 5.8, container runtime + # cri-o >= v1.22.0 or containerd >= v1.5.0. + # If available, SYS_ADMIN can be removed. + - PERFMON + - BPF + # -- Keep capability `NET_BIND_SERVICE` for Envoy process. + keepCapNetBindService: true + # -- Affinity for cilium-envoy. + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium-envoy + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: cilium.io/no-schedule + operator: NotIn + values: + - "true" + # -- Node selector for cilium-envoy. + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for envoy scheduling to nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # @schema + # type: [null, string] + # @schema + # -- The priority class to use for cilium-envoy. + priorityClassName: ~ + # @schema + # type: [null, string] + # @schema + # -- DNS policy for Cilium envoy pods. + # Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + dnsPolicy: ~ + debug: + admin: + # -- Enable admin interface for cilium-envoy. + # This is useful for debugging and should not be enabled in production. + enabled: false + # -- Port number (bound to loopback interface). + # kubectl port-forward can be used to access the admin interface. + port: 9901 + # -- Configure Cilium Envoy Prometheus options. + # Note that some of these apply to either cilium-agent or cilium-envoy. + prometheus: + # -- Enable prometheus metrics for cilium-envoy + enabled: true + serviceMonitor: + # -- Enable service monitors. + # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + # Note that this setting applies to both cilium-envoy _and_ cilium-agent + # with Envoy enabled. + enabled: false + # -- Labels to add to ServiceMonitor cilium-envoy + labels: {} + # -- Annotations to add to ServiceMonitor cilium-envoy + annotations: {} + # -- Interval for scrape metrics. + interval: "10s" + # -- Specify the Kubernetes namespace where Prometheus expects to find + # service monitors configured. + # namespace: "" + # -- Relabeling configs for the ServiceMonitor cilium-envoy + # or for cilium-agent with Envoy configured. + relabelings: + - sourceLabels: + - __meta_kubernetes_pod_node_name + targetLabel: node + replacement: ${1} + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor cilium-envoy + # or for cilium-agent with Envoy configured. + metricRelabelings: ~ + # -- Serve prometheus metrics for cilium-envoy on the configured port + port: "9964" +# -- Enable/Disable use of node label based identity +nodeSelectorLabels: false +# -- Enable resource quotas for priority classes used in the cluster. 
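+# Illustrative override (not the chart default) that simply turns the quotas on:
+#   resourceQuotas:
+#     enabled: true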
+resourceQuotas: + enabled: false + cilium: + hard: + # 5k nodes * 2 DaemonSets (Cilium and cilium node init) + pods: "10k" + operator: + hard: + # 15 "clusterwide" Cilium Operator pods for HA + pods: "15" +# Need to document default +################## +#sessionAffinity: false + +# -- Do not run Cilium agent when running with clean mode. Useful to completely +# uninstall Cilium as it will stop Cilium from starting and create artifacts +# in the node. +sleepAfterInit: false +# -- Enable check of service source ranges (currently, only for LoadBalancer). +svcSourceRangeCheck: true +# -- Synchronize Kubernetes nodes to kvstore and perform CNP GC. +synchronizeK8sNodes: true +# -- Configure TLS configuration in the agent. +tls: + # @schema + # type: [null, string] + # @schema + # -- This configures how the Cilium agent loads the secrets used TLS-aware CiliumNetworkPolicies + # (namely the secrets referenced by terminatingTLS and originatingTLS). + # This value is DEPRECATED and will be removed in a future version. + # Use `tls.readSecretsOnlyFromSecretsNamespace` instead. + # Possible values: + # - local + # - k8s + secretsBackend: ~ + # @schema + # type: [null, boolean] + # @schema + # -- Configure if the Cilium Agent will only look in `tls.secretsNamespace` for + # CiliumNetworkPolicy relevant Secrets. + # If false, the Cilium Agent will be granted READ (GET/LIST/WATCH) access + # to _all_ secrets in the entire cluster. This is not recommended and is + # included for backwards compatibility. + # This value obsoletes `tls.secretsBackend`, with `true` == `local` in the old + # setting, and `false` == `k8s`. + readSecretsOnlyFromSecretsNamespace: ~ + # -- Configures where secrets used in CiliumNetworkPolicies will be looked for + secretsNamespace: + # -- Create secrets namespace for TLS Interception secrets. + create: true + # -- Name of TLS Interception secret namespace. + name: cilium-secrets + # -- Configures settings for synchronization of TLS Interception Secrets + secretSync: + # @schema + # type: [null, boolean] + # @schema + # -- Enable synchronization of Secrets for TLS Interception. If disabled and + # tls.readSecretsOnlyFromSecretsNamespace is set to 'false', then secrets will be read directly by the agent. + enabled: ~ + # -- Base64 encoded PEM values for the CA certificate and private key. + # This can be used as common CA to generate certificates used by hubble and clustermesh components. + # It is neither required nor used when cert-manager is used to generate the certificates. + ca: + # -- Optional CA cert. If it is provided, it will be used by cilium to + # generate all other certificates. Otherwise, an ephemeral CA is generated. + cert: "" + # -- Optional CA private key. If it is provided, it will be used by cilium to + # generate all other certificates. Otherwise, an ephemeral CA is generated. + key: "" + # -- Generated certificates validity duration in days. This will be used for auto generated CA. + certValidityDuration: 1095 + # -- Configure the CA trust bundle used for the validation of the certificates + # leveraged by hubble and clustermesh. When enabled, it overrides the content of the + # 'ca.crt' field of the respective certificates, allowing for CA rotation with no down-time. + caBundle: + # -- Enable the use of the CA trust bundle. + enabled: false + # -- Name of the ConfigMap containing the CA trust bundle. + name: cilium-root-ca.crt + # -- Entry of the ConfigMap containing the CA trust bundle. + key: ca.crt + # -- Use a Secret instead of a ConfigMap. 
+ useSecret: false + # If uncommented, creates the ConfigMap and fills it with the specified content. + # Otherwise, the ConfigMap is assumed to be already present in .Release.Namespace. + # + # content: | + # -----BEGIN CERTIFICATE----- + # ... + # -----END CERTIFICATE----- + # -----BEGIN CERTIFICATE----- + # ... + # -----END CERTIFICATE----- +# -- Tunneling protocol to use in tunneling mode and for ad-hoc tunnels. +# Possible values: +# - "" +# - vxlan +# - geneve +# @default -- `"vxlan"` +tunnelProtocol: "" +# -- Enable native-routing mode or tunneling mode. +# Possible values: +# - "" +# - native +# - tunnel +# @default -- `"tunnel"` +routingMode: "" +# -- Configure VXLAN and Geneve tunnel port. +# @default -- Port 8472 for VXLAN, Port 6081 for Geneve +tunnelPort: 0 +# -- Configure VXLAN and Geneve tunnel source port range hint. +# @default -- 0-0 to let the kernel driver decide the range +tunnelSourcePortRange: 0-0 +# -- Configure what the response should be to traffic for a service without backends. +# Possible values: +# - reject (default) +# - drop +serviceNoBackendResponse: reject +# -- Configure the underlying network MTU to overwrite auto-detected MTU. +# This value doesn't change the host network interface MTU i.e. eth0 or ens0. +# It changes the MTU for cilium_net@cilium_host, cilium_host@cilium_net, +# cilium_vxlan and lxc_health interfaces. +MTU: 0 +# -- Disable the usage of CiliumEndpoint CRD. +disableEndpointCRD: false +wellKnownIdentities: + # -- Enable the use of well-known identities. + enabled: false +etcd: + # -- Enable etcd mode for the agent. + enabled: false + # -- List of etcd endpoints + endpoints: + - https://CHANGE-ME:2379 + # -- Enable use of TLS/SSL for connectivity to etcd. + ssl: false +operator: + # -- Enable the cilium-operator component (required). + enabled: true + # -- Roll out cilium-operator pods automatically when configmap is updated. + rollOutPods: true + # -- cilium-operator image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/operator" + tag: "v1.17.8" + # operator-generic-digest + genericDigest: "sha256:5468807b9c31997f3a1a14558ec7c20c5b962a2df6db633b7afbe2f45a15da1c" + # operator-azure-digest + azureDigest: "sha256:619f9febf3efef2724a26522b253e4595cd33c274f5f49925e29a795fdc2d2d7" + # operator-aws-digest + awsDigest: "sha256:28012f7d0f4f23e9f6c7d6a5dd931afa326bbac3e8103f3f6f22b9670847dffa" + # operator-alibabacloud-digest + alibabacloudDigest: "sha256:72c25a405ad8e58d2cf03f7ea2b6696ed1edcfb51716b5f85e45c6c4fcaa6056" + useDigest: true + pullPolicy: "IfNotPresent" + suffix: "" + # -- Number of replicas to run for the cilium-operator deployment + replicas: 2 + # -- The priority class to use for cilium-operator + priorityClassName: "" + # -- DNS policy for Cilium operator pods. 
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + dnsPolicy: "" + # -- cilium-operator update strategy + updateStrategy: + type: RollingUpdate + rollingUpdate: + # @schema + # type: [integer, string] + # @schema + maxSurge: 25% + # @schema + # type: [integer, string] + # @schema + maxUnavailable: 50% + # -- Affinity for cilium-operator + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + io.cilium/app: operator + # -- Pod topology spread constraints for cilium-operator + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Node labels for cilium-operator pod assignment + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for cilium-operator scheduling to nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # -- Additional cilium-operator container arguments. + extraArgs: [] + # -- Additional cilium-operator environment variables. + extraEnv: [] + # -- Additional cilium-operator hostPath mounts. + extraHostPathMounts: [] + # - name: host-mnt-data + # mountPath: /host/mnt/data + # hostPath: /mnt/data + # hostPathType: Directory + # readOnly: true + # mountPropagation: HostToContainer + + # -- Additional cilium-operator volumes. + extraVolumes: [] + # -- Additional cilium-operator volumeMounts. + extraVolumeMounts: [] + # -- Annotations to be added to all top-level cilium-operator objects (resources under templates/cilium-operator) + annotations: {} + # -- HostNetwork setting + hostNetwork: true + # -- Security context to be added to cilium-operator pods + podSecurityContext: {} + # -- Annotations to be added to cilium-operator pods + podAnnotations: {} + # -- Labels to be added to cilium-operator pods + podLabels: {} + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # @schema + # type: [null, integer, string] + # @schema + # -- Minimum number/percentage of pods that should remain scheduled. + # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # @schema + # type: [null, integer, string] + # @schema + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + # -- cilium-operator resource limits & requests + # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: {} + # limits: + # cpu: 1000m + # memory: 1Gi + # requests: + # cpu: 100m + # memory: 128Mi + + # -- Security context to be added to cilium-operator pods + securityContext: {} + # runAsUser: 0 + + # -- Interval for endpoint garbage collection. + endpointGCInterval: "5m0s" + # -- Interval for cilium node garbage collection. + nodeGCInterval: "5m0s" + # -- Interval for identity garbage collection. + identityGCInterval: "15m0s" + # -- Timeout for identity heartbeats. 
+ identityHeartbeatTimeout: "30m0s" + pprof: + # -- Enable pprof for cilium-operator + enabled: false + # -- Configure pprof listen address for cilium-operator + address: localhost + # -- Configure pprof listen port for cilium-operator + port: 6061 + # -- Enable prometheus metrics for cilium-operator on the configured port at + # /metrics + prometheus: + metricsService: false + enabled: true + port: 9963 + serviceMonitor: + # -- Enable service monitors. + # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + enabled: false + # -- Labels to add to ServiceMonitor cilium-operator + labels: {} + # -- Annotations to add to ServiceMonitor cilium-operator + annotations: {} + # -- jobLabel to add for ServiceMonitor cilium-operator + jobLabel: "" + # -- Interval for scrape metrics. + interval: "10s" + # @schema + # type: [null, array] + # @schema + # -- Relabeling configs for the ServiceMonitor cilium-operator + relabelings: ~ + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor cilium-operator + metricRelabelings: ~ + # -- Grafana dashboards for cilium-operator + # grafana can import dashboards based on the label and value + # ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards + dashboards: + enabled: false + label: grafana_dashboard + # @schema + # type: [null, string] + # @schema + namespace: ~ + labelValue: "1" + annotations: {} + # -- Skip CRDs creation for cilium-operator + skipCRDCreation: false + # -- Remove Cilium node taint from Kubernetes nodes that have a healthy Cilium + # pod running. + removeNodeTaints: true + # @schema + # type: [null, boolean] + # @schema + # -- Taint nodes where Cilium is scheduled but not running. This prevents pods + # from being scheduled to nodes where Cilium is not the default CNI provider. + # @default -- same as removeNodeTaints + setNodeTaints: ~ + # -- Set Node condition NetworkUnavailable to 'false' with the reason + # 'CiliumIsUp' for nodes that have a healthy Cilium pod. + setNodeNetworkStatus: true + unmanagedPodWatcher: + # -- Restart any pod that are not managed by Cilium. + restart: true + # -- Interval, in seconds, to check if there are any pods that are not + # managed by Cilium. + intervalSeconds: 15 +nodeinit: + # -- Enable the node initialization DaemonSet + enabled: false + # -- node-init image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/startup-script" + tag: "1755531540-60ee83e" + digest: "sha256:5bdca3c2dec2c79f58d45a7a560bf1098c2126350c901379fe850b7f78d3d757" + useDigest: true + pullPolicy: "IfNotPresent" + # -- The priority class to use for the nodeinit pod. + priorityClassName: "" + # -- node-init update strategy + updateStrategy: + type: RollingUpdate + # -- Additional nodeinit environment variables. + extraEnv: [] + # -- Additional nodeinit volumes. + extraVolumes: [] + # -- Additional nodeinit volumeMounts. 
+ extraVolumeMounts: [] + # -- Affinity for cilium-nodeinit + affinity: {} + # -- Node labels for nodeinit pod assignment + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for nodeinit scheduling to nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # -- Annotations to be added to all top-level nodeinit objects (resources under templates/cilium-nodeinit) + annotations: {} + # -- Annotations to be added to node-init pods. + podAnnotations: {} + # -- Labels to be added to node-init pods. + podLabels: {} + # -- Security Context for cilium-node-init pods. + podSecurityContext: + # -- AppArmorProfile options for the `cilium-node-init` and init containers + appArmorProfile: + type: "Unconfined" + # -- nodeinit resource limits & requests + # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + requests: + cpu: 100m + memory: 100Mi + # -- Security context to be added to nodeinit pods. + securityContext: + privileged: false + seLinuxOptions: + level: 's0' + # Running with spc_t since we have removed the privileged mode. + # Users can change it to a different type as long as they have the + # type available on the system. + type: 'spc_t' + capabilities: + add: + # Used in iptables. Consider removing once we are iptables-free + - SYS_MODULE + # Used for nsenter + - NET_ADMIN + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + # -- bootstrapFile is the location of the file where the bootstrap timestamp is + # written by the node-init DaemonSet + bootstrapFile: "/tmp/cilium-bootstrap.d/cilium-bootstrap-time" + # -- startup offers way to customize startup nodeinit script (pre and post position) + startup: + preScript: "" + postScript: "" + # -- prestop offers way to customize prestop nodeinit script (pre and post position) + prestop: + preScript: "" + postScript: "" +preflight: + # -- Enable Cilium pre-flight resources (required for upgrade) + enabled: false + # -- Cilium pre-flight image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/cilium" + tag: "v1.17.8" + # cilium-digest + digest: "sha256:6d7ea72ed311eeca4c75a1f17617a3d596fb6038d30d00799090679f82a01636" + useDigest: true + pullPolicy: "IfNotPresent" + # -- The priority class to use for the preflight pod. + priorityClassName: "" + # -- preflight update strategy + updateStrategy: + type: RollingUpdate + # -- Additional preflight environment variables. + extraEnv: [] + # -- Additional preflight volumes. + extraVolumes: [] + # -- Additional preflight volumeMounts. 
+ extraVolumeMounts: [] + # -- Affinity for cilium-preflight + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium + # -- Node labels for preflight pod assignment + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for preflight scheduling to nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # -- Annotations to be added to all top-level preflight objects (resources under templates/cilium-preflight) + annotations: {} + # -- Security context to be added to preflight pods. + podSecurityContext: {} + # -- Annotations to be added to preflight pods + podAnnotations: {} + # -- Labels to be added to the preflight pod. + podLabels: {} + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # @schema + # type: [null, integer, string] + # @schema + # -- Minimum number/percentage of pods that should remain scheduled. + # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # @schema + # type: [null, integer, string] + # @schema + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + # -- preflight resource limits & requests + # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: {} + # limits: + # cpu: 4000m + # memory: 4Gi + # requests: + # cpu: 100m + # memory: 512Mi + + readinessProbe: + # -- For how long kubelet should wait before performing the first probe + initialDelaySeconds: 5 + # -- interval between checks of the readiness probe + periodSeconds: 5 + # -- Security context to be added to preflight pods + securityContext: {} + # runAsUser: 0 + + # -- Path to write the `--tofqdns-pre-cache` file to. + tofqdnsPreCache: "" + # -- Configure termination grace period for preflight Deployment and DaemonSet. + terminationGracePeriodSeconds: 1 + # -- By default we should always validate the installed CNPs before upgrading + # Cilium. This will make sure the user will have the policies deployed in the + # cluster with the right schema. + validateCNPs: true +# -- Explicitly enable or disable priority class. +# .Capabilities.KubeVersion is unsettable in `helm template` calls, +# it depends on k8s libraries version that Helm was compiled against. +# This option allows to explicitly disable setting the priority class, which +# is useful for rendering charts for gke clusters in advance. +enableCriticalPriorityClass: true +# disableEnvoyVersionCheck removes the check for Envoy, which can be useful +# on AArch64 as the images do not currently ship a version of Envoy. +#disableEnvoyVersionCheck: false +clustermesh: + # -- Deploy clustermesh-apiserver for clustermesh + useAPIServer: false + # -- The maximum number of clusters to support in a ClusterMesh. This value + # cannot be changed on running clusters, and all clusters in a ClusterMesh + # must be configured with the same value. Values > 255 will decrease the + # maximum allocatable cluster-local identities. + # Supported values are 255 and 511. 
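+  # Illustrative override for a larger mesh (not the default); it cannot be
+  # changed on a running cluster and must be set identically in every cluster
+  # of the mesh:
+  #   maxConnectedClusters: 511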
+ maxConnectedClusters: 255 + # -- Enable the synchronization of Kubernetes EndpointSlices corresponding to + # the remote endpoints of appropriately-annotated global services through ClusterMesh + enableEndpointSliceSynchronization: false + # -- Enable Multi-Cluster Services API support + enableMCSAPISupport: false + # -- Annotations to be added to all top-level clustermesh objects (resources under templates/clustermesh-apiserver and templates/clustermesh-config) + annotations: {} + # -- Clustermesh explicit configuration. + config: + # -- Enable the Clustermesh explicit configuration. + enabled: false + # -- Default dns domain for the Clustermesh API servers + # This is used in the case cluster addresses are not provided + # and IPs are used. + domain: mesh.cilium.io + # -- List of clusters to be peered in the mesh. + clusters: [] + # clusters: + # # -- Name of the cluster + # - name: cluster1 + # # -- Address of the cluster, use this if you created DNS records for + # # the cluster Clustermesh API server. + # address: cluster1.mesh.cilium.io + # # -- Port of the cluster Clustermesh API server. + # port: 2379 + # # -- IPs of the cluster Clustermesh API server, use multiple ones when + # # you have multiple IPs to access the Clustermesh API server. + # ips: + # - 172.18.255.201 + # # -- base64 encoded PEM values for the cluster client certificate, private key and certificate authority. + # # These fields can (and should) be omitted in case the CA is shared across clusters. In that case, the + # # "remote" private key and certificate available in the local cluster are automatically used instead. + # tls: + # cert: "" + # key: "" + # caCert: "" + apiserver: + # -- Clustermesh API server image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/clustermesh-apiserver" + tag: "v1.17.8" + # clustermesh-apiserver-digest + digest: "sha256:3ac210d94d37a77ec010f9ac4c705edc8f15f22afa2b9a6f0e2a7d64d2360586" + useDigest: true + pullPolicy: "IfNotPresent" + # -- TCP port for the clustermesh-apiserver health API. + healthPort: 9880 + # -- Configuration for the clustermesh-apiserver readiness probe. + readinessProbe: {} + etcd: + # The etcd binary is included in the clustermesh API server image, so the same image from above is reused. + # Independent override isn't supported, because clustermesh-apiserver is tested against the etcd version it is + # built with. + + # -- Specifies the resources for etcd container in the apiserver + resources: {} + # requests: + # cpu: 200m + # memory: 256Mi + # limits: + # cpu: 1000m + # memory: 256Mi + + # -- Security context to be added to clustermesh-apiserver etcd containers + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + # -- lifecycle setting for the etcd container + lifecycle: {} + init: + # -- Specifies the resources for etcd init container in the apiserver + resources: {} + # requests: + # cpu: 100m + # memory: 100Mi + # limits: + # cpu: 100m + # memory: 100Mi + + # -- Additional arguments to `clustermesh-apiserver etcdinit`. + extraArgs: [] + # -- Additional environment variables to `clustermesh-apiserver etcdinit`. + extraEnv: [] + # @schema + # enum: [Disk, Memory] + # @schema + # -- Specifies whether etcd data is stored in a temporary volume backed by + # the node's default medium, such as disk, SSD or network storage (Disk), or + # RAM (Memory). 
The Memory option enables improved etcd read and write + # performance at the cost of additional memory usage, which counts against + # the memory limits of the container. + storageMedium: Disk + kvstoremesh: + # -- Enable KVStoreMesh. KVStoreMesh caches the information retrieved + # from the remote clusters in the local etcd instance. + enabled: true + # -- TCP port for the KVStoreMesh health API. + healthPort: 9881 + # -- Configuration for the KVStoreMesh readiness probe. + readinessProbe: {} + # -- Additional KVStoreMesh arguments. + extraArgs: [] + # -- Additional KVStoreMesh environment variables. + extraEnv: [] + # -- Resource requests and limits for the KVStoreMesh container + resources: {} + # requests: + # cpu: 100m + # memory: 64Mi + # limits: + # cpu: 1000m + # memory: 1024M + + # -- Additional KVStoreMesh volumeMounts. + extraVolumeMounts: [] + # -- KVStoreMesh Security context + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + # -- lifecycle setting for the KVStoreMesh container + lifecycle: {} + service: + # -- The type of service used for apiserver access. + type: NodePort + # -- Optional port to use as the node port for apiserver access. + # + # WARNING: make sure to configure a different NodePort in each cluster if + # kube-proxy replacement is enabled, as Cilium is currently affected by a known + # bug (#24692) when NodePorts are handled by the KPR implementation. If a service + # with the same NodePort exists both in the local and the remote cluster, all + # traffic originating from inside the cluster and targeting the corresponding + # NodePort will be redirected to a local backend, regardless of whether the + # destination node belongs to the local or the remote cluster. + nodePort: 32379 + # -- Annotations for the clustermesh-apiserver service. + # Example annotations to configure an internal load balancer on different cloud providers: + # * AKS: service.beta.kubernetes.io/azure-load-balancer-internal: "true" + # * EKS: service.beta.kubernetes.io/aws-load-balancer-scheme: "internal" + # * GKE: networking.gke.io/load-balancer-type: "Internal" + annotations: {} + # -- Labels for the clustermesh-apiserver service. + labels: {} + # @schema + # enum: [Local, Cluster] + # @schema + # -- The externalTrafficPolicy of service used for apiserver access. + externalTrafficPolicy: Cluster + # @schema + # enum: [Local, Cluster] + # @schema + # -- The internalTrafficPolicy of service used for apiserver access. + internalTrafficPolicy: Cluster + # @schema + # enum: [HAOnly, Always, Never] + # @schema + # -- Defines when to enable session affinity. + # Each replica in a clustermesh-apiserver deployment runs its own discrete + # etcd cluster. Remote clients connect to one of the replicas through a + # shared Kubernetes Service. A client reconnecting to a different backend + # will require a full resync to ensure data integrity. Session affinity + # can reduce the likelihood of this happening, but may not be supported + # by all cloud providers. + # Possible values: + # - "HAOnly" (default) Only enable session affinity for deployments with more than 1 replica. + # - "Always" Always enable session affinity. + # - "Never" Never enable session affinity. Useful in environments where + # session affinity is not supported, but may lead to slightly + # degraded performance due to more frequent reconnections. + enableSessionAffinity: "HAOnly" + # @schema + # type: [null, string] + # @schema + # -- Configure a loadBalancerClass. 
+ # Allows to configure the loadBalancerClass on the clustermesh-apiserver + # LB service in case the Service type is set to LoadBalancer + # (requires Kubernetes 1.24+). + loadBalancerClass: ~ + # @schema + # type: [null, string] + # @schema + # -- Configure a specific loadBalancerIP. + # Allows to configure a specific loadBalancerIP on the clustermesh-apiserver + # LB service in case the Service type is set to LoadBalancer. + loadBalancerIP: ~ + # -- Configure loadBalancerSourceRanges. + # Allows to configure the source IP ranges allowed to access the + # clustermesh-apiserver LB service in case the Service type is set to LoadBalancer. + loadBalancerSourceRanges: [] + # -- Number of replicas run for the clustermesh-apiserver deployment. + replicas: 1 + # -- lifecycle setting for the apiserver container + lifecycle: {} + # -- terminationGracePeriodSeconds for the clustermesh-apiserver deployment + terminationGracePeriodSeconds: 30 + # -- Additional clustermesh-apiserver arguments. + extraArgs: [] + # -- Additional clustermesh-apiserver environment variables. + extraEnv: [] + # -- Additional clustermesh-apiserver volumes. + extraVolumes: [] + # -- Additional clustermesh-apiserver volumeMounts. + extraVolumeMounts: [] + # -- Security context to be added to clustermesh-apiserver containers + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + # -- Security context to be added to clustermesh-apiserver pods + podSecurityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 + fsGroup: 65532 + # -- Annotations to be added to clustermesh-apiserver pods + podAnnotations: {} + # -- Labels to be added to clustermesh-apiserver pods + podLabels: {} + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # @schema + # type: [null, integer, string] + # @schema + # -- Minimum number/percentage of pods that should remain scheduled. 
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # @schema + # type: [null, integer, string] + # @schema + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + # -- Resource requests and limits for the clustermesh-apiserver + resources: {} + # requests: + # cpu: 100m + # memory: 64Mi + # limits: + # cpu: 1000m + # memory: 1024M + + # -- Affinity for clustermesh.apiserver + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + k8s-app: clustermesh-apiserver + topologyKey: kubernetes.io/hostname + # -- Pod topology spread constraints for clustermesh-apiserver + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Node labels for pod assignment + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for pod assignment on nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: [] + # -- clustermesh-apiserver update strategy + updateStrategy: + type: RollingUpdate + rollingUpdate: + # @schema + # type: [integer, string] + # @schema + maxSurge: 1 + # @schema + # type: [integer, string] + # @schema + maxUnavailable: 0 + # -- The priority class to use for clustermesh-apiserver + priorityClassName: "" + tls: + # -- Configure the clustermesh authentication mode. + # Supported values: + # - legacy: All clusters access remote clustermesh instances with the same + # username (i.e., remote). The "remote" certificate must be + # generated with CN=remote if provided manually. + # - migration: Intermediate mode required to upgrade from legacy to cluster + # (and vice versa) with no disruption. Specifically, it enables + # the creation of the per-cluster usernames, while still using + # the common one for authentication. The "remote" certificate must + # be generated with CN=remote if provided manually (same as legacy). + # - cluster: Each cluster accesses remote etcd instances with a username + # depending on the local cluster name (i.e., remote-). + # The "remote" certificate must be generated with CN=remote- + # if provided manually. Cluster mode is meaningful only when the same + # CA is shared across all clusters part of the mesh. + authMode: legacy + # -- Allow users to provide their own certificates + # Users may need to provide their certificates using + # a mechanism that requires they provide their own secrets. + # This setting does not apply to any of the auto-generated + # mechanisms below, it only restricts the creation of secrets + # via the `tls-provided` templates. + enableSecrets: true + # -- Configure automatic TLS certificates generation. + # A Kubernetes CronJob is used the generate any + # certificates not provided by the user at installation + # time. + auto: + # -- When set to true, automatically generate a CA and certificates to + # enable mTLS between clustermesh-apiserver and external workload instances. + # If set to false, the certs to be provided by setting appropriate values below. + enabled: true + # Sets the method to auto-generate certificates. Supported values: + # - helm: This method uses Helm to generate all certificates. 
+ # - cronJob: This method uses a Kubernetes CronJob the generate any + # certificates not provided by the user at installation + # time. + # - certmanager: This method use cert-manager to generate & rotate certificates. + method: helm + # -- Generated certificates validity duration in days. + certValidityDuration: 1095 + # -- Schedule for certificates regeneration (regardless of their expiration date). + # Only used if method is "cronJob". If nil, then no recurring job will be created. + # Instead, only the one-shot job is deployed to generate the certificates at + # installation time. + # + # Due to the out-of-band distribution of client certs to external workloads the + # CA is (re)regenerated only if it is not provided as a helm value and the k8s + # secret is manually deleted. + # + # Defaults to none. Commented syntax gives midnight of the first day of every + # fourth month. For syntax, see + # https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax + # schedule: "0 0 1 */4 *" + + # [Example] + # certManagerIssuerRef: + # group: cert-manager.io + # kind: ClusterIssuer + # name: ca-issuer + # -- certmanager issuer used when clustermesh.apiserver.tls.auto.method=certmanager. + certManagerIssuerRef: {} + # -- base64 encoded PEM values for the clustermesh-apiserver server certificate and private key. + # Used if 'auto' is not enabled. + server: + cert: "" + key: "" + # -- Extra DNS names added to certificate when it's auto generated + extraDnsNames: [] + # -- Extra IP addresses added to certificate when it's auto generated + extraIpAddresses: [] + # -- base64 encoded PEM values for the clustermesh-apiserver admin certificate and private key. + # Used if 'auto' is not enabled. + admin: + cert: "" + key: "" + # -- base64 encoded PEM values for the clustermesh-apiserver client certificate and private key. + # Used if 'auto' is not enabled. + client: + cert: "" + key: "" + # -- base64 encoded PEM values for the clustermesh-apiserver remote cluster certificate and private key. + # Used if 'auto' is not enabled. + remote: + cert: "" + key: "" + # clustermesh-apiserver Prometheus metrics configuration + metrics: + # -- Enables exporting apiserver metrics in OpenMetrics format. + enabled: true + # -- Configure the port the apiserver metric server listens on. + port: 9962 + kvstoremesh: + # -- Enables exporting KVStoreMesh metrics in OpenMetrics format. + enabled: true + # -- Configure the port the KVStoreMesh metric server listens on. + port: 9964 + etcd: + # -- Enables exporting etcd metrics in OpenMetrics format. + enabled: true + # -- Set level of detail for etcd metrics; specify 'extensive' to include server side gRPC histogram metrics. + mode: basic + # -- Configure the port the etcd metric server listens on. + port: 9963 + serviceMonitor: + # -- Enable service monitor. + # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + enabled: false + # -- Labels to add to ServiceMonitor clustermesh-apiserver + labels: {} + # -- Annotations to add to ServiceMonitor clustermesh-apiserver + annotations: {} + # -- Specify the Kubernetes namespace where Prometheus expects to find + # service monitors configured. 
+ # namespace: "" + + # -- Interval for scrape metrics (apiserver metrics) + interval: "10s" + # @schema + # type: [null, array] + # @schema + # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics) + relabelings: ~ + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics) + metricRelabelings: ~ + kvstoremesh: + # -- Interval for scrape metrics (KVStoreMesh metrics) + interval: "10s" + # @schema + # type: [null, array] + # @schema + # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics) + relabelings: ~ + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics) + metricRelabelings: ~ + etcd: + # -- Interval for scrape metrics (etcd metrics) + interval: "10s" + # @schema + # type: [null, array] + # @schema + # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics) + relabelings: ~ + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics) + metricRelabelings: ~ +# -- Configure external workloads support +externalWorkloads: + # -- Enable support for external workloads, such as VMs (false by default). + enabled: false +# -- Configure cgroup related configuration +cgroup: + autoMount: + # -- Enable auto mount of cgroup2 filesystem. + # When `autoMount` is enabled, cgroup2 filesystem is mounted at + # `cgroup.hostRoot` path on the underlying host and inside the cilium agent pod. + # If users disable `autoMount`, it's expected that users have mounted + # cgroup2 filesystem at the specified `cgroup.hostRoot` volume, and then the + # volume will be mounted inside the cilium agent pod at the same path. + enabled: false + # -- Init Container Cgroup Automount resource limits & requests + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + # -- Configure cgroup root where cgroup2 filesystem is mounted on the host (see also: `cgroup.autoMount`) + hostRoot: /sys/fs/cgroup +# -- Configure sysctl override described in #20072. +sysctlfix: + # -- Enable the sysctl override. When enabled, the init container will mount the /proc of the host so that the `sysctlfix` utility can execute. + enabled: true +# -- Configure whether to enable auto detect of terminating state for endpoints +# in order to support graceful termination. +enableK8sTerminatingEndpoint: true +# -- Configure whether to unload DNS policy rules on graceful shutdown +# dnsPolicyUnloadOnShutdown: false + +# -- Configure the key of the taint indicating that Cilium is not ready on the node. +# When set to a value starting with `ignore-taint.cluster-autoscaler.kubernetes.io/`, the Cluster Autoscaler will ignore the taint on its decisions, allowing the cluster to scale up. +agentNotReadyTaintKey: "node.cilium.io/agent-not-ready" +dnsProxy: + # -- Timeout (in seconds) when closing the connection between the DNS proxy and the upstream server. If set to 0, the connection is closed immediately (with TCP RST). If set to -1, the connection is closed asynchronously in the background. + socketLingerTimeout: 10 + # -- DNS response code for rejecting DNS requests, available options are '[nameError refused]'. 
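+  # Illustrative override using the other listed option (refused is the default):
+  #   dnsRejectResponseCode: nameError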
+ dnsRejectResponseCode: refused
+ # -- Allow the DNS proxy to compress responses to endpoints that are larger than 512 bytes (or the size advertised via the EDNS0 option, if present).
+ enableDnsCompression: true
+ # -- Maximum number of IPs to maintain per FQDN name for each endpoint.
+ endpointMaxIpPerHostname: 1000
+ # -- Time during which idle but previously active connections with expired DNS lookups are still considered alive.
+ idleConnectionGracePeriod: 0s
+ # -- Maximum number of IPs to retain for expired DNS lookups with still-active connections.
+ maxDeferredConnectionDeletes: 10000
+ # -- The minimum time, in seconds, to use DNS data for toFQDNs policies. If
+ # the upstream DNS server returns a DNS record with a shorter TTL, Cilium
+ # overwrites the TTL with this value. Setting this value to zero means that
+ # Cilium will honor the TTLs returned by the upstream DNS server.
+ minTtl: 0
+ # -- DNS cache data at this path is preloaded on agent startup.
+ preCache: ""
+ # -- Global port on which the in-agent DNS proxy should listen. Default 0 is an OS-assigned port.
+ proxyPort: 0
+ # -- The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information.
+ proxyResponseMaxDelay: 100ms
+ # -- DNS proxy operation mode (true/false, or unset to use version dependent defaults)
+ # enableTransparentMode: true
+# -- SCTP Configuration Values
+sctp:
+ # -- Enable SCTP support. NOTE: Currently, SCTP support does not include rewriting ports or multihoming.
+ enabled: false
+# -- Enable Non-Default-Deny policies
+enableNonDefaultDenyPolicies: true
+# Configuration for types of authentication for Cilium (beta)
+authentication:
+ # -- Enable authentication processing and garbage collection.
+ # Note that if disabled, policy enforcement will still block requests that require authentication.
+ # But the resulting authentication requests for these requests will not be processed, so those requests will not be allowed.
+ enabled: true
+ # -- Buffer size of the channel Cilium uses to receive authentication events from the signal map.
+ queueSize: 1024
+ # -- Buffer size of the channel Cilium uses to receive certificate expiration events from auth handlers.
+ rotatedIdentitiesQueueSize: 1024
+ # -- Interval for garbage collection of auth map entries.
+ gcInterval: "5m0s"
+ # Configuration for Cilium's service-to-service mutual authentication using TLS handshakes.
+ # Note that this is not full mTLS support without also enabling encryption of some form.
+ # Current encryption options are WireGuard or IPsec, configured in encryption block above.
+ mutual:
+ # -- Port on the agent where mutual authentication handshakes between agents will be performed
+ port: 4250
+ # -- Timeout for connecting to the remote node TCP socket
+ connectTimeout: 5s
+ # Settings for SPIRE
+ spire:
+ # -- Enable SPIRE integration (beta)
+ enabled: false
+ # -- Annotations to be added to all top-level spire objects (resources under templates/spire)
+ annotations: {}
+ # Settings to control the SPIRE installation and configuration
+ install:
+ # -- Enable SPIRE installation.
+ # This will take effect only if authentication.mutual.spire.enabled is true
+ enabled: true
+ # -- SPIRE namespace to install into
+ namespace: cilium-spire
+ # -- SPIRE namespace already exists. Set to true if Helm should not create, manage, and import the SPIRE namespace.
+ existingNamespace: false
+ # -- init container image of SPIRE agent and server
+ initImage:
+ # @schema
+ # type: [null, string]
+ # @schema
+ override: ~
+ repository: "docker.io/library/busybox"
+ tag: "1.37.0"
+ digest: "sha256:d82f458899c9696cb26a7c02d5568f81c8c8223f8661bb2a7988b269c8b9051e"
+ useDigest: true
+ pullPolicy: "IfNotPresent"
+ # SPIRE agent configuration
+ agent:
+ # -- The priority class to use for the spire agent
+ priorityClassName: ""
+ # -- SPIRE agent image
+ image:
+ # @schema
+ # type: [null, string]
+ # @schema
+ override: ~
+ repository: "ghcr.io/spiffe/spire-agent"
+ tag: "1.9.6"
+ digest: "sha256:5106ac601272a88684db14daf7f54b9a45f31f77bb16a906bd5e87756ee7b97c"
+ useDigest: true
+ pullPolicy: "IfNotPresent"
+ # -- SPIRE agent service account
+ serviceAccount:
+ create: true
+ name: spire-agent
+ # -- SPIRE agent annotations
+ annotations: {}
+ # -- SPIRE agent labels
+ labels: {}
+ # -- container resource limits & requests
+ resources: {}
+ # -- SPIRE Workload Attestor kubelet verification.
+ skipKubeletVerification: true
+ # -- SPIRE agent tolerations configuration
+ # By default it follows the same tolerations as the agent itself
+ # to allow the Cilium agent on this node to connect to SPIRE.
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations:
+ - key: node.kubernetes.io/not-ready
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ effect: NoSchedule
+ value: "true"
+ - key: CriticalAddonsOnly
+ operator: "Exists"
+ # -- SPIRE agent affinity configuration
+ affinity: {}
+ # -- SPIRE agent nodeSelector configuration
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector: {}
+ # -- Security context to be added to spire agent pods.
+ # SecurityContext holds pod-level security attributes and common container settings.
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+ podSecurityContext: {}
+ # -- Security context to be added to spire agent containers.
+ # SecurityContext holds container-level security attributes and settings.
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+ securityContext: {}
+ server:
+ # -- The priority class to use for the spire server
+ priorityClassName: ""
+ # -- SPIRE server image
+ image:
+ # @schema
+ # type: [null, string]
+ # @schema
+ override: ~
+ repository: "ghcr.io/spiffe/spire-server"
+ tag: "1.9.6"
+ digest: "sha256:59a0b92b39773515e25e68a46c40d3b931b9c1860bc445a79ceb45a805cab8b4"
+ useDigest: true
+ pullPolicy: "IfNotPresent"
+ # -- SPIRE server service account
+ serviceAccount:
+ create: true
+ name: spire-server
+ # -- SPIRE server init containers
+ initContainers: []
+ # -- SPIRE server annotations
+ annotations: {}
+ # -- SPIRE server labels
+ labels: {}
+ # -- container resource limits & requests
+ resources: {}
+ # SPIRE server service configuration
+ service:
+ # -- Service type for the SPIRE server service
+ type: ClusterIP
+ # -- Annotations to be added to the SPIRE server service
+ annotations: {}
+ # -- Labels to be added to the SPIRE server service
+ labels: {}
+ # -- SPIRE server affinity configuration
+ affinity: {}
+ # -- SPIRE server nodeSelector configuration
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector: {}
+ # -- SPIRE server tolerations configuration
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations: []
+ # SPIRE server datastorage configuration
+ dataStorage:
+ # -- Enable SPIRE server data storage
+ enabled: true
+ # -- Size of the SPIRE server data storage
+ size: 1Gi
+ # -- Access mode of the SPIRE server data storage
+ accessMode: ReadWriteOnce
+ # @schema
+ # type: [null, string]
+ # @schema
+ # -- StorageClass of the SPIRE server data storage
+ storageClass: null
+ # -- Security context to be added to spire server pods.
+ # SecurityContext holds pod-level security attributes and common container settings.
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+ podSecurityContext: {}
+ # -- Security context to be added to spire server containers.
+ # SecurityContext holds container-level security attributes and settings.
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+ securityContext: {}
+ # SPIRE CA configuration
+ ca:
+ # -- SPIRE CA key type
+ # AWS requires the use of RSA. EC cryptography is not supported
+ keyType: "rsa-4096"
+ # -- SPIRE CA Subject
+ subject:
+ country: "US"
+ organization: "SPIRE"
+ commonName: "Cilium SPIRE CA"
+ # @schema
+ # type: [null, string]
+ # @schema
+ # -- SPIRE server address used by Cilium Operator
+ #
+ # If k8s Service DNS along with port number is used (e.g. <service-name>.<namespace>.svc(.*):<port> format),
+ # Cilium Operator will resolve its address by looking up the clusterIP from Service resource.
+ #
+ # Example values: 10.0.0.1:8081, spire-server.cilium-spire.svc:8081
+ serverAddress: ~
+ # -- SPIFFE trust domain to use for fetching certificates
+ trustDomain: spiffe.cilium
+ # -- SPIRE socket path where the SPIRE delegated api agent is listening
+ adminSocketPath: /run/spire/sockets/admin.sock
+ # -- SPIRE socket path where the SPIRE workload agent is listening.
+ # Applies to both the Cilium Agent and Operator + agentSocketPath: /run/spire/sockets/agent/agent.sock + # -- SPIRE connection timeout + connectionTimeout: 30s +# -- Enable Internal Traffic Policy +enableInternalTrafficPolicy: true +# -- Enable LoadBalancer IP Address Management +enableLBIPAM: true + diff --git a/cilium/src/values1.17.8.yaml b/cilium/src/values1.17.8.yaml index 65566c6..6142cbf 100644 --- a/cilium/src/values1.17.8.yaml +++ b/cilium/src/values1.17.8.yaml @@ -20,7 +20,7 @@ commonLabels: {} # Cilium will not change critical values to ensure continued operation # This flag is not required for new installations. # For example: '1.7', '1.8', '1.9' -upgradeCompatibility: null +upgradeCompatibility: 1.17.1 debug: # -- Enable debug logging enabled: false @@ -53,12 +53,12 @@ iptablesRandomFully: false # @default -- `"~/.kube/config"` kubeConfigPath: "" # -- (string) Kubernetes service host - use "auto" for automatic lookup from the cluster-info ConfigMap -k8sServiceHost: "" +k8sServiceHost: localhost # @schema # type: [string, integer] # @schema # -- (string) Kubernetes service port -k8sServicePort: "" +k8sServicePort: 7445 # @schema # type: [null, string] # @schema @@ -80,14 +80,14 @@ k8sClientRateLimit: # @schema # -- (int) The sustained request rate in requests per second. # @default -- 10 - qps: + qps: 20 # @schema # type: [null, integer] # @schema # -- (int) The burst request rate in requests per second. # The rate limiter will allow short bursts with a higher rate. # @default -- 20 - burst: + burst: 100 # -- Configure the client side rate limit for the Cilium Operator operator: # @schema @@ -110,11 +110,11 @@ cluster: # * It must begin and end with a lower case alphanumeric character; # * It may contain lower case alphanumeric characters and dashes between. # The "default" name cannot be used if the Cluster ID is different from 0. - name: default + name: talos # -- (int) Unique ID of the cluster. Must be unique across all connected # clusters and in the range of 1 to 255. Only required for Cluster Mesh, # may be 0 if Cluster Mesh is not used. - id: 0 + id: 1 # -- Define serviceAccount names for components. # @default -- Component's fully qualified name. serviceAccounts: @@ -183,7 +183,7 @@ agent: true # -- Agent container name. name: cilium # -- Roll out cilium agent pods automatically when configmap is updated. -rollOutCiliumPods: false +rollOutCiliumPods: true # -- Agent container image. image: # @schema @@ -314,7 +314,7 @@ securityContext: # Used since cilium monitor uses mmap - IPC_LOCK # Used in iptables. Consider removing once we are iptables-free - - SYS_MODULE + #- SYS_MODULE # Needed to switch network namespaces (used for health endpoint, socket-LB). # We need it for now but might not need it for >= 5.11 specially # for the 'SYS_RESOURCE'. @@ -358,7 +358,7 @@ securityContext: # Used since cilium modifies routing tables, etc... - NET_ADMIN # Used in iptables. Consider removing once we are iptables-free - - SYS_MODULE + #- SYS_MODULE # We need it for now but might not need it for >= 5.11 specially # for the 'SYS_RESOURCE'. # In >= 5.8 there's already BPF and PERMON capabilities @@ -431,7 +431,7 @@ highScaleIPcache: # -- Configure L2 announcements l2announcements: # -- Enable L2 announcements - enabled: false + enabled: true # -- If a lease is not renewed for X duration, the current leader is considered dead, a new leader is picked # leaseDuration: 15s # -- The interval at which the leader will renew the lease @@ -619,7 +619,7 @@ bpf: # the kernel supports it. 
The latter has the implication that it will also # bypass netfilter in the host namespace. # @default -- `false` - hostLegacyRouting: ~ + hostLegacyRouting: true # @schema # type: [null, boolean] # @schema @@ -793,7 +793,7 @@ daemon: # masqueraded (to an output device IPv4 address), if the output device runs the # program. When not specified, probing will automatically detect devices that have # a non-local route. This should be used only when autodetection is not suitable. -# devices: "" +devices: eth+ # -- Enables experimental support for the detection of new and removed datapath # devices. When devices change the eBPF datapath is reloaded and services updated. @@ -855,15 +855,15 @@ envoyConfig: ingressController: # -- Enable cilium ingress controller # This will automatically set enable-envoy-config as well. - enabled: false + enabled: true # -- Set cilium ingress controller to be the default ingress controller # This will let cilium ingress controller route entries without ingress class set - default: false + default: true # -- Default ingress load balancer mode # Supported values: shared, dedicated # For granular control, use the following annotations on the ingress resource: # "ingress.cilium.io/loadbalancer-mode: dedicated" (or "shared"). - loadbalancerMode: dedicated + loadbalancerMode: shared # -- Enforce https for host having matching TLS host in Ingress. # Incoming traffic to http listener will return 308 http error code with respective location in header. enforceHttps: true @@ -898,7 +898,8 @@ ingressController: # -- Labels to be added for the shared LB service labels: {} # -- Annotations to be added for the shared LB service - annotations: {} + annotations: + io.cilium/lb-ipam-ips: 192.168.0.180 # -- Service type for the shared LB service type: LoadBalancer # @schema @@ -948,7 +949,7 @@ ingressController: gatewayAPI: # -- Enable support for Gateway API in cilium # This will automatically set enable-envoy-config as well. - enabled: false + enabled: true # -- Enable proxy protocol for all GatewayAPI listeners. Note that _only_ Proxy protocol traffic will be accepted once this is enabled. enableProxyProtocol: false # -- Enable Backend Protocol selection support (GEP-1911) for Gateway API via appProtocol. @@ -1439,9 +1440,9 @@ hubble: extraIpAddresses: [] relay: # -- Enable Hubble Relay (requires hubble.enabled=true) - enabled: false + enabled: true # -- Roll out Hubble Relay pods automatically when configmap is updated. - rollOutPods: false + rollOutPods: true # -- Hubble-relay container image. image: # @schema @@ -1656,7 +1657,7 @@ hubble: port: 6062 ui: # -- Whether to enable the Hubble UI. - enabled: false + enabled: true standalone: # -- When true, it will allow installing the Hubble UI only, without checking dependencies. # It is useful if a cluster already has cilium and Hubble relay installed and you just @@ -1680,7 +1681,7 @@ hubble: # - key: ca.crt # path: hubble-relay-ca.crt # -- Roll out Hubble-ui pods automatically when configmap is updated. - rollOutPods: false + rollOutPods: true tls: client: # -- Name of the Secret containing the client certificate and key for Hubble UI @@ -1912,7 +1913,7 @@ installNoConntrackIptablesRules: false ipam: # -- Configure IP Address Management mode. # ref: https://docs.cilium.io/en/stable/network/concepts/ipam/ - mode: "cluster-pool" + mode: kubernetes # -- Maximum rate at which the CiliumNode custom resource is updated. 
ciliumNodeUpdateRate: "15s" # -- Pre-allocation settings for IPAM in Multi-Pool mode @@ -2026,7 +2027,7 @@ readinessProbe: # -- Configure the kube-proxy replacement in Cilium BPF datapath # Valid options are "true" or "false". # ref: https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/ -#kubeProxyReplacement: "false" +kubeProxyReplacement: true # -- healthz server bind address for the kube-proxy replacement. # To enable set the value to '0.0.0.0:10256' for all ipv4 @@ -2129,7 +2130,7 @@ loadBalancer: # -- algorithm is the name of the load balancing algorithm for backend # selection e.g. random or maglev - # algorithm: random + algorithm: maglev # -- mode is the operation mode of load balancing for remote backends # e.g. snat, dsr, hybrid @@ -2472,14 +2473,14 @@ envoy: # We need it for now but might not need it for >= 5.11 specially # for the 'SYS_RESOURCE'. # In >= 5.8 there's already BPF and PERMON capabilities - - SYS_ADMIN + #- SYS_ADMIN # Both PERFMON and BPF requires kernel 5.8, container runtime # cri-o >= v1.22.0 or containerd >= v1.5.0. # If available, SYS_ADMIN can be removed. - #- PERFMON - #- BPF + - PERFMON + - BPF # -- Keep capability `NET_BIND_SERVICE` for Envoy process. - keepCapNetBindService: false + keepCapNetBindService: true # -- Affinity for cilium-envoy. affinity: podAntiAffinity: @@ -2711,7 +2712,7 @@ operator: # -- Enable the cilium-operator component (required). enabled: true # -- Roll out cilium-operator pods automatically when configmap is updated. - rollOutPods: false + rollOutPods: true # -- cilium-operator image. image: # @schema @@ -3565,7 +3566,7 @@ cgroup: # If users disable `autoMount`, it's expected that users have mounted # cgroup2 filesystem at the specified `cgroup.hostRoot` volume, and then the # volume will be mounted inside the cilium agent pod at the same path. - enabled: true + enabled: false # -- Init Container Cgroup Automount resource limits & requests resources: {} # limits: @@ -3575,7 +3576,7 @@ cgroup: # cpu: 100m # memory: 128Mi # -- Configure cgroup root where cgroup2 filesystem is mounted on the host (see also: `cgroup.autoMount`) - hostRoot: /run/cilium/cgroupv2 + hostRoot: /sys/fs/cgroup # -- Configure sysctl override described in #20072. sysctlfix: # -- Enable the sysctl override. When enabled, the init container will mount the /proc of the host so that the `sysctlfix` utility can execute. diff --git a/cilium/src/values1.18.2.yaml b/cilium/src/values1.18.2.yaml index 078b397..7993330 100644 --- a/cilium/src/values1.18.2.yaml +++ b/cilium/src/values1.18.2.yaml @@ -20,7 +20,7 @@ commonLabels: {} # Cilium will not change critical values to ensure continued operation # This flag is not required for new installations. # For example: '1.7', '1.8', '1.9' -upgradeCompatibility: null +upgradeCompatibility: 1.17.8 debug: # -- Enable debug logging enabled: false
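Note on the L2 announcement and LB-IPAM settings enabled above: the chart values only switch the features on; the address pool and the announcement policy are separate Cilium resources that are not part of this patch. A minimal sketch of what they might look like for the 192.168.0.180 address used by the shared ingress service follows — the resource names, the /32 block, and the interface pattern are placeholders rather than values taken from this change, and on releases where the pool CRD has been promoted, cilium.io/v2 can be used instead of v2alpha1:

apiVersion: cilium.io/v2alpha1
kind: CiliumLoadBalancerIPPool
metadata:
  name: ingress-pool                 # placeholder name
spec:
  blocks:
  - cidr: 192.168.0.180/32           # single address matching the lb-ipam-ips annotation
---
apiVersion: cilium.io/v2alpha1
kind: CiliumL2AnnouncementPolicy
metadata:
  name: default-l2-policy            # placeholder name
spec:
  interfaces:
  - ^eth[0-9]+                       # aligns with the 'devices: eth+' setting
  externalIPs: true
  loadBalancerIPs: true

Without a matching pool the shared ingress LoadBalancer service stays in Pending, and without an L2 announcement policy the assigned address is never announced on the local network.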