From 4786b2ff422285ad228d94fbcc0b994ab88d9b59 Mon Sep 17 00:00:00 2001
From: Philip Haupt <der.mad.mob@gmail.com>
Date: Thu, 23 Oct 2025 20:55:38 +0200
Subject: [PATCH] cilium values

---
 cilium/src/values.yaml       |    3 -
 cilium/src/values1.17.8.yaml | 3822 ++++++++++++++++++++++++++++++++
 cilium/src/values1.18.2.yaml | 4012 ++++++++++++++++++++++++++++++++++
 3 files changed, 7834 insertions(+), 3 deletions(-)
 create mode 100644 cilium/src/values1.17.8.yaml
 create mode 100644 cilium/src/values1.18.2.yaml

diff --git a/cilium/src/values.yaml b/cilium/src/values.yaml
index 231fd1b..3483d74 100644
--- a/cilium/src/values.yaml
+++ b/cilium/src/values.yaml
@@ -1,6 +1,3 @@
-cluster:
-  name: talos
-  id: 1
 bpf:
   hostLegacyRouting: true
diff --git a/cilium/src/values1.17.8.yaml b/cilium/src/values1.17.8.yaml
new file mode 100644
index 0000000..65566c6
--- /dev/null
+++ b/cilium/src/values1.17.8.yaml
@@ -0,0 +1,3822 @@
+# File generated by install/kubernetes/Makefile; DO NOT EDIT.
+# This file is based on install/kubernetes/cilium/*values.yaml.tmpl.
+
+
+# @schema
+# type: [null, string]
+# @schema
+# -- namespaceOverride allows overriding the destination namespace for Cilium resources.
+# This property makes it possible to use Cilium as part of an Umbrella Chart with different targets.
+namespaceOverride: ""
+# @schema
+# type: [null, object]
+# @schema
+# -- commonLabels allows users to add common labels for all Cilium resources.
+commonLabels: {}
+# @schema
+# type: [null, string]
+# @schema
+# -- upgradeCompatibility helps users upgrading by ensuring that the configMap for
+# Cilium does not change critical values, preserving continued operation.
+# This flag is not required for new installations.
+# For example: '1.7', '1.8', '1.9'
+upgradeCompatibility: null
+debug:
+  # -- Enable debug logging
+  enabled: false
+  # @schema
+  # type: [null, string]
+  # @schema
+  # -- Configure verbosity levels for debug logging
+  # This option enables debug messages for operations in sub-systems such as
+  # kvstore, envoy, datapath or policy, while flow enables debug messages
+  # emitted per request, message and connection.
+  # Multiple values can be set via a space-separated string (e.g. "datapath envoy").
+  #
+  # Applicable values:
+  # - flow
+  # - kvstore
+  # - envoy
+  # - datapath
+  # - policy
+  verbose: ~
+rbac:
+  # -- Enable creation of Role-Based Access Control configuration.
+  create: true
+# -- Configure image pull secrets for pulling container images
+imagePullSecrets: []
+# - name: "image-pull-secret"
+
+# -- Configure iptables --random-fully. Disabled by default. See https://github.com/cilium/cilium/issues/13037 for more information.
+iptablesRandomFully: false
+# -- (string) Kubernetes config path
+# @default -- `"~/.kube/config"`
+kubeConfigPath: ""
+# -- (string) Kubernetes service host - use "auto" for automatic lookup from the cluster-info ConfigMap
+k8sServiceHost: ""
+# @schema
+# type: [string, integer]
+# @schema
+# -- (string) Kubernetes service port
+k8sServicePort: ""
+# @schema
+# type: [null, string]
+# @schema
+# -- (string) When `k8sServiceHost=auto`, allows customizing the configMap name. It defaults to `cluster-info`.
+k8sServiceLookupConfigMapName: ""
+# @schema
+# type: [null, string]
+# @schema
+# -- (string) When `k8sServiceHost=auto`, allows customizing the namespace that contains
+# `k8sServiceLookupConfigMapName`. It defaults to `kube-public`. See the example below.
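+# [Example] Illustrative override for the API-server lookup settings documented
+# above; these values mirror the documented defaults and are placeholders, not
+# settings shipped by this chart:
+#
+#   k8sServiceHost: auto
+#   k8sServiceLookupConfigMapName: cluster-info
+#   k8sServiceLookupNamespace: kube-public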
+k8sServiceLookupNamespace: "" +# -- Configure the client side rate limit for the agent +# +# If the amount of requests to the Kubernetes API server exceeds the configured +# rate limit, the agent will start to throttle requests by delaying +# them until there is budget or the request times out. +k8sClientRateLimit: + # @schema + # type: [null, integer] + # @schema + # -- (int) The sustained request rate in requests per second. + # @default -- 10 + qps: + # @schema + # type: [null, integer] + # @schema + # -- (int) The burst request rate in requests per second. + # The rate limiter will allow short bursts with a higher rate. + # @default -- 20 + burst: + # -- Configure the client side rate limit for the Cilium Operator + operator: + # @schema + # type: [null, integer] + # @schema + # -- (int) The sustained request rate in requests per second. + # @default -- 100 + qps: + # @schema + # type: [null, integer] + # @schema + # -- (int) The burst request rate in requests per second. + # The rate limiter will allow short bursts with a higher rate. + # @default -- 200 + burst: +cluster: + # -- Name of the cluster. Only required for Cluster Mesh and mutual authentication with SPIRE. + # It must respect the following constraints: + # * It must contain at most 32 characters; + # * It must begin and end with a lower case alphanumeric character; + # * It may contain lower case alphanumeric characters and dashes between. + # The "default" name cannot be used if the Cluster ID is different from 0. + name: default + # -- (int) Unique ID of the cluster. Must be unique across all connected + # clusters and in the range of 1 to 255. Only required for Cluster Mesh, + # may be 0 if Cluster Mesh is not used. + id: 0 +# -- Define serviceAccount names for components. +# @default -- Component's fully qualified name. +serviceAccounts: + cilium: + create: true + name: cilium + automount: true + annotations: {} + nodeinit: + create: true + # -- Enabled is temporary until https://github.com/cilium/cilium-cli/issues/1396 is implemented. + # Cilium CLI doesn't create the SAs for node-init, thus the workaround. Helm is not affected by + # this issue. Name and automount can be configured, if enabled is set to true. + # Otherwise, they are ignored. Enabled can be removed once the issue is fixed. + # Cilium-nodeinit DS must also be fixed. + enabled: false + name: cilium-nodeinit + automount: true + annotations: {} + envoy: + create: true + name: cilium-envoy + automount: true + annotations: {} + operator: + create: true + name: cilium-operator + automount: true + annotations: {} + preflight: + create: true + name: cilium-pre-flight + automount: true + annotations: {} + relay: + create: true + name: hubble-relay + automount: false + annotations: {} + ui: + create: true + name: hubble-ui + automount: true + annotations: {} + clustermeshApiserver: + create: true + name: clustermesh-apiserver + automount: true + annotations: {} + # -- Clustermeshcertgen is used if clustermesh.apiserver.tls.auto.method=cronJob + clustermeshcertgen: + create: true + name: clustermesh-apiserver-generate-certs + automount: true + annotations: {} + # -- Hubblecertgen is used if hubble.tls.auto.method=cronJob + hubblecertgen: + create: true + name: hubble-generate-certs + automount: true + annotations: {} +# -- Configure termination grace period for cilium-agent DaemonSet. +terminationGracePeriodSeconds: 1 +# -- Install the cilium agent resources. +agent: true +# -- Agent container name. 
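+# [Example] An illustrative cluster identity for Cluster Mesh, satisfying the
+# constraints documented in the `cluster` section above (name and id below are
+# placeholders, not defaults):
+#
+#   cluster:
+#     name: talos
+#     id: 1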
+name: cilium +# -- Roll out cilium agent pods automatically when configmap is updated. +rollOutCiliumPods: false +# -- Agent container image. +image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/cilium" + tag: "v1.17.8" + pullPolicy: "IfNotPresent" + # cilium-digest + digest: "sha256:6d7ea72ed311eeca4c75a1f17617a3d596fb6038d30d00799090679f82a01636" + useDigest: true +# -- Scheduling configurations for cilium pods +scheduling: + # @schema + # enum: ["anti-affinity", "kube-scheduler"] + # @schema + # -- Mode specifies how Cilium daemonset pods should be scheduled to Nodes. + # `anti-affinity` mode applies a pod anti-affinity rule to the cilium daemonset. + # Pod anti-affinity may significantly impact scheduling throughput for large clusters. + # See: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + # `kube-scheduler` mode forgoes the anti-affinity rule for full scheduling throughput. + # Kube-scheduler avoids host port conflict when scheduling pods. + # @default -- Defaults to apply a pod anti-affinity rule to the agent pod - `anti-affinity` + mode: anti-affinity +# -- Affinity for cilium-agent. +affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium +# -- Node selector for cilium-agent. +nodeSelector: + kubernetes.io/os: linux +# -- Node tolerations for agent scheduling to nodes with taints +# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ +tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" +# -- The priority class to use for cilium-agent. +priorityClassName: "" +# -- DNS policy for Cilium agent pods. +# Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy +dnsPolicy: "" +# -- Additional containers added to the cilium DaemonSet. +extraContainers: [] +# -- Additional initContainers added to the cilium Daemonset. +extraInitContainers: [] +# -- Additional agent container arguments. +extraArgs: [] +# -- Additional agent container environment variables. +extraEnv: [] +# -- Additional agent hostPath mounts. +extraHostPathMounts: [] +# - name: host-mnt-data +# mountPath: /host/mnt/data +# hostPath: /mnt/data +# hostPathType: Directory +# readOnly: true +# mountPropagation: HostToContainer + +# -- Additional agent volumes. +extraVolumes: [] +# -- Additional agent volumeMounts. +extraVolumeMounts: [] +# -- extraConfig allows you to specify additional configuration parameters to be +# included in the cilium-config configmap. +extraConfig: {} +# my-config-a: "1234" +# my-config-b: |- +# test 1 +# test 2 +# test 3 + +# -- Annotations to be added to all top-level cilium-agent objects (resources under templates/cilium-agent) +annotations: {} +# -- Security Context for cilium-agent pods. 
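+# [Example] Illustrative override of the agent image documented above, pinning a
+# different tag (a placeholder, not a released default); useDigest is disabled
+# here so the stale v1.17.8 digest pin does not override the tag:
+#
+#   image:
+#     tag: "v1.17.9"
+#     useDigest: false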
+podSecurityContext:
+  # -- AppArmorProfile options for the `cilium-agent` and init containers
+  appArmorProfile:
+    type: "Unconfined"
+  seccompProfile:
+    type: "Unconfined"
+# -- Annotations to be added to agent pods
+podAnnotations: {}
+# -- Labels to be added to agent pods
+podLabels: {}
+# -- Agent resource limits & requests
+# ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+resources: {}
+# limits:
+#   cpu: 4000m
+#   memory: 4Gi
+# requests:
+#   cpu: 100m
+#   memory: 512Mi
+
+# -- resources & limits for the agent init containers
+initResources: {}
+securityContext:
+  # -- User to run the pod with
+  # runAsUser: 0
+  # -- Run the pod with elevated privileges
+  privileged: false
+  # -- SELinux options for the `cilium-agent` and init containers
+  seLinuxOptions:
+    level: 's0'
+    # Running with spc_t since we have removed the privileged mode.
+    # Users can change it to a different type as long as they have the
+    # type available on the system.
+    type: 'spc_t'
+  capabilities:
+    # -- Capabilities for the `cilium-agent` container
+    ciliumAgent:
+      # Used to set socket permissions
+      - CHOWN
+      # Used to terminate the envoy child process
+      - KILL
+      # Used since cilium modifies routing tables, etc...
+      - NET_ADMIN
+      # Used since cilium creates raw sockets, etc...
+      - NET_RAW
+      # Used since cilium monitor uses mmap
+      - IPC_LOCK
+      # Used in iptables. Consider removing once we are iptables-free
+      - SYS_MODULE
+      # Needed to switch network namespaces (used for health endpoint, socket-LB).
+      # We need it for now but might not need it for >= 5.11, especially
+      # for the 'SYS_RESOURCE'.
+      # In >= 5.8 there are already BPF and PERFMON capabilities
+      - SYS_ADMIN
+      # Could be an alternative to SYS_ADMIN for the RLIMIT_NPROC
+      - SYS_RESOURCE
+      # Both PERFMON and BPF require kernel 5.8, container runtime
+      # cri-o >= v1.22.0 or containerd >= v1.5.0.
+      # If available, SYS_ADMIN can be removed.
+      #- PERFMON
+      #- BPF
+      # Allow discretionary access control (e.g. required for package installation)
+      - DAC_OVERRIDE
+      # Allow setting Access Control Lists (ACLs) on arbitrary files (e.g. required for package installation)
+      - FOWNER
+      # Allow executing a program that changes GID (e.g. required for package installation)
+      - SETGID
+      # Allow executing a program that changes UID (e.g. required for package installation)
+      - SETUID
+    # -- Capabilities for the `mount-cgroup` init container
+    mountCgroup:
+      # Only used for 'mount' cgroup
+      - SYS_ADMIN
+      # Used for nsenter
+      - SYS_CHROOT
+      - SYS_PTRACE
+    # -- Capabilities for the `apply-sysctl-overwrites` init container
+    applySysctlOverwrites:
+      # Required in order to access the host's /etc/sysctl.d dir
+      - SYS_ADMIN
+      # Used for nsenter
+      - SYS_CHROOT
+      - SYS_PTRACE
+    # -- Capabilities for the `clean-cilium-state` init container
+    cleanCiliumState:
+      # Most of the capabilities here are the same ones used in the
+      # cilium-agent's container because this container can be used to
+      # uninstall all Cilium resources, and therefore it is likely that
+      # it will need the same capabilities.
+      # Used since cilium modifies routing tables, etc...
+      - NET_ADMIN
+      # Used in iptables. Consider removing once we are iptables-free
+      - SYS_MODULE
+      # We need it for now but might not need it for >= 5.11, especially
+      # for the 'SYS_RESOURCE'.
+      # In >= 5.8 there are already BPF and PERFMON capabilities
+      - SYS_ADMIN
+      # Could be an alternative to SYS_ADMIN for the RLIMIT_NPROC
+      - SYS_RESOURCE
+      # Both PERFMON and BPF require kernel 5.8, container runtime
+      # cri-o >= v1.22.0 or containerd >= v1.5.0.
+      # If available, SYS_ADMIN can be removed.
+      #- PERFMON
+      #- BPF
+# -- Cilium agent update strategy
+updateStrategy:
+  type: RollingUpdate
+  rollingUpdate:
+    # @schema
+    # type: [integer, string]
+    # @schema
+    maxUnavailable: 2
+# Configuration Values for cilium-agent
+aksbyocni:
+  # -- Enable AKS BYOCNI integration.
+  # Note that this is incompatible with AKS clusters not created in BYOCNI mode:
+  # use Azure integration (`azure.enabled`) instead.
+  enabled: false
+# @schema
+# type: [boolean, string]
+# @schema
+# -- Enable installation of PodCIDR routes between worker
+# nodes if worker nodes share a common L2 network segment.
+autoDirectNodeRoutes: false
+# -- Enable skipping of PodCIDR routes between worker
+# nodes if the worker nodes are in a different L2 network segment.
+directRoutingSkipUnreachable: false
+# -- Annotate k8s node upon initialization with Cilium's metadata.
+annotateK8sNode: false
+azure:
+  # -- Enable Azure integration.
+  # Note that this is incompatible with AKS clusters created in BYOCNI mode: use
+  # AKS BYOCNI integration (`aksbyocni.enabled`) instead.
+  enabled: false
+  # usePrimaryAddress: false
+  # resourceGroup: group1
+  # subscriptionID: 00000000-0000-0000-0000-000000000000
+  # tenantID: 00000000-0000-0000-0000-000000000000
+  # clientID: 00000000-0000-0000-0000-000000000000
+  # clientSecret: 00000000-0000-0000-0000-000000000000
+  # userAssignedIdentityID: 00000000-0000-0000-0000-000000000000
+alibabacloud:
+  # -- Enable AlibabaCloud ENI integration
+  enabled: false
+# -- Enable bandwidth manager to optimize TCP and UDP workloads and allow
+# for rate-limiting traffic from individual Pods with EDT (Earliest Departure
+# Time) through the "kubernetes.io/egress-bandwidth" Pod annotation.
+bandwidthManager:
+  # -- Enable bandwidth manager infrastructure (also a prerequisite for BBR)
+  enabled: false
+  # -- Activate BBR TCP congestion control for Pods
+  bbr: false
+# -- Configure standalone NAT46/NAT64 gateway
+nat46x64Gateway:
+  # -- Enable RFC8215-prefixed translation
+  enabled: false
+# -- EnableHighScaleIPcache enables the special ipcache mode for high scale
+# clusters. The ipcache content will be reduced to the strict minimum and
+# traffic will be encapsulated to carry security identities.
+highScaleIPcache:
+  # -- Enable the high scale mode for the ipcache.
+  enabled: false
+# -- Configure L2 announcements
+l2announcements:
+  # -- Enable L2 announcements
+  enabled: false
+  # -- If a lease is not renewed for X duration, the current leader is considered dead and a new leader is picked
+  # leaseDuration: 15s
+  # -- The interval at which the leader will renew the lease
+  # leaseRenewDeadline: 5s
+  # -- The timeout between retries if renewal fails
+  # leaseRetryPeriod: 2s
+# -- Configure L2 pod announcements
+l2podAnnouncements:
+  # -- Enable L2 pod announcements
+  enabled: false
+  # -- Interface used for sending Gratuitous ARP pod announcements
+  interface: "eth0"
+# -- This feature set enables virtual BGP routers to be created via
+# CiliumBGPPeeringPolicy CRDs.
+bgpControlPlane:
+  # -- Enables the BGP control plane.
+  enabled: false
+  # -- SecretsNamespace is the namespace from which BGP support will retrieve secrets.
+  secretsNamespace:
+    # -- Create secrets namespace for BGP secrets.
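+    # [Example] Illustrative (non-default) settings for this secrets namespace,
+    # e.g. when BGP peering passwords live in a dedicated namespace (the name
+    # below is a placeholder):
+    #
+    #   create: true
+    #   name: bgp-secrets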
+ create: false + # -- The name of the secret namespace to which Cilium agents are given read access + name: kube-system + # -- Status reporting settings (BGPv2 only) + statusReport: + # -- Enable/Disable BGPv2 status reporting + # It is recommended to enable status reporting in general, but if you have any issue + # such as high API server load, you can disable it by setting this to false. + enabled: true +pmtuDiscovery: + # -- Enable path MTU discovery to send ICMP fragmentation-needed replies to + # the client. + enabled: false +bpf: + autoMount: + # -- Enable automatic mount of BPF filesystem + # When `autoMount` is enabled, the BPF filesystem is mounted at + # `bpf.root` path on the underlying host and inside the cilium agent pod. + # If users disable `autoMount`, it's expected that users have mounted + # bpffs filesystem at the specified `bpf.root` volume, and then the + # volume will be mounted inside the cilium agent pod at the same path. + enabled: true + # -- Configure the mount point for the BPF filesystem + root: /sys/fs/bpf + # -- Enables pre-allocation of eBPF map values. This increases + # memory usage but can reduce latency. + preallocateMaps: false + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of entries in auth map. + # @default -- `524288` + authMapMax: ~ + # -- Enable CT accounting for packets and bytes + ctAccounting: false + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of entries in the TCP connection tracking + # table. + # @default -- `524288` + ctTcpMax: ~ + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of entries for the non-TCP connection + # tracking table. + # @default -- `262144` + ctAnyMax: ~ + # -- Control to use a distributed per-CPU backend memory for the core BPF LRU maps + # which Cilium uses. This improves performance significantly, but it is also + # recommended to increase BPF map sizing along with that. + distributedLRU: + # -- Enable distributed LRU backend memory. For compatibility with existing + # installations it is off by default. + enabled: false + # -- Control events generated by the Cilium datapath exposed to Cilium monitor and Hubble. + # Helm configuration for BPF events map rate limiting is experimental and might change + # in upcoming releases. + events: + # -- Default settings for all types of events except dbg and pcap. + default: + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the limit of messages per second that can be written to + # BPF events map. The number of messages is averaged, meaning that if no messages + # were written to the map over 5 seconds, it's possible to write more events + # in the 6th second. If rateLimit is greater than 0, non-zero value for burstLimit must + # also be provided lest the configuration is considered invalid. Setting both burstLimit + # and rateLimit to 0 disables BPF events rate limiting. + # @default -- `0` + rateLimit: ~ + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of messages that can be written to BPF events + # map in 1 second. If burstLimit is greater than 0, non-zero value for rateLimit must + # also be provided lest the configuration is considered invalid. Setting both burstLimit + # and rateLimit to 0 disables BPF events rate limiting. + # @default -- `0` + burstLimit: ~ + drop: + # -- Enable drop events. + enabled: true + policyVerdict: + # -- Enable policy verdict events. 
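+    # [Example] Valid illustrative values for the rate-limit knobs under
+    # `events.default` above; per the documented constraint, rateLimit and
+    # burstLimit must both be zero or both be non-zero (numbers are placeholders):
+    #
+    #   default:
+    #     rateLimit: 100
+    #     burstLimit: 200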
+ enabled: true + trace: + # -- Enable trace events. + enabled: true + # @schema + # type: [null, integer] + # @schema + # -- Configure the maximum number of service entries in the + # load balancer maps. + lbMapMax: 65536 + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of entries for the NAT table. + # @default -- `524288` + natMax: ~ + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of entries for the neighbor table. + # @default -- `524288` + neighMax: ~ + # @schema + # type: [null, integer] + # @schema + # @default -- `16384` + # -- (int) Configures the maximum number of entries for the node table. + nodeMapMax: ~ + # -- Configure the maximum number of entries in endpoint policy map (per endpoint). + # @schema + # type: [null, integer] + # @schema + policyMapMax: 16384 + # @schema + # type: [null, number, string] + # @schema + # -- (float64) Configure auto-sizing for all BPF maps based on available memory. + # ref: https://docs.cilium.io/en/stable/network/ebpf/maps/ + # @default -- `0.0025` + mapDynamicSizeRatio: ~ + # -- Configure the level of aggregation for monitor notifications. + # Valid options are none, low, medium, maximum. + monitorAggregation: medium + # -- Configure the typical time between monitor notifications for + # active connections. + monitorInterval: "5s" + # -- Configure which TCP flags trigger notifications when seen for the + # first time in a connection. + monitorFlags: "all" + # -- (bool) Allow cluster external access to ClusterIP services. + # @default -- `false` + lbExternalClusterIP: false + # -- (bool) Enable loadBalancerSourceRanges CIDR filtering for all service + # types, not just LoadBalancer services. The corresponding NodePort and + # ClusterIP (if enabled for cluster-external traffic) will also apply the + # CIDR filter. + # @default -- `false` + lbSourceRangeAllTypes: false + # -- (bool) Enable the option to define the load balancing algorithm on + # a per-service basis through service.cilium.io/lb-algorithm annotation. + # @default -- `false` + lbAlgorithmAnnotation: false + # -- (bool) Enable the option to define the load balancing mode (SNAT or DSR) + # on a per-service basis through service.cilium.io/forwarding-mode annotation. + # @default -- `false` + lbModeAnnotation: false + # @schema + # type: [null, boolean] + # @schema + # -- (bool) Enable native IP masquerade support in eBPF + # @default -- `false` + masquerade: ~ + # @schema + # type: [null, boolean] + # @schema + # -- (bool) Configure whether direct routing mode should route traffic via + # host stack (true) or directly and more efficiently out of BPF (false) if + # the kernel supports it. The latter has the implication that it will also + # bypass netfilter in the host namespace. + # @default -- `false` + hostLegacyRouting: ~ + # @schema + # type: [null, boolean] + # @schema + # -- (bool) Configure the eBPF-based TPROXY (beta) to reduce reliance on iptables rules + # for implementing Layer 7 policy. + # @default -- `false` + tproxy: ~ + # @schema + # type: [null, array] + # @schema + # -- (list) Configure explicitly allowed VLAN id's for bpf logic bypass. + # [0] will allow all VLAN id's without any filtering. + # @default -- `[]` + vlanBypass: ~ + # -- (bool) Disable ExternalIP mitigation (CVE-2020-8554) + # @default -- `false` + disableExternalIPMitigation: false + # -- (bool) Attach endpoint programs using tcx instead of legacy tc hooks on + # supported kernels. 
+  # @default -- `true`
+  enableTCX: true
+  # -- (string) Mode for Pod devices for the core datapath (veth, netkit, netkit-l2, lb-only)
+  # @default -- `veth`
+  datapathMode: veth
+# -- Enable BPF clock source probing for more efficient tick retrieval.
+bpfClockProbe: false
+# -- Clean all eBPF datapath state from the initContainer of the cilium-agent
+# DaemonSet.
+#
+# WARNING: Use with care!
+cleanBpfState: false
+# -- Clean all local Cilium state from the initContainer of the cilium-agent
+# DaemonSet. Implies cleanBpfState: true.
+#
+# WARNING: Use with care!
+cleanState: false
+# -- Wait for KUBE-PROXY-CANARY iptables rule to appear in "wait-for-kube-proxy"
+# init container before launching cilium-agent.
+# More context can be found in the commit message of the PR below:
+# https://github.com/cilium/cilium/pull/20123
+waitForKubeProxy: false
+cni:
+  # -- Install the CNI configuration and binary files into the filesystem.
+  install: true
+  # -- Remove the CNI configuration and binary files on agent shutdown. Enable this
+  # if you're removing Cilium from the cluster. Disable this to prevent the CNI
+  # configuration file from being removed during agent upgrade, which can cause
+  # nodes to become unmanageable.
+  uninstall: false
+  # @schema
+  # type: [null, string]
+  # @schema
+  # -- Configure chaining on top of other CNI plugins. Possible values:
+  #   - none
+  #   - aws-cni
+  #   - flannel
+  #   - generic-veth
+  #   - portmap
+  chainingMode: ~
+  # @schema
+  # type: [null, string]
+  # @schema
+  # -- A CNI network name into which the Cilium plugin should be added as a chained plugin.
+  # This will cause the agent to watch for a CNI network with this network name. When it is
+  # found, this will be used as the basis for Cilium's CNI configuration file. If this is
+  # set, it assumes a chaining mode of generic-veth. As a special case, a chaining mode
+  # of aws-cni implies a chainingTarget of aws-cni. See the example below.
+  chainingTarget: ~
+  # -- Make Cilium take ownership over the `/etc/cni/net.d` directory on the
+  # node, renaming all non-Cilium CNI configurations to `*.cilium_bak`.
+  # This ensures no Pods can be scheduled using other CNI plugins during Cilium
+  # agent downtime.
+  exclusive: true
+  # -- Configure the log file for CNI logging with a retention policy of 7 days.
+  # Disable CNI file logging by setting this field to empty explicitly.
+  logFile: /var/run/cilium/cilium-cni.log
+  # -- Skip writing of the CNI configuration. This can be used if
+  # writing of the CNI configuration is performed by external automation.
+  customConf: false
+  # -- Configure the path to the CNI configuration directory on the host.
+  confPath: /etc/cni/net.d
+  # -- Configure the path to the CNI binary directory on the host.
+  binPath: /opt/cni/bin
+  # -- Specify the path to a CNI config to read from on agent start.
+  # This can be useful if you want to manage your CNI
+  # configuration outside of a Kubernetes environment. This parameter is
+  # mutually exclusive with the 'cni.configMap' parameter. The agent will
+  # write this to 05-cilium.conflist on startup.
+  # readCniConf: /host/etc/cni/net.d/05-sample.conflist.input
+
+  # -- When defined, configMap will mount the provided value as a ConfigMap,
+  # interpret the cniConf variable as the CNI configuration file, and write it
+  # when the agent starts up
+  # configMap: cni-configuration
+
+  # -- Configure the key in the CNI ConfigMap to read the contents of
+  # the CNI configuration from.
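+  # [Example] Illustrative chaining setup per the options documented above;
+  # setting a chainingTarget implies generic-veth mode (the network name is a
+  # placeholder):
+  #
+  #   chainingMode: generic-veth
+  #   chainingTarget: my-network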
+ configMapKey: cni-config + # -- Configure the path to where to mount the ConfigMap inside the agent pod. + confFileMountPath: /tmp/cni-configuration + # -- Configure the path to where the CNI configuration directory is mounted + # inside the agent pod. + hostConfDirMountPath: /host/etc/cni/net.d + # -- Specifies the resources for the cni initContainer + resources: + requests: + cpu: 100m + memory: 10Mi + # -- Enable route MTU for pod netns when CNI chaining is used + enableRouteMTUForCNIChaining: false +# -- (string) Configure how frequently garbage collection should occur for the datapath +# connection tracking table. +# @default -- `"0s"` +conntrackGCInterval: "" +# -- (string) Configure the maximum frequency for the garbage collection of the +# connection tracking table. Only affects the automatic computation for the frequency +# and has no effect when 'conntrackGCInterval' is set. This can be set to more frequently +# clean up unused identities created from ToFQDN policies. +conntrackGCMaxInterval: "" +# -- (string) Configure timeout in which Cilium will exit if CRDs are not available +# @default -- `"5m"` +crdWaitTimeout: "" +# -- Tail call hooks for custom eBPF programs. +customCalls: + # -- Enable tail call hooks for custom eBPF programs. + enabled: false +daemon: + # -- Configure where Cilium runtime state should be stored. + runPath: "/var/run/cilium" + # @schema + # type: [null, string] + # @schema + # -- Configure a custom list of possible configuration override sources + # The default is "config-map:cilium-config,cilium-node-config". For supported + # values, see the help text for the build-config subcommand. + # Note that this value should be a comma-separated string. + configSources: ~ + # @schema + # type: [null, string] + # @schema + # -- allowedConfigOverrides is a list of config-map keys that can be overridden. + # That is to say, if this value is set, config sources (excepting the first one) can + # only override keys in this list. + # + # This takes precedence over blockedConfigOverrides. + # + # By default, all keys may be overridden. To disable overrides, set this to "none" or + # change the configSources variable. + allowedConfigOverrides: ~ + # @schema + # type: [null, string] + # @schema + # -- blockedConfigOverrides is a list of config-map keys that may not be overridden. + # In other words, if any of these keys appear in a configuration source excepting the + # first one, they will be ignored + # + # This is ignored if allowedConfigOverrides is set. + # + # By default, all keys may be overridden. + blockedConfigOverrides: ~ + # @schema + # type: [null, boolean] + # @schema + # -- enableSourceIPVerification is a boolean flag to enable or disable the Source IP verification + # of endpoints. This flag is useful when Cilium is chained with other CNIs. + # + # By default, this functionality is enabled + enableSourceIPVerification: true +# -- Specify which network interfaces can run the eBPF datapath. This means +# that a packet sent from a pod to a destination outside the cluster will be +# masqueraded (to an output device IPv4 address), if the output device runs the +# program. When not specified, probing will automatically detect devices that have +# a non-local route. This should be used only when autodetection is not suitable. +# devices: "" + +# -- Enables experimental support for the detection of new and removed datapath +# devices. When devices change the eBPF datapath is reloaded and services updated. 
+# If "devices" is set then only those devices, or devices matching a wildcard will +# be considered. +# +# This option has been deprecated and is a no-op. +enableRuntimeDeviceDetection: true +# -- Forces the auto-detection of devices, even if specific devices are explicitly listed +forceDeviceDetection: false +# -- Chains to ignore when installing feeder rules. +# disableIptablesFeederRules: "" + +# -- Limit iptables-based egress masquerading to interface selector. +# egressMasqueradeInterfaces: "" + +# -- Enable setting identity mark for local traffic. +# enableIdentityMark: true + +# -- Enable Kubernetes EndpointSlice feature in Cilium if the cluster supports it. +# enableK8sEndpointSlice: true + +# -- Enable CiliumEndpointSlice feature (deprecated, please use `ciliumEndpointSlice.enabled` instead). +enableCiliumEndpointSlice: false +ciliumEndpointSlice: + # -- Enable Cilium EndpointSlice feature. + enabled: false + # -- List of rate limit options to be used for the CiliumEndpointSlice controller. + # Each object in the list must have the following fields: + # nodes: Count of nodes at which to apply the rate limit. + # limit: The sustained request rate in requests per second. The maximum rate that can be configured is 50. + # burst: The burst request rate in requests per second. The maximum burst that can be configured is 100. + rateLimits: + - nodes: 0 + limit: 10 + burst: 20 + - nodes: 100 + limit: 50 + burst: 100 + # @schema + # enum: ["identity", "fcfs"] + # @schema + # -- The slicing mode to use for CiliumEndpointSlices. + # identity groups together CiliumEndpoints that share the same identity. + # fcfs groups together CiliumEndpoints in a first-come-first-serve basis, filling in the largest non-full slice first. + sliceMode: identity +envoyConfig: + # -- Enable CiliumEnvoyConfig CRD + # CiliumEnvoyConfig CRD can also be implicitly enabled by other options. + enabled: false + # -- SecretsNamespace is the namespace in which envoy SDS will retrieve secrets from. + secretsNamespace: + # -- Create secrets namespace for CiliumEnvoyConfig CRDs. + create: true + # -- The name of the secret namespace to which Cilium agents are given read access. + name: cilium-secrets + # -- Interval in which an attempt is made to reconcile failed EnvoyConfigs. If the duration is zero, the retry is deactivated. + retryInterval: 15s +ingressController: + # -- Enable cilium ingress controller + # This will automatically set enable-envoy-config as well. + enabled: false + # -- Set cilium ingress controller to be the default ingress controller + # This will let cilium ingress controller route entries without ingress class set + default: false + # -- Default ingress load balancer mode + # Supported values: shared, dedicated + # For granular control, use the following annotations on the ingress resource: + # "ingress.cilium.io/loadbalancer-mode: dedicated" (or "shared"). + loadbalancerMode: dedicated + # -- Enforce https for host having matching TLS host in Ingress. + # Incoming traffic to http listener will return 308 http error code with respective location in header. + enforceHttps: true + # -- Enable proxy protocol for all Ingress listeners. Note that _only_ Proxy protocol traffic will be accepted once this is enabled. 
+ enableProxyProtocol: false + # -- IngressLBAnnotations are the annotation and label prefixes, which are used to filter annotations and/or labels to propagate from Ingress to the Load Balancer service + ingressLBAnnotationPrefixes: ['lbipam.cilium.io', 'nodeipam.cilium.io', 'service.beta.kubernetes.io', 'service.kubernetes.io', 'cloud.google.com'] + # @schema + # type: [null, string] + # @schema + # -- Default secret namespace for ingresses without .spec.tls[].secretName set. + defaultSecretNamespace: + # @schema + # type: [null, string] + # @schema + # -- Default secret name for ingresses without .spec.tls[].secretName set. + defaultSecretName: + # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from. + secretsNamespace: + # -- Create secrets namespace for Ingress. + create: true + # -- Name of Ingress secret namespace. + name: cilium-secrets + # -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name. + # If disabled, TLS secrets must be maintained externally. + sync: true + # -- Load-balancer service in shared mode. + # This is a single load-balancer service for all Ingress resources. + service: + # -- Service name + name: cilium-ingress + # -- Labels to be added for the shared LB service + labels: {} + # -- Annotations to be added for the shared LB service + annotations: {} + # -- Service type for the shared LB service + type: LoadBalancer + # @schema + # type: [null, integer] + # @schema + # -- Configure a specific nodePort for insecure HTTP traffic on the shared LB service + insecureNodePort: ~ + # @schema + # type: [null, integer] + # @schema + # -- Configure a specific nodePort for secure HTTPS traffic on the shared LB service + secureNodePort: ~ + # @schema + # type: [null, string] + # @schema + # -- Configure a specific loadBalancerClass on the shared LB service (requires Kubernetes 1.24+) + loadBalancerClass: ~ + # @schema + # type: [null, string] + # @schema + # -- Configure a specific loadBalancerIP on the shared LB service + loadBalancerIP: ~ + # @schema + # type: [null, boolean] + # @schema + # -- Configure if node port allocation is required for LB service + # ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation + allocateLoadBalancerNodePorts: ~ + # -- Control how traffic from external sources is routed to the LoadBalancer Kubernetes Service for Cilium Ingress in shared mode. + # Valid values are "Cluster" and "Local". + # ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#external-traffic-policy + externalTrafficPolicy: Cluster + # Host Network related configuration + hostNetwork: + # -- Configure whether the Envoy listeners should be exposed on the host network. + enabled: false + # -- Configure a specific port on the host network that gets used for the shared listener. + sharedListenerPort: 8080 + # Specify the nodes where the Ingress listeners should be exposed + nodes: + # -- Specify the labels of the nodes where the Ingress listeners should be exposed + # + # matchLabels: + # kubernetes.io/os: linux + # kubernetes.io/hostname: kind-worker + matchLabels: {} +gatewayAPI: + # -- Enable support for Gateway API in cilium + # This will automatically set enable-envoy-config as well. + enabled: false + # -- Enable proxy protocol for all GatewayAPI listeners. Note that _only_ Proxy protocol traffic will be accepted once this is enabled. 
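+  # [Example] Minimal illustrative override enabling the Gateway API support
+  # documented above (as noted, this implicitly enables Envoy config handling):
+  #
+  #   gatewayAPI:
+  #     enabled: true
+  #     enableAlpn: true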
+ enableProxyProtocol: false + # -- Enable Backend Protocol selection support (GEP-1911) for Gateway API via appProtocol. + enableAppProtocol: false + # -- Enable ALPN for all listeners configured with Gateway API. ALPN will attempt HTTP/2, then HTTP 1.1. + # Note that this will also enable `appProtocol` support, and services that wish to use HTTP/2 will need to indicate that via their `appProtocol`. + enableAlpn: false + # -- The number of additional GatewayAPI proxy hops from the right side of the HTTP header to trust when determining the origin client's IP address. + xffNumTrustedHops: 0 + # -- Control how traffic from external sources is routed to the LoadBalancer Kubernetes Service for all Cilium GatewayAPI Gateway instances. Valid values are "Cluster" and "Local". + # Note that this value will be ignored when `hostNetwork.enabled == true`. + # ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#external-traffic-policy + externalTrafficPolicy: Cluster + gatewayClass: + # -- Enable creation of GatewayClass resource + # The default value is 'auto' which decides according to presence of gateway.networking.k8s.io/v1/GatewayClass in the cluster. + # Other possible values are 'true' and 'false', which will either always or never create the GatewayClass, respectively. + create: auto + # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from. + secretsNamespace: + # -- Create secrets namespace for Gateway API. + create: true + # -- Name of Gateway API secret namespace. + name: cilium-secrets + # -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name. + # If disabled, TLS secrets must be maintained externally. + sync: true + # Host Network related configuration + hostNetwork: + # -- Configure whether the Envoy listeners should be exposed on the host network. + enabled: false + # Specify the nodes where the Ingress listeners should be exposed + nodes: + # -- Specify the labels of the nodes where the Ingress listeners should be exposed + # + # matchLabels: + # kubernetes.io/os: linux + # kubernetes.io/hostname: kind-worker + matchLabels: {} +# -- Enables the fallback compatibility solution for when the xt_socket kernel +# module is missing and it is needed for the datapath L7 redirection to work +# properly. See documentation for details on when this can be disabled: +# https://docs.cilium.io/en/stable/operations/system_requirements/#linux-kernel. +enableXTSocketFallback: true +encryption: + # -- Enable transparent network encryption. + enabled: false + # -- Encryption method. Can be either ipsec or wireguard. + type: ipsec + # -- Enable encryption for pure node to node traffic. + # This option is only effective when encryption.type is set to "wireguard". + nodeEncryption: false + # -- Configure the WireGuard Pod2Pod strict mode. + strictMode: + # -- Enable WireGuard Pod2Pod strict mode. + enabled: false + # -- CIDR for the WireGuard Pod2Pod strict mode. + cidr: "" + # -- Allow dynamic lookup of remote node identities. + # This is required when tunneling is used or direct routing is used and the node CIDR and pod CIDR overlap. + allowRemoteNodeIdentities: false + ipsec: + # -- Name of the key file inside the Kubernetes secret configured via secretName. + keyFile: keys + # -- Path to mount the secret inside the Cilium pod. + mountPath: /etc/ipsec + # -- Name of the Kubernetes secret containing the encryption keys. + secretName: cilium-ipsec-keys + # -- The interface to use for encrypted traffic. 
+    interface: ""
+    # -- Enable the key watcher. If disabled, a restart of the agent will be
+    # necessary on key rotations.
+    keyWatcher: true
+    # -- Maximum duration of the IPsec key rotation. The previous key will be
+    # removed after that delay.
+    keyRotationDuration: "5m"
+    # -- Enable IPsec encrypted overlay
+    encryptedOverlay: false
+  wireguard:
+    # -- Controls WireGuard PersistentKeepalive option. Set 0s to disable.
+    persistentKeepalive: 0s
+endpointHealthChecking:
+  # -- Enable connectivity health checking between virtual endpoints.
+  enabled: true
+endpointRoutes:
+  # @schema
+  # type: [boolean, string]
+  # @schema
+  # -- Enable use of per endpoint routes instead of routing via
+  # the cilium_host interface.
+  enabled: false
+k8sNetworkPolicy:
+  # -- Enable support for K8s NetworkPolicy
+  enabled: true
+# -- Enable endpoint lockdown on policy map overflow.
+endpointLockdownOnMapOverflow: false
+eni:
+  # -- Enable Elastic Network Interface (ENI) integration.
+  enabled: false
+  # -- Update ENI Adapter limits from the EC2 API
+  updateEC2AdapterLimitViaAPI: true
+  # -- Release excess IPs from the ENI when they are not in use
+  awsReleaseExcessIPs: false
+  # -- Enable ENI prefix delegation
+  awsEnablePrefixDelegation: false
+  # -- EC2 API endpoint to use
+  ec2APIEndpoint: ""
+  # -- Tags to apply to the newly created ENIs
+  eniTags: {}
+  # -- Interval for garbage collection of unattached ENIs. Set to "0s" to disable.
+  # @default -- `"5m"`
+  gcInterval: ""
+  # -- Additional tags attached to ENIs created by Cilium.
+  # Dangling ENIs with this tag will be garbage collected
+  # @default -- `{"io.cilium/cilium-managed":"true","io.cilium/cluster-name":""}`
+  gcTags: {}
+  # -- If using IAM roles for Service Accounts, Cilium will not try to
+  # inject identity values from the cilium-aws Kubernetes secret.
+  # Adds an annotation to the service account if managed by Helm.
+  # See https://github.com/aws/amazon-eks-pod-identity-webhook
+  iamRole: ""
+  # -- Filter via subnet IDs which will dictate which subnets are going to be used to create new ENIs
+  # Important note: This requires that each instance has an ENI with a matching subnet attached
+  # when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium,
+  # use the CNI configuration file settings (cni.customConf) instead.
+  subnetIDsFilter: []
+  # -- Filter via tags (k=v) which will dictate which subnets are going to be used to create new ENIs
+  # Important note: This requires that each instance has an ENI with a matching subnet attached
+  # when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium,
+  # use the CNI configuration file settings (cni.customConf) instead.
+  subnetTagsFilter: []
+  # -- Filter via AWS EC2 Instance tags (k=v) which will dictate which AWS EC2 Instances
+  # are going to be used to create new ENIs
+  instanceTagsFilter: []
+# fragmentTracking enables IPv4 fragment tracking support in the datapath.
+# fragmentTracking: true
+gke:
+  # -- Enable Google Kubernetes Engine integration
+  enabled: false
+# -- Enable connectivity health checking.
+healthChecking: true
+# -- TCP port for the agent health API. This is not the port for cilium-health.
+healthPort: 9879
+# -- Number of ICMP requests sent for each health check before marking a node or endpoint unreachable.
+healthCheckICMPFailureThreshold: 3
+# -- Configure the host firewall.
+hostFirewall:
+  # -- Enables the enforcement of host policies in the eBPF datapath.
+  enabled: false
+hostPort:
+  # -- Enable hostPort service support.
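+  # [Example] Illustrative settings for the `encryption` section documented
+  # earlier, switching to WireGuard with node-to-node encryption (nodeEncryption
+  # only takes effect when type is "wireguard"):
+  #
+  #   encryption:
+  #     enabled: true
+  #     type: wireguard
+  #     nodeEncryption: true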
+ enabled: false +# -- Configure socket LB +socketLB: + # -- Enable socket LB + enabled: false + # -- Disable socket lb for non-root ns. This is used to enable Istio routing rules. + # hostNamespaceOnly: false + # -- Enable terminating pod connections to deleted service backends. + # terminatePodConnections: true + # -- Enables tracing for socket-based load balancing. + # tracing: true +# -- Configure certificate generation for Hubble integration. +# If hubble.tls.auto.method=cronJob, these values are used +# for the Kubernetes CronJob which will be scheduled regularly to +# (re)generate any certificates not provided manually. +certgen: + # -- When set to true the certificate authority secret is created. + generateCA: true + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/certgen" + tag: "v0.2.1" + digest: "sha256:ab6b1928e9c5f424f6b0f51c68065b9fd85e2f8d3e5f21fbd1a3cb27e6fb9321" + useDigest: true + pullPolicy: "IfNotPresent" + # -- Seconds after which the completed job pod will be deleted + ttlSecondsAfterFinished: 1800 + # -- Labels to be added to hubble-certgen pods + podLabels: {} + # -- Annotations to be added to the hubble-certgen initial Job and CronJob + annotations: + job: {} + cronJob: {} + # -- Node selector for certgen + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: {} + # -- Priority class for certgen + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass + priorityClassName: "" + # -- Node tolerations for pod assignment on nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: [] + # -- Additional certgen volumes. + extraVolumes: [] + # -- Additional certgen volumeMounts. + extraVolumeMounts: [] + # -- Affinity for certgen + affinity: {} +hubble: + # -- Enable Hubble (true by default). + enabled: true + # -- Annotations to be added to all top-level hubble objects (resources under templates/hubble) + annotations: {} + # -- Buffer size of the channel Hubble uses to receive monitor events. If this + # value is not set, the queue size is set to the default monitor queue size. + # eventQueueSize: "" + + # -- Number of recent flows for Hubble to cache. Defaults to 4095. + # Possible values are: + # 1, 3, 7, 15, 31, 63, 127, 255, 511, 1023, + # 2047, 4095, 8191, 16383, 32767, 65535 + # eventBufferCapacity: "4095" + + # -- Hubble metrics configuration. + # See https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics + # for more comprehensive documentation about Hubble metrics. + metrics: + # @schema + # type: [null, array] + # @schema + # -- Configures the list of metrics to collect. If empty or null, metrics + # are disabled. + # Example: + # + # enabled: + # - dns:query;ignoreAAAA + # - drop + # - tcp + # - flow + # - icmp + # - http + # + # You can specify the list of metrics from the helm CLI: + # + # --set hubble.metrics.enabled="{dns:query;ignoreAAAA,drop,tcp,flow,icmp,http}" + # + enabled: ~ + # -- Enables exporting hubble metrics in OpenMetrics format. + enableOpenMetrics: false + # -- Configure the port the hubble metric server listens on. + port: 9965 + tls: + # Enable hubble metrics server TLS. + enabled: false + # Configure hubble metrics server TLS. + server: + # -- Name of the Secret containing the certificate and key for the Hubble metrics server. + # If specified, cert and key are ignored. 
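+        # [Example] Illustrative use of a pre-provisioned Secret for the metrics
+        # server keypair (the Secret name is a placeholder, not a chart default):
+        #
+        #   existingSecret: "hubble-metrics-server-certs"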
+        existingSecret: ""
+        # -- base64 encoded PEM values for the Hubble metrics server certificate (deprecated).
+        # Use existingSecret instead.
+        cert: ""
+        # -- base64 encoded PEM values for the Hubble metrics server key (deprecated).
+        # Use existingSecret instead.
+        key: ""
+        # -- Extra DNS names added to certificate when it's auto generated
+        extraDnsNames: []
+        # -- Extra IP addresses added to certificate when it's auto generated
+        extraIpAddresses: []
+      # -- Configure mTLS for the Hubble metrics server.
+      mtls:
+        # When set to true, enforces mutual TLS between the Hubble metrics server and its clients.
+        # When false, non-mutual TLS connections are allowed.
+        # This option has no effect when TLS is disabled.
+        enabled: false
+        useSecret: false
+        # -- Name of the ConfigMap containing the CA to validate client certificates against.
+        # If mTLS is enabled and this is unspecified, it will default to the
+        # same CA used for Hubble metrics server certificates.
+        name: ~
+        # -- Entry of the ConfigMap containing the CA.
+        key: ca.crt
+    # -- Annotations to be added to hubble-metrics service.
+    serviceAnnotations: {}
+    serviceMonitor:
+      # -- Create ServiceMonitor resources for Prometheus Operator.
+      # This requires the prometheus CRDs to be available.
+      # ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
+      enabled: false
+      # -- Labels to add to ServiceMonitor hubble
+      labels: {}
+      # -- Annotations to add to ServiceMonitor hubble
+      annotations: {}
+      # -- jobLabel to add for ServiceMonitor hubble
+      jobLabel: ""
+      # -- Interval for scrape metrics.
+      interval: "10s"
+      # -- Relabeling configs for the ServiceMonitor hubble
+      relabelings:
+        - sourceLabels:
+            - __meta_kubernetes_pod_node_name
+          targetLabel: node
+          replacement: ${1}
+      # @schema
+      # type: [null, array]
+      # @schema
+      # -- Metrics relabeling configs for the ServiceMonitor hubble
+      metricRelabelings: ~
+      # Configure TLS for the ServiceMonitor.
+      # Note, when using TLS you will either need to specify
+      # tlsConfig.insecureSkipVerify or specify a CA to use.
+      tlsConfig: {}
+    # -- Grafana dashboards for hubble
+    # grafana can import dashboards based on the label and value
+    # ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards
+    dashboards:
+      enabled: false
+      label: grafana_dashboard
+      # @schema
+      # type: [null, string]
+      # @schema
+      namespace: ~
+      labelValue: "1"
+      annotations: {}
+    # Dynamic metrics may be reconfigured without needing agent restarts.
+    dynamic:
+      enabled: false
+      config:
+        # ---- Name of configmap with configuration that may be altered to reconfigure metric handlers within a running agent.
+        configMapName: cilium-dynamic-metrics-config
+        # ---- True if helm installer should create config map.
+        # Switch to false if you want to self-maintain the file content.
+        createConfigMap: true
+        # ---- Exporters configuration in YAML format.
+        content: []
+        # - name: dns
+        #   contextOptions: []
+        #   includeFilters: []
+        #   excludeFilters: []
+  # -- Unix domain socket path to listen to when Hubble is enabled.
+  socketPath: /var/run/cilium/hubble.sock
+  # -- Enables redacting sensitive information present in Layer 7 flows.
+  redact:
+    enabled: false
+    http:
+      # -- Enables redacting URL query (GET) parameters.
+ # Example: + # + # redact: + # enabled: true + # http: + # urlQuery: true + # + # You can specify the options from the helm CLI: + # + # --set hubble.redact.enabled="true" + # --set hubble.redact.http.urlQuery="true" + urlQuery: false + # -- Enables redacting user info, e.g., password when basic auth is used. + # Example: + # + # redact: + # enabled: true + # http: + # userInfo: true + # + # You can specify the options from the helm CLI: + # + # --set hubble.redact.enabled="true" + # --set hubble.redact.http.userInfo="true" + userInfo: true + headers: + # -- List of HTTP headers to allow: headers not matching will be redacted. Note: `allow` and `deny` lists cannot be used both at the same time, only one can be present. + # Example: + # redact: + # enabled: true + # http: + # headers: + # allow: + # - traceparent + # - tracestate + # - Cache-Control + # + # You can specify the options from the helm CLI: + # --set hubble.redact.enabled="true" + # --set hubble.redact.http.headers.allow="traceparent,tracestate,Cache-Control" + allow: [] + # -- List of HTTP headers to deny: matching headers will be redacted. Note: `allow` and `deny` lists cannot be used both at the same time, only one can be present. + # Example: + # redact: + # enabled: true + # http: + # headers: + # deny: + # - Authorization + # - Proxy-Authorization + # + # You can specify the options from the helm CLI: + # --set hubble.redact.enabled="true" + # --set hubble.redact.http.headers.deny="Authorization,Proxy-Authorization" + deny: [] + kafka: + # -- Enables redacting Kafka's API key. + # Example: + # + # redact: + # enabled: true + # kafka: + # apiKey: true + # + # You can specify the options from the helm CLI: + # + # --set hubble.redact.enabled="true" + # --set hubble.redact.kafka.apiKey="true" + apiKey: true + # -- An additional address for Hubble to listen to. + # Set this field ":4244" if you are enabling Hubble Relay, as it assumes that + # Hubble is listening on port 4244. + listenAddress: ":4244" + # -- Whether Hubble should prefer to announce IPv6 or IPv4 addresses if both are available. + preferIpv6: false + # @schema + # type: [null, boolean] + # @schema + # -- (bool) Skip Hubble events with unknown cgroup ids + # @default -- `true` + skipUnknownCGroupIDs: ~ + peerService: + # -- Service Port for the Peer service. + # If not set, it is dynamically assigned to port 443 if TLS is enabled and to + # port 80 if not. + # servicePort: 80 + # -- Target Port for the Peer service, must match the hubble.listenAddress' + # port. + targetPort: 4244 + # -- The cluster domain to use to query the Hubble Peer service. It should + # be the local cluster. + clusterDomain: cluster.local + # -- TLS configuration for Hubble + tls: + # -- Enable mutual TLS for listenAddress. Setting this value to false is + # highly discouraged as the Hubble API provides access to potentially + # sensitive network flow metadata and is exposed on the host network. + enabled: true + # -- Configure automatic TLS certificates generation. + auto: + # -- Auto-generate certificates. + # When set to true, automatically generate a CA and certificates to + # enable mTLS between Hubble server and Hubble Relay instances. If set to + # false, the certs for Hubble server need to be provided by setting + # appropriate values below. + enabled: true + # -- Set the method to auto-generate certificates. Supported values: + # - helm: This method uses Helm to generate all certificates. 
+      #   - cronJob: This method uses a Kubernetes CronJob to generate any
+      #     certificates not provided by the user at installation
+      #     time.
+      #   - certmanager: This method uses cert-manager to generate & rotate certificates.
+      method: helm
+      # -- Generated certificates validity duration in days.
+      #
+      # Defaults to 365 days (1 year) because macOS does not accept
+      # self-signed certificates with expirations > 825 days.
+      certValidityDuration: 365
+      # -- Schedule for certificates regeneration (regardless of their expiration date).
+      # Only used if method is "cronJob". If nil, then no recurring job will be created.
+      # Instead, only the one-shot job is deployed to generate the certificates at
+      # installation time.
+      #
+      # Defaults to midnight of the first day of every fourth month. For syntax, see
+      # https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax
+      schedule: "0 0 1 */4 *"
+      # [Example]
+      # certManagerIssuerRef:
+      #   group: cert-manager.io
+      #   kind: ClusterIssuer
+      #   name: ca-issuer
+      # -- certmanager issuer used when hubble.tls.auto.method=certmanager.
+      certManagerIssuerRef: {}
+    # -- The Hubble server certificate and private key
+    server:
+      # -- Name of the Secret containing the certificate and key for the Hubble server.
+      # If specified, cert and key are ignored.
+      existingSecret: ""
+      # -- base64 encoded PEM values for the Hubble server certificate (deprecated).
+      # Use existingSecret instead.
+      cert: ""
+      # -- base64 encoded PEM values for the Hubble server key (deprecated).
+      # Use existingSecret instead.
+      key: ""
+      # -- Extra DNS names added to certificate when it's auto generated
+      extraDnsNames: []
+      # -- Extra IP addresses added to certificate when it's auto generated
+      extraIpAddresses: []
+  relay:
+    # -- Enable Hubble Relay (requires hubble.enabled=true; see the example below)
+    enabled: false
+    # -- Roll out Hubble Relay pods automatically when configmap is updated.
+    rollOutPods: false
+    # -- Hubble-relay container image.
+    image:
+      # @schema
+      # type: [null, string]
+      # @schema
+      override: ~
+      repository: "quay.io/cilium/hubble-relay"
+      tag: "v1.17.8"
+      # hubble-relay-digest
+      digest: "sha256:2e576bf7a02291c07bffbc1ca0a66a6c70f4c3eb155480e5b3ac027bedd2858b"
+      useDigest: true
+      pullPolicy: "IfNotPresent"
+    # -- Specifies the resources for the hubble-relay pods
+    resources: {}
+    # -- Number of replicas run for the hubble-relay deployment.
+    replicas: 1
+    # -- Affinity for hubble-relay
+    affinity:
+      podAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          - topologyKey: kubernetes.io/hostname
+            labelSelector:
+              matchLabels:
+                k8s-app: cilium
+    # -- Pod topology spread constraints for hubble-relay
+    topologySpreadConstraints: []
+    #   - maxSkew: 1
+    #     topologyKey: topology.kubernetes.io/zone
+    #     whenUnsatisfiable: DoNotSchedule
+
+    # -- Node labels for pod assignment
+    # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+    nodeSelector:
+      kubernetes.io/os: linux
+    # -- Node tolerations for pod assignment on nodes with taints
+    # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+    tolerations: []
+    # -- Additional hubble-relay environment variables.
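+    # [Example] Minimal illustrative override enabling Hubble Relay as documented
+    # at the top of this `relay` block (requires hubble.enabled=true, the default):
+    #
+    #   hubble:
+    #     relay:
+    #       enabled: true
+    #       rollOutPods: true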
+ extraEnv: []
+ # -- Annotations to be added to all top-level hubble-relay objects (resources under templates/hubble-relay)
+ annotations: {}
+ # -- Annotations to be added to hubble-relay pods
+ podAnnotations: {}
+ # -- Labels to be added to hubble-relay pods
+ podLabels: {}
+ # PodDisruptionBudget settings
+ podDisruptionBudget:
+ # -- enable PodDisruptionBudget
+ # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+ enabled: false
+ # @schema
+ # type: [null, integer, string]
+ # @schema
+ # -- Minimum number/percentage of pods that should remain scheduled.
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
+ minAvailable: null
+ # @schema
+ # type: [null, integer, string]
+ # @schema
+ # -- Maximum number/percentage of pods that may be made unavailable
+ maxUnavailable: 1
+ # -- The priority class to use for hubble-relay
+ priorityClassName: ""
+ # -- Configure termination grace period for hubble relay Deployment.
+ terminationGracePeriodSeconds: 1
+ # -- hubble-relay update strategy
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ # @schema
+ # type: [integer, string]
+ # @schema
+ maxUnavailable: 1
+ # -- Additional hubble-relay volumes.
+ extraVolumes: []
+ # -- Additional hubble-relay volumeMounts.
+ extraVolumeMounts: []
+ # -- hubble-relay pod security context
+ podSecurityContext:
+ fsGroup: 65532
+ # -- hubble-relay container security context
+ securityContext:
+ # readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 65532
+ runAsGroup: 65532
+ capabilities:
+ drop:
+ - ALL
+ # -- hubble-relay service configuration.
+ service:
+ # --- The type of service used for Hubble Relay access, either ClusterIP, NodePort or LoadBalancer.
+ type: ClusterIP
+ # --- The port to use when the service type is set to NodePort.
+ nodePort: 31234
+ # -- Host to listen to. Specify an empty string to bind to all the interfaces.
+ listenHost: ""
+ # -- Port to listen to.
+ listenPort: "4245"
+ # -- TLS configuration for Hubble Relay
+ tls:
+ # -- The hubble-relay client certificate and private key.
+ # This keypair is presented to Hubble server instances for mTLS
+ # authentication and is required when hubble.tls.enabled is true.
+ # These values need to be set manually if hubble.tls.auto.enabled is false.
+ client:
+ # -- Name of the Secret containing the certificate and key for the Hubble relay client.
+ # If specified, cert and key are ignored.
+ existingSecret: ""
+ # -- base64 encoded PEM values for the Hubble relay client certificate (deprecated).
+ # Use existingSecret instead.
+ cert: ""
+ # -- base64 encoded PEM values for the Hubble relay client key (deprecated).
+ # Use existingSecret instead.
+ key: ""
+ # -- The hubble-relay server certificate and private key
+ server:
+ # When set to true, enables TLS for the Hubble Relay server
+ # (i.e. for clients connecting to the Hubble Relay API).
+ enabled: false
+ # When set to true, enforces mutual TLS between the Hubble Relay server and its clients.
+ # False allows non-mutual TLS connections.
+ # This option has no effect when TLS is disabled.
+ mtls: false
+ # -- Name of the Secret containing the certificate and key for the Hubble relay server.
+ # If specified, cert and key are ignored.
+ existingSecret: ""
+ # -- base64 encoded PEM values for the Hubble relay server certificate (deprecated).
+ # Use existingSecret instead.
+ cert: ""
+ # -- base64 encoded PEM values for the Hubble relay server key (deprecated).
+ # Use existingSecret instead.
+ key: "" + # -- extra DNS names added to certificate when its auto gen + extraDnsNames: [] + # -- extra IP addresses added to certificate when its auto gen + extraIpAddresses: [] + # DNS name used by the backend to connect to the relay + # This is a simple workaround as the relay certificates are currently hardcoded to + # *.hubble-relay.cilium.io + # See https://github.com/cilium/cilium/pull/28709#discussion_r1371792546 + # For GKE Dataplane V2 this should be set to relay.kube-system.svc.cluster.local + relayName: "ui.hubble-relay.cilium.io" + # @schema + # type: [null, string] + # @schema + # -- Dial timeout to connect to the local hubble instance to receive peer information (e.g. "30s"). + # + # This option has been deprecated and is a no-op. + dialTimeout: ~ + # @schema + # type: [null, string] + # @schema + # -- Backoff duration to retry connecting to the local hubble instance in case of failure (e.g. "30s"). + retryTimeout: ~ + # @schema + # type: [null, integer] + # @schema + # -- (int) Max number of flows that can be buffered for sorting before being sent to the + # client (per request) (e.g. 100). + sortBufferLenMax: ~ + # @schema + # type: [null, string] + # @schema + # -- When the per-request flows sort buffer is not full, a flow is drained every + # time this timeout is reached (only affects requests in follow-mode) (e.g. "1s"). + sortBufferDrainTimeout: ~ + # -- Port to use for the k8s service backed by hubble-relay pods. + # If not set, it is dynamically assigned to port 443 if TLS is enabled and to + # port 80 if not. + # servicePort: 80 + + # -- Enable prometheus metrics for hubble-relay on the configured port at + # /metrics + prometheus: + enabled: false + port: 9966 + serviceMonitor: + # -- Enable service monitors. + # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + enabled: false + # -- Labels to add to ServiceMonitor hubble-relay + labels: {} + # -- Annotations to add to ServiceMonitor hubble-relay + annotations: {} + # -- Interval for scrape metrics. + interval: "10s" + # -- Specify the Kubernetes namespace where Prometheus expects to find + # service monitors configured. + # namespace: "" + # @schema + # type: [null, array] + # @schema + # -- Relabeling configs for the ServiceMonitor hubble-relay + relabelings: ~ + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor hubble-relay + metricRelabelings: ~ + gops: + # -- Enable gops for hubble-relay + enabled: true + # -- Configure gops listen port for hubble-relay + port: 9893 + pprof: + # -- Enable pprof for hubble-relay + enabled: false + # -- Configure pprof listen address for hubble-relay + address: localhost + # -- Configure pprof listen port for hubble-relay + port: 6062 + ui: + # -- Whether to enable the Hubble UI. + enabled: false + standalone: + # -- When true, it will allow installing the Hubble UI only, without checking dependencies. + # It is useful if a cluster already has cilium and Hubble relay installed and you just + # want Hubble UI to be deployed. + # When installed via helm, installing UI should be done via `helm upgrade` and when installed via the cilium cli, then `cilium hubble enable --ui` + enabled: false + tls: + # -- When deploying Hubble UI in standalone, with tls enabled for Hubble relay, it is required + # to provide a volume for mounting the client certificates. 
+ certsVolume: {} + # projected: + # defaultMode: 0400 + # sources: + # - secret: + # name: hubble-ui-client-certs + # items: + # - key: tls.crt + # path: client.crt + # - key: tls.key + # path: client.key + # - key: ca.crt + # path: hubble-relay-ca.crt + # -- Roll out Hubble-ui pods automatically when configmap is updated. + rollOutPods: false + tls: + client: + # -- Name of the Secret containing the client certificate and key for Hubble UI + # If specified, cert and key are ignored. + existingSecret: "" + # -- base64 encoded PEM values for the Hubble UI client certificate (deprecated). + # Use existingSecret instead. + cert: "" + # -- base64 encoded PEM values for the Hubble UI client key (deprecated). + # Use existingSecret instead. + key: "" + backend: + # -- Hubble-ui backend image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/hubble-ui-backend" + tag: "v0.13.3" + digest: "sha256:db1454e45dc39ca41fbf7cad31eec95d99e5b9949c39daaad0fa81ef29d56953" + useDigest: true + pullPolicy: "IfNotPresent" + # -- Hubble-ui backend security context. + securityContext: {} + # -- Additional hubble-ui backend environment variables. + extraEnv: [] + # -- Additional hubble-ui backend volumes. + extraVolumes: [] + # -- Additional hubble-ui backend volumeMounts. + extraVolumeMounts: [] + livenessProbe: + # -- Enable liveness probe for Hubble-ui backend (requires Hubble-ui 0.12+) + enabled: false + readinessProbe: + # -- Enable readiness probe for Hubble-ui backend (requires Hubble-ui 0.12+) + enabled: false + # -- Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment. + resources: {} + # limits: + # cpu: 1000m + # memory: 1024M + # requests: + # cpu: 100m + # memory: 64Mi + frontend: + # -- Hubble-ui frontend image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/hubble-ui" + tag: "v0.13.3" + digest: "sha256:661d5de7050182d495c6497ff0b007a7a1e379648e60830dd68c4d78ae21761d" + useDigest: true + pullPolicy: "IfNotPresent" + # -- Hubble-ui frontend security context. + securityContext: {} + # -- Additional hubble-ui frontend environment variables. + extraEnv: [] + # -- Additional hubble-ui frontend volumes. + extraVolumes: [] + # -- Additional hubble-ui frontend volumeMounts. + extraVolumeMounts: [] + # -- Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment. + resources: {} + # limits: + # cpu: 1000m + # memory: 1024M + # requests: + # cpu: 100m + # memory: 64Mi + server: + # -- Controls server listener for ipv6 + ipv6: + enabled: true + # -- The number of replicas of Hubble UI to deploy. + replicas: 1 + # -- Annotations to be added to all top-level hubble-ui objects (resources under templates/hubble-ui) + annotations: {} + # -- Additional labels to be added to 'hubble-ui' deployment object + labels: {} + # -- Annotations to be added to hubble-ui pods + podAnnotations: {} + # -- Labels to be added to hubble-ui pods + podLabels: {} + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # @schema + # type: [null, integer, string] + # @schema + # -- Minimum number/percentage of pods that should remain scheduled. 
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # @schema + # type: [null, integer, string] + # @schema + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + # -- Affinity for hubble-ui + affinity: {} + # -- Pod topology spread constraints for hubble-ui + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Node labels for pod assignment + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for pod assignment on nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: [] + # -- The priority class to use for hubble-ui + priorityClassName: "" + # -- hubble-ui update strategy. + updateStrategy: + type: RollingUpdate + rollingUpdate: + # @schema + # type: [integer, string] + # @schema + maxUnavailable: 1 + # -- Security context to be added to Hubble UI pods + securityContext: + runAsUser: 1001 + runAsGroup: 1001 + fsGroup: 1001 + # -- hubble-ui service configuration. + service: + # -- Annotations to be added for the Hubble UI service + annotations: {} + # --- The type of service used for Hubble UI access, either ClusterIP or NodePort. + type: ClusterIP + # --- The port to use when the service type is set to NodePort. + nodePort: 31235 + # -- Defines base url prefix for all hubble-ui http requests. + # It needs to be changed in case if ingress for hubble-ui is configured under some sub-path. + # Trailing `/` is required for custom path, ex. `/service-map/` + baseUrl: "/" + # -- hubble-ui ingress configuration. + ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + className: "" + hosts: + - chart-example.local + labels: {} + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + # -- Hubble flows export. + export: + # --- Defines max file size of output file before it gets rotated. + fileMaxSizeMb: 10 + # --- Defines max number of backup/rotated files. + fileMaxBackups: 5 + # --- Static exporter configuration. + # Static exporter is bound to agent lifecycle. + static: + enabled: false + filePath: /var/run/cilium/hubble/events.log + fieldMask: [] + # - time + # - source + # - destination + # - verdict + allowList: [] + # - '{"verdict":["DROPPED","ERROR"]}' + denyList: [] + # - '{"source_pod":["kube-system/"]}' + # - '{"destination_pod":["kube-system/"]}' + # --- Dynamic exporters configuration. + # Dynamic exporters may be reconfigured without a need of agent restarts. + dynamic: + enabled: false + config: + # ---- Name of configmap with configuration that may be altered to reconfigure exporters within a running agents. + configMapName: cilium-flowlog-config + # ---- True if helm installer should create config map. + # Switch to false if you want to self maintain the file content. + createConfigMap: true + # ---- Exporters configuration in YAML format. 
+ content: + - name: all + fieldMask: [] + includeFilters: [] + excludeFilters: [] + filePath: "/var/run/cilium/hubble/events.log" + # - name: "test002" + # filePath: "/var/log/network/flow-log/pa/test002.log" + # fieldMask: ["source.namespace", "source.pod_name", "destination.namespace", "destination.pod_name", "verdict"] + # includeFilters: + # - source_pod: ["default/"] + # event_type: + # - type: 1 + # - destination_pod: ["frontend/nginx-975996d4c-7hhgt"] + # excludeFilters: [] + # end: "2023-10-09T23:59:59-07:00" + # -- Emit v1.Events related to pods on detection of packet drops. + # This feature is alpha, please provide feedback at https://github.com/cilium/cilium/issues/33975. + dropEventEmitter: + enabled: false + # --- Minimum time between emitting same events. + interval: 2m + # --- Drop reasons to emit events for. + # ref: https://docs.cilium.io/en/stable/_api/v1/flow/README/#dropreason + reasons: + - auth_required + - policy_denied +# -- Method to use for identity allocation (`crd`, `kvstore` or `doublewrite-readkvstore` / `doublewrite-readcrd` for migrating between identity backends). +identityAllocationMode: "crd" +# -- (string) Time to wait before using new identity on endpoint identity change. +# @default -- `"5s"` +identityChangeGracePeriod: "" +# -- Install Iptables rules to skip netfilter connection tracking on all pod +# traffic. This option is only effective when Cilium is running in direct +# routing and full KPR mode. Moreover, this option cannot be enabled when Cilium +# is running in a managed Kubernetes environment or in a chained CNI setup. +installNoConntrackIptablesRules: false +ipam: + # -- Configure IP Address Management mode. + # ref: https://docs.cilium.io/en/stable/network/concepts/ipam/ + mode: "cluster-pool" + # -- Maximum rate at which the CiliumNode custom resource is updated. + ciliumNodeUpdateRate: "15s" + # -- Pre-allocation settings for IPAM in Multi-Pool mode + multiPoolPreAllocation: "" + # -- Install ingress/egress routes through uplink on host for Pods when working with delegated IPAM plugin. + installUplinkRoutesForDelegatedIPAM: false + operator: + # @schema + # type: [array, string] + # @schema + # -- IPv4 CIDR list range to delegate to individual nodes for IPAM. + clusterPoolIPv4PodCIDRList: ["10.0.0.0/8"] + # -- IPv4 CIDR mask size to delegate to individual nodes for IPAM. + clusterPoolIPv4MaskSize: 24 + # @schema + # type: [array, string] + # @schema + # -- IPv6 CIDR list range to delegate to individual nodes for IPAM. + clusterPoolIPv6PodCIDRList: ["fd00::/104"] + # -- IPv6 CIDR mask size to delegate to individual nodes for IPAM. + clusterPoolIPv6MaskSize: 120 + # -- IP pools to auto-create in multi-pool IPAM mode. + autoCreateCiliumPodIPPools: {} + # default: + # ipv4: + # cidrs: + # - 10.10.0.0/8 + # maskSize: 24 + # other: + # ipv6: + # cidrs: + # - fd00:100::/80 + # maskSize: 96 + # @schema + # type: [null, integer] + # @schema + # -- (int) The maximum burst size when rate limiting access to external APIs. + # Also known as the token bucket capacity. + # @default -- `20` + externalAPILimitBurstSize: ~ + # @schema + # type: [null, number] + # @schema + # -- (float) The maximum queries per second when rate limiting access to + # external APIs. Also known as the bucket refill rate, which is used to + # refill the bucket up to the burst size capacity. + # @default -- `4.0` + externalAPILimitQPS: ~ +# -- defaultLBServiceIPAM indicates the default LoadBalancer Service IPAM when +# no LoadBalancer class is set. 
Applicable values: lbipam, nodeipam, none +# @schema +# type: [string] +# @schema +defaultLBServiceIPAM: lbipam +nodeIPAM: + # -- Configure Node IPAM + # ref: https://docs.cilium.io/en/stable/network/node-ipam/ + enabled: false +# @schema +# type: [null, string] +# @schema +# -- The api-rate-limit option can be used to overwrite individual settings of the default configuration for rate limiting calls to the Cilium Agent API +apiRateLimit: ~ +# -- Configure the eBPF-based ip-masq-agent +ipMasqAgent: + enabled: false +# the config of nonMasqueradeCIDRs +# config: +# nonMasqueradeCIDRs: [] +# masqLinkLocal: false +# masqLinkLocalIPv6: false + +# iptablesLockTimeout defines the iptables "--wait" option when invoked from Cilium. +# iptablesLockTimeout: "5s" +ipv4: + # -- Enable IPv4 support. + enabled: true +ipv6: + # -- Enable IPv6 support. + enabled: false +# -- Configure Kubernetes specific configuration +k8s: + # -- requireIPv4PodCIDR enables waiting for Kubernetes to provide the PodCIDR + # range via the Kubernetes node resource + requireIPv4PodCIDR: false + # -- requireIPv6PodCIDR enables waiting for Kubernetes to provide the PodCIDR + # range via the Kubernetes node resource + requireIPv6PodCIDR: false +# -- Keep the deprecated selector labels when deploying Cilium DaemonSet. +keepDeprecatedLabels: false +# -- Keep the deprecated probes when deploying Cilium DaemonSet +keepDeprecatedProbes: false +startupProbe: + # -- failure threshold of startup probe. + # 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s) + failureThreshold: 105 + # -- interval between checks of the startup probe + periodSeconds: 2 +livenessProbe: + # -- failure threshold of liveness probe + failureThreshold: 10 + # -- interval between checks of the liveness probe + periodSeconds: 30 + # -- whether to require k8s connectivity as part of the check. + requireK8sConnectivity: false +readinessProbe: + # -- failure threshold of readiness probe + failureThreshold: 3 + # -- interval between checks of the readiness probe + periodSeconds: 30 +# -- Configure the kube-proxy replacement in Cilium BPF datapath +# Valid options are "true" or "false". +# ref: https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/ +#kubeProxyReplacement: "false" + +# -- healthz server bind address for the kube-proxy replacement. +# To enable set the value to '0.0.0.0:10256' for all ipv4 +# addresses and this '[::]:10256' for all ipv6 addresses. +# By default it is disabled. +kubeProxyReplacementHealthzBindAddr: "" +l2NeighDiscovery: + # -- Enable L2 neighbor discovery in the agent + enabled: true + # -- Override the agent's default neighbor resolution refresh period. + refreshPeriod: "30s" +# -- Enable Layer 7 network policy. +l7Proxy: true +# -- Enable Local Redirect Policy. +localRedirectPolicy: false +# To include or exclude matched resources from cilium identity evaluation +# labels: "" + +# logOptions allows you to define logging options. eg: +# logOptions: +# format: json + +# -- Enables periodic logging of system load +logSystemLoad: false +# -- Configure maglev consistent hashing +maglev: {} +# -- tableSize is the size (parameter M) for the backend table of one +# service entry +# tableSize: + +# -- hashSeed is the cluster-wide base64 encoded seed for the hashing +# hashSeed: + +# -- Enables masquerading of IPv4 traffic leaving the node from endpoints. +enableIPv4Masquerade: true +# -- Enables masquerading of IPv6 traffic leaving the node from endpoints. 
+enableIPv6Masquerade: true
+# -- Enables masquerading to the source of the route for traffic leaving the node from endpoints.
+enableMasqueradeRouteSource: false
+# -- Enables IPv4 BIG TCP support which increases maximum IPv4 GSO/GRO limits for nodes and pods
+enableIPv4BIGTCP: false
+# -- Enables IPv6 BIG TCP support which increases maximum IPv6 GSO/GRO limits for nodes and pods
+enableIPv6BIGTCP: false
+nat:
+ # -- Number of the top-k SNAT map connections to track in Cilium statedb.
+ mapStatsEntries: 32
+ # -- Interval at which the SNAT map is counted for stats.
+ mapStatsInterval: 30s
+egressGateway:
+ # -- Enables egress gateway to redirect and SNAT the traffic that leaves the
+ # cluster.
+ enabled: false
+ # -- Time between triggers of egress gateway state reconciliations
+ reconciliationTriggerInterval: 1s
+ # -- Maximum number of entries in egress gateway policy map
+ # maxPolicyEntries: 16384
+vtep:
+ # -- Enables VXLAN Tunnel Endpoint (VTEP) Integration (beta) to allow
+ # Cilium-managed pods to talk to third party VTEP devices over Cilium tunnel.
+ enabled: false
+ # -- A space separated list of VTEP device endpoint IPs, for example "1.1.1.1 1.1.2.1"
+ endpoint: ""
+ # -- A space separated list of VTEP device CIDRs, for example "1.1.1.0/24 1.1.2.0/24"
+ cidr: ""
+ # -- VTEP CIDRs Mask that applies to all VTEP CIDRs, for example "255.255.255.0"
+ mask: ""
+ # -- A space separated list of VTEP device MAC addresses (VTEP MAC), for example "x:x:x:x:x:x y:y:y:y:y:y"
+ mac: ""
+# -- (string) Allows to explicitly specify the IPv4 CIDR for native routing.
+# When specified, Cilium assumes networking for this CIDR is preconfigured and
+# hands traffic destined for that range to the Linux network stack without
+# applying any SNAT.
+# Generally speaking, specifying a native routing CIDR implies that Cilium can
+# depend on the underlying networking stack to route packets to their
+# destination. To offer a concrete example, if Cilium is configured to use
+# direct routing and the Kubernetes CIDR is included in the native routing CIDR,
+# the user must configure the routes to reach pods, either manually or by
+# setting the auto-direct-node-routes flag.
+ipv4NativeRoutingCIDR: ""
+# -- (string) Allows to explicitly specify the IPv6 CIDR for native routing.
+# When specified, Cilium assumes networking for this CIDR is preconfigured and
+# hands traffic destined for that range to the Linux network stack without
+# applying any SNAT.
+# Generally speaking, specifying a native routing CIDR implies that Cilium can
+# depend on the underlying networking stack to route packets to their
+# destination. To offer a concrete example, if Cilium is configured to use
+# direct routing and the Kubernetes CIDR is included in the native routing CIDR,
+# the user must configure the routes to reach pods, either manually or by
+# setting the auto-direct-node-routes flag.
+ipv6NativeRoutingCIDR: ""
+# -- cilium-monitor sidecar.
+monitor:
+ # -- Enable the cilium-monitor sidecar.
+ enabled: false
+# -- Configure service load balancing
+loadBalancer:
+ # -- standalone enables the standalone L4LB which does not connect to
+ # kube-apiserver.
+ # standalone: false
+
+ # -- algorithm is the name of the load balancing algorithm for backend
+ # selection e.g. random or maglev
+ # algorithm: random
+
+ # -- mode is the operation mode of load balancing for remote backends
+ # e.g.
snat, dsr, hybrid + # mode: snat + + # -- acceleration is the option to accelerate service handling via XDP + # Applicable values can be: disabled (do not use XDP), native (XDP BPF + # program is run directly out of the networking driver's early receive + # path), or best-effort (use native mode XDP acceleration on devices + # that support it). + acceleration: disabled + # -- dsrDispatch configures whether IP option or IPIP encapsulation is + # used to pass a service IP and port to remote backend + # dsrDispatch: opt + + # -- serviceTopology enables K8s Topology Aware Hints -based service + # endpoints filtering + # serviceTopology: false + + # -- experimental enables support for the experimental load-balancing + # control-plane. + experimental: false + # -- L7 LoadBalancer + l7: + # -- Enable L7 service load balancing via envoy proxy. + # The request to a k8s service, which has specific annotation e.g. service.cilium.io/lb-l7, + # will be forwarded to the local backend proxy to be load balanced to the service endpoints. + # Please refer to docs for supported annotations for more configuration. + # + # Applicable values: + # - envoy: Enable L7 load balancing via envoy proxy. This will automatically set enable-envoy-config as well. + # - disabled: Disable L7 load balancing by way of service annotation. + backend: disabled + # -- List of ports from service to be automatically redirected to above backend. + # Any service exposing one of these ports will be automatically redirected. + # Fine-grained control can be achieved by using the service annotation. + ports: [] + # -- Default LB algorithm + # The default LB algorithm to be used for services, which can be overridden by the + # service annotation (e.g. service.cilium.io/lb-l7-algorithm) + # Applicable values: round_robin, least_request, random + algorithm: round_robin +# -- Configure N-S k8s service loadbalancing +nodePort: + # -- Enable the Cilium NodePort service implementation. + enabled: false + # -- Port range to use for NodePort services. + # range: "30000,32767" + + # @schema + # type: [null, string, array] + # @schema + # -- List of CIDRs for choosing which IP addresses assigned to native devices are used for NodePort load-balancing. + # By default this is empty and the first suitable, preferably private, IPv4 and IPv6 address assigned to each device is used. + # + # Example: + # + # addresses: ["192.168.1.0/24", "2001::/64"] + # + addresses: ~ + # -- Set to true to prevent applications binding to service ports. + bindProtection: true + # -- Append NodePort range to ip_local_reserved_ports if clash with ephemeral + # ports is detected. + autoProtectPortRange: true + # -- Enable healthcheck nodePort server for NodePort services + enableHealthCheck: true + # -- Enable access of the healthcheck nodePort on the LoadBalancerIP. Needs + # EnableHealthCheck to be enabled + enableHealthCheckLoadBalancerIP: false +# policyAuditMode: false + +# -- The agent can be put into one of the three policy enforcement modes: +# default, always and never. +# ref: https://docs.cilium.io/en/stable/security/policy/intro/#policy-enforcement-modes +policyEnforcementMode: "default" +# @schema +# type: [null, string, array] +# @schema +# -- policyCIDRMatchMode is a list of entities that may be selected by CIDR selector. +# The possible value is "nodes". 
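+# For example, allowing CIDR selectors to also match cluster nodes would look
+# like this in an override values file (a sketch, not a chart default):
+# policyCIDRMatchMode: "nodes"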
+policyCIDRMatchMode:
+pprof:
+ # -- Enable pprof for cilium-agent
+ enabled: false
+ # -- Configure pprof listen address for cilium-agent
+ address: localhost
+ # -- Configure pprof listen port for cilium-agent
+ port: 6060
+# -- Configure prometheus metrics on the configured port at /metrics
+prometheus:
+ metricsService: false
+ enabled: false
+ port: 9962
+ serviceMonitor:
+ # -- Enable service monitors.
+ # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
+ enabled: false
+ # -- Labels to add to ServiceMonitor cilium-agent
+ labels: {}
+ # -- Annotations to add to ServiceMonitor cilium-agent
+ annotations: {}
+ # -- jobLabel to add for ServiceMonitor cilium-agent
+ jobLabel: ""
+ # -- Interval for scrape metrics.
+ interval: "10s"
+ # -- Specify the Kubernetes namespace where Prometheus expects to find
+ # service monitors configured.
+ # namespace: ""
+ # -- Relabeling configs for the ServiceMonitor cilium-agent
+ relabelings:
+ - sourceLabels:
+ - __meta_kubernetes_pod_node_name
+ targetLabel: node
+ replacement: ${1}
+ # @schema
+ # type: [null, array]
+ # @schema
+ # -- Metrics relabeling configs for the ServiceMonitor cilium-agent
+ metricRelabelings: ~
+ # -- Set to `true` and helm will not check for monitoring.coreos.com/v1 CRDs before deploying
+ trustCRDsExist: false
+ # @schema
+ # type: [null, array]
+ # @schema
+ # -- Metrics that should be enabled or disabled from the default metric list.
+ # The list is expected to be separated by a space. (+metric_foo to enable
+ # metric_foo, -metric_bar to disable metric_bar).
+ # ref: https://docs.cilium.io/en/stable/observability/metrics/
+ metrics: ~
+ # --- Enable controller group metrics for monitoring specific Cilium
+ # subsystems. The list is a list of controller group names. The special
+ # values of "all" and "none" are supported. The set of controller
+ # group names is not guaranteed to be stable between Cilium versions.
+ controllerGroupMetrics:
+ - write-cni-file
+ - sync-host-ips
+ - sync-lb-maps-with-k8s-services
+# -- Grafana dashboards for cilium-agent
+# grafana can import dashboards based on the label and value
+# ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards
+dashboards:
+ enabled: false
+ label: grafana_dashboard
+ # @schema
+ # type: [null, string]
+ # @schema
+ namespace: ~
+ labelValue: "1"
+ annotations: {}
+# Configure Cilium Envoy options.
+envoy:
+ # @schema
+ # type: [null, boolean]
+ # @schema
+ # -- Enable Envoy Proxy in standalone DaemonSet.
+ # This field is enabled by default for new installations.
+ # @default -- `true` for new installation
+ enabled: ~
+ # -- (int)
+ # Set Envoy's '--base-id' to use when allocating shared memory regions.
+ # Only needs to be changed if multiple Envoy instances will run on the same node and may have conflicts. Supported values: 0 - 4294967295. Defaults to '0'.
+ baseID: 0
+ log:
+ # @schema
+ # type: [null, string]
+ # @schema
+ # -- The format string to use for laying out the log message metadata of Envoy. If specified, Envoy will use text format output.
+ # This setting is mutually exclusive with envoy.log.format_json.
+ format: "[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"
+ # @schema
+ # type: [null, object]
+ # @schema
+ # -- The JSON logging format to use for Envoy. This setting is mutually exclusive with envoy.log.format.
+ # ref: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/bootstrap/v3/bootstrap.proto#envoy-v3-api-field-config-bootstrap-v3-bootstrap-applicationlogconfig-logformat-json-format + format_json: null + # date: "%Y-%m-%dT%T.%e" + # thread_id: "%t" + # source_line: "%s:%#" + # level: "%l" + # logger: "%n" + # message: "%j" + # -- Path to a separate Envoy log file, if any. Defaults to /dev/stdout. + path: "" + # @schema + # oneOf: + # - type: [null] + # - enum: [trace,debug,info,warning,error,critical,off] + # @schema + # -- Default log level of Envoy application log that is configured if Cilium debug / verbose logging isn't enabled. + # This option allows to have a different log level than the Cilium Agent - e.g. lower it to `critical`. + # Possible values: trace, debug, info, warning, error, critical, off + # @default -- Defaults to the default log level of the Cilium Agent - `info` + defaultLevel: ~ + # @schema + # type: [null, integer] + # @schema + # -- Size of the Envoy access log buffer created within the agent in bytes. + # Tune this value up if you encounter "Envoy: Discarded truncated access log message" errors. + # Large request/response header sizes (e.g. 16KiB) will require a larger buffer size. + accessLogBufferSize: 4096 + # -- Time in seconds after which a TCP connection attempt times out + connectTimeoutSeconds: 2 + # -- Time in seconds after which the initial fetch on an xDS stream is considered timed out + initialFetchTimeoutSeconds: 30 + # -- Maximum number of concurrent retries on Envoy clusters + maxConcurrentRetries: 128 + # -- Maximum number of retries for each HTTP request + httpRetryCount: 3 + # -- ProxyMaxRequestsPerConnection specifies the max_requests_per_connection setting for Envoy + maxRequestsPerConnection: 0 + # -- Set Envoy HTTP option max_connection_duration seconds. Default 0 (disable) + maxConnectionDurationSeconds: 0 + # -- Set Envoy upstream HTTP idle connection timeout seconds. + # Does not apply to connections with pending requests. Default 60s + idleTimeoutDurationSeconds: 60 + # -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the ingress L7 policy enforcement Envoy listeners. + xffNumTrustedHopsL7PolicyIngress: 0 + # -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the egress L7 policy enforcement Envoy listeners. + xffNumTrustedHopsL7PolicyEgress: 0 + # @schema + # type: [null, string] + # @schema + # -- Max duration to wait for endpoint policies to be restored on restart. Default "3m". + policyRestoreTimeoutDuration: null + # -- Envoy container image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/cilium-envoy" + tag: "v1.33.9-1757932127-3c04e8f2f1027d106b96f8ef4a0215e81dbaaece" + pullPolicy: "IfNotPresent" + digest: "sha256:06fbc4e55d926dd82ff2a0049919248dcc6be5354609b09012b01bc9c5b0ee28" + useDigest: true + # -- Additional containers added to the cilium Envoy DaemonSet. + extraContainers: [] + # -- Additional envoy container arguments. + extraArgs: [] + # -- Additional envoy container environment variables. + extraEnv: [] + # -- Additional envoy hostPath mounts. + extraHostPathMounts: [] + # - name: host-mnt-data + # mountPath: /host/mnt/data + # hostPath: /mnt/data + # hostPathType: Directory + # readOnly: true + # mountPropagation: HostToContainer + + # -- Additional envoy volumes. + extraVolumes: [] + # -- Additional envoy volumeMounts. 
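+ # Entries use the standard Kubernetes volumeMount schema and must reference
+ # a volume declared under extraVolumes above. A purely illustrative sketch:
+ # extraVolumeMounts:
+ #   - name: host-mnt-data
+ #     mountPath: /host/mnt/data
+ #     readOnly: true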
+ extraVolumeMounts: [] + # -- Configure termination grace period for cilium-envoy DaemonSet. + terminationGracePeriodSeconds: 1 + # -- TCP port for the health API. + healthPort: 9878 + # -- cilium-envoy update strategy + # ref: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/#updating-a-daemonset + updateStrategy: + type: RollingUpdate + rollingUpdate: + # @schema + # type: [integer, string] + # @schema + maxUnavailable: 2 + # -- Roll out cilium envoy pods automatically when configmap is updated. + rollOutPods: false + # -- ADVANCED OPTION: Bring your own custom Envoy bootstrap ConfigMap. Provide the name of a ConfigMap with a `bootstrap-config.json` key. + # When specified, Envoy will use this ConfigMap instead of the default provided by the chart. + # WARNING: Use of this setting has the potential to prevent cilium-envoy from starting up, and can cause unexpected behavior (e.g. due to + # syntax error or semantically incorrect configuration). Before submitting an issue, please ensure you have disabled this feature, as support + # cannot be provided for custom Envoy bootstrap configs. + # @schema + # type: [null, string] + # @schema + bootstrapConfigMap: ~ + # -- Annotations to be added to all top-level cilium-envoy objects (resources under templates/cilium-envoy) + annotations: {} + # -- Security Context for cilium-envoy pods. + podSecurityContext: + # -- AppArmorProfile options for the `cilium-agent` and init containers + appArmorProfile: + type: "Unconfined" + # -- Annotations to be added to envoy pods + podAnnotations: {} + # -- Labels to be added to envoy pods + podLabels: {} + # -- Envoy resource limits & requests + # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: {} + # limits: + # cpu: 4000m + # memory: 4Gi + # requests: + # cpu: 100m + # memory: 512Mi + + startupProbe: + # -- failure threshold of startup probe. + # 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s) + failureThreshold: 105 + # -- interval between checks of the startup probe + periodSeconds: 2 + livenessProbe: + # -- failure threshold of liveness probe + failureThreshold: 10 + # -- interval between checks of the liveness probe + periodSeconds: 30 + readinessProbe: + # -- failure threshold of readiness probe + failureThreshold: 3 + # -- interval between checks of the readiness probe + periodSeconds: 30 + securityContext: + # -- User to run the pod with + # runAsUser: 0 + # -- Run the pod with elevated privileges + privileged: false + # -- SELinux options for the `cilium-envoy` container + seLinuxOptions: + level: 's0' + # Running with spc_t since we have removed the privileged mode. + # Users can change it to a different type as long as they have the + # type available on the system. + type: 'spc_t' + capabilities: + # -- Capabilities for the `cilium-envoy` container. + # Even though granted to the container, the cilium-envoy-starter wrapper drops + # all capabilities after forking the actual Envoy process. + # `NET_BIND_SERVICE` is the only capability that can be passed to the Envoy process by + # setting `envoy.securityContext.capabilities.keepNetBindService=true` (in addition to granting the + # capability to the container). + # Note: In case of embedded envoy, the capability must be granted to the cilium-agent container. + envoy: + # Used since cilium proxy uses setting IPPROTO_IP/IP_TRANSPARENT + - NET_ADMIN + # We need it for now but might not need it for >= 5.11 specially + # for the 'SYS_RESOURCE'. 
+ # In >= 5.8 there's already BPF and PERFMON capabilities
+ - SYS_ADMIN
+ # Both PERFMON and BPF require kernel 5.8, container runtime
+ # cri-o >= v1.22.0 or containerd >= v1.5.0.
+ # If available, SYS_ADMIN can be removed.
+ #- PERFMON
+ #- BPF
+ # -- Keep capability `NET_BIND_SERVICE` for Envoy process.
+ keepCapNetBindService: false
+ # -- Affinity for cilium-envoy.
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ k8s-app: cilium-envoy
+ podAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ k8s-app: cilium
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: cilium.io/no-schedule
+ operator: NotIn
+ values:
+ - "true"
+ # -- Node selector for cilium-envoy.
+ nodeSelector:
+ kubernetes.io/os: linux
+ # -- Node tolerations for envoy scheduling to nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations:
+ - operator: Exists
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+ # @schema
+ # type: [null, string]
+ # @schema
+ # -- The priority class to use for cilium-envoy.
+ priorityClassName: ~
+ # @schema
+ # type: [null, string]
+ # @schema
+ # -- DNS policy for Cilium envoy pods.
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
+ dnsPolicy: ~
+ debug:
+ admin:
+ # -- Enable admin interface for cilium-envoy.
+ # This is useful for debugging and should not be enabled in production.
+ enabled: false
+ # -- Port number (bound to loopback interface).
+ # kubectl port-forward can be used to access the admin interface.
+ port: 9901
+ # -- Configure Cilium Envoy Prometheus options.
+ # Note that some of these apply to either cilium-agent or cilium-envoy.
+ prometheus:
+ # -- Enable prometheus metrics for cilium-envoy
+ enabled: true
+ serviceMonitor:
+ # -- Enable service monitors.
+ # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
+ # Note that this setting applies to both cilium-envoy _and_ cilium-agent
+ # with Envoy enabled.
+ enabled: false
+ # -- Labels to add to ServiceMonitor cilium-envoy
+ labels: {}
+ # -- Annotations to add to ServiceMonitor cilium-envoy
+ annotations: {}
+ # -- Interval for scrape metrics.
+ interval: "10s"
+ # -- Specify the Kubernetes namespace where Prometheus expects to find
+ # service monitors configured.
+ # namespace: ""
+ # -- Relabeling configs for the ServiceMonitor cilium-envoy
+ # or for cilium-agent with Envoy configured.
+ relabelings:
+ - sourceLabels:
+ - __meta_kubernetes_pod_node_name
+ targetLabel: node
+ replacement: ${1}
+ # @schema
+ # type: [null, array]
+ # @schema
+ # -- Metrics relabeling configs for the ServiceMonitor cilium-envoy
+ # or for cilium-agent with Envoy configured.
+ metricRelabelings: ~
+ # -- Serve prometheus metrics for cilium-envoy on the configured port
+ port: "9964"
+# -- Enable/Disable use of node label based identity
+nodeSelectorLabels: false
+# -- Enable resource quotas for priority classes used in the cluster.
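+# When enabled, the chart renders ResourceQuota objects using the pod counts
+# configured below, capping how many pods may claim the system priority
+# classes in the installation namespace.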
+resourceQuotas: + enabled: false + cilium: + hard: + # 5k nodes * 2 DaemonSets (Cilium and cilium node init) + pods: "10k" + operator: + hard: + # 15 "clusterwide" Cilium Operator pods for HA + pods: "15" +# Need to document default +################## +#sessionAffinity: false + +# -- Do not run Cilium agent when running with clean mode. Useful to completely +# uninstall Cilium as it will stop Cilium from starting and create artifacts +# in the node. +sleepAfterInit: false +# -- Enable check of service source ranges (currently, only for LoadBalancer). +svcSourceRangeCheck: true +# -- Synchronize Kubernetes nodes to kvstore and perform CNP GC. +synchronizeK8sNodes: true +# -- Configure TLS configuration in the agent. +tls: + # @schema + # type: [null, string] + # @schema + # -- This configures how the Cilium agent loads the secrets used TLS-aware CiliumNetworkPolicies + # (namely the secrets referenced by terminatingTLS and originatingTLS). + # This value is DEPRECATED and will be removed in a future version. + # Use `tls.readSecretsOnlyFromSecretsNamespace` instead. + # Possible values: + # - local + # - k8s + secretsBackend: ~ + # @schema + # type: [null, boolean] + # @schema + # -- Configure if the Cilium Agent will only look in `tls.secretsNamespace` for + # CiliumNetworkPolicy relevant Secrets. + # If false, the Cilium Agent will be granted READ (GET/LIST/WATCH) access + # to _all_ secrets in the entire cluster. This is not recommended and is + # included for backwards compatibility. + # This value obsoletes `tls.secretsBackend`, with `true` == `local` in the old + # setting, and `false` == `k8s`. + readSecretsOnlyFromSecretsNamespace: ~ + # -- Configures where secrets used in CiliumNetworkPolicies will be looked for + secretsNamespace: + # -- Create secrets namespace for TLS Interception secrets. + create: true + # -- Name of TLS Interception secret namespace. + name: cilium-secrets + # -- Configures settings for synchronization of TLS Interception Secrets + secretSync: + # @schema + # type: [null, boolean] + # @schema + # -- Enable synchronization of Secrets for TLS Interception. If disabled and + # tls.readSecretsOnlyFromSecretsNamespace is set to 'false', then secrets will be read directly by the agent. + enabled: ~ + # -- Base64 encoded PEM values for the CA certificate and private key. + # This can be used as common CA to generate certificates used by hubble and clustermesh components. + # It is neither required nor used when cert-manager is used to generate the certificates. + ca: + # -- Optional CA cert. If it is provided, it will be used by cilium to + # generate all other certificates. Otherwise, an ephemeral CA is generated. + cert: "" + # -- Optional CA private key. If it is provided, it will be used by cilium to + # generate all other certificates. Otherwise, an ephemeral CA is generated. + key: "" + # -- Generated certificates validity duration in days. This will be used for auto generated CA. + certValidityDuration: 1095 + # -- Configure the CA trust bundle used for the validation of the certificates + # leveraged by hubble and clustermesh. When enabled, it overrides the content of the + # 'ca.crt' field of the respective certificates, allowing for CA rotation with no down-time. + caBundle: + # -- Enable the use of the CA trust bundle. + enabled: false + # -- Name of the ConfigMap containing the CA trust bundle. + name: cilium-root-ca.crt + # -- Entry of the ConfigMap containing the CA trust bundle. + key: ca.crt + # -- Use a Secret instead of a ConfigMap. 
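+ # When set to true, the trust bundle is read from a Secret (instead of a
+ # ConfigMap) with the same name and key configured above.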
+ useSecret: false + # If uncommented, creates the ConfigMap and fills it with the specified content. + # Otherwise, the ConfigMap is assumed to be already present in .Release.Namespace. + # + # content: | + # -----BEGIN CERTIFICATE----- + # ... + # -----END CERTIFICATE----- + # -----BEGIN CERTIFICATE----- + # ... + # -----END CERTIFICATE----- +# -- Tunneling protocol to use in tunneling mode and for ad-hoc tunnels. +# Possible values: +# - "" +# - vxlan +# - geneve +# @default -- `"vxlan"` +tunnelProtocol: "" +# -- Enable native-routing mode or tunneling mode. +# Possible values: +# - "" +# - native +# - tunnel +# @default -- `"tunnel"` +routingMode: "" +# -- Configure VXLAN and Geneve tunnel port. +# @default -- Port 8472 for VXLAN, Port 6081 for Geneve +tunnelPort: 0 +# -- Configure VXLAN and Geneve tunnel source port range hint. +# @default -- 0-0 to let the kernel driver decide the range +tunnelSourcePortRange: 0-0 +# -- Configure what the response should be to traffic for a service without backends. +# Possible values: +# - reject (default) +# - drop +serviceNoBackendResponse: reject +# -- Configure the underlying network MTU to overwrite auto-detected MTU. +# This value doesn't change the host network interface MTU i.e. eth0 or ens0. +# It changes the MTU for cilium_net@cilium_host, cilium_host@cilium_net, +# cilium_vxlan and lxc_health interfaces. +MTU: 0 +# -- Disable the usage of CiliumEndpoint CRD. +disableEndpointCRD: false +wellKnownIdentities: + # -- Enable the use of well-known identities. + enabled: false +etcd: + # -- Enable etcd mode for the agent. + enabled: false + # -- List of etcd endpoints + endpoints: + - https://CHANGE-ME:2379 + # -- Enable use of TLS/SSL for connectivity to etcd. + ssl: false +operator: + # -- Enable the cilium-operator component (required). + enabled: true + # -- Roll out cilium-operator pods automatically when configmap is updated. + rollOutPods: false + # -- cilium-operator image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/operator" + tag: "v1.17.8" + # operator-generic-digest + genericDigest: "sha256:5468807b9c31997f3a1a14558ec7c20c5b962a2df6db633b7afbe2f45a15da1c" + # operator-azure-digest + azureDigest: "sha256:619f9febf3efef2724a26522b253e4595cd33c274f5f49925e29a795fdc2d2d7" + # operator-aws-digest + awsDigest: "sha256:28012f7d0f4f23e9f6c7d6a5dd931afa326bbac3e8103f3f6f22b9670847dffa" + # operator-alibabacloud-digest + alibabacloudDigest: "sha256:72c25a405ad8e58d2cf03f7ea2b6696ed1edcfb51716b5f85e45c6c4fcaa6056" + useDigest: true + pullPolicy: "IfNotPresent" + suffix: "" + # -- Number of replicas to run for the cilium-operator deployment + replicas: 2 + # -- The priority class to use for cilium-operator + priorityClassName: "" + # -- DNS policy for Cilium operator pods. 
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + dnsPolicy: "" + # -- cilium-operator update strategy + updateStrategy: + type: RollingUpdate + rollingUpdate: + # @schema + # type: [integer, string] + # @schema + maxSurge: 25% + # @schema + # type: [integer, string] + # @schema + maxUnavailable: 50% + # -- Affinity for cilium-operator + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + io.cilium/app: operator + # -- Pod topology spread constraints for cilium-operator + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Node labels for cilium-operator pod assignment + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for cilium-operator scheduling to nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # -- Additional cilium-operator container arguments. + extraArgs: [] + # -- Additional cilium-operator environment variables. + extraEnv: [] + # -- Additional cilium-operator hostPath mounts. + extraHostPathMounts: [] + # - name: host-mnt-data + # mountPath: /host/mnt/data + # hostPath: /mnt/data + # hostPathType: Directory + # readOnly: true + # mountPropagation: HostToContainer + + # -- Additional cilium-operator volumes. + extraVolumes: [] + # -- Additional cilium-operator volumeMounts. + extraVolumeMounts: [] + # -- Annotations to be added to all top-level cilium-operator objects (resources under templates/cilium-operator) + annotations: {} + # -- HostNetwork setting + hostNetwork: true + # -- Security context to be added to cilium-operator pods + podSecurityContext: {} + # -- Annotations to be added to cilium-operator pods + podAnnotations: {} + # -- Labels to be added to cilium-operator pods + podLabels: {} + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # @schema + # type: [null, integer, string] + # @schema + # -- Minimum number/percentage of pods that should remain scheduled. + # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # @schema + # type: [null, integer, string] + # @schema + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + # -- cilium-operator resource limits & requests + # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: {} + # limits: + # cpu: 1000m + # memory: 1Gi + # requests: + # cpu: 100m + # memory: 128Mi + + # -- Security context to be added to cilium-operator pods + securityContext: {} + # runAsUser: 0 + + # -- Interval for endpoint garbage collection. + endpointGCInterval: "5m0s" + # -- Interval for cilium node garbage collection. + nodeGCInterval: "5m0s" + # -- Interval for identity garbage collection. + identityGCInterval: "15m0s" + # -- Timeout for identity heartbeats. 
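+ # Identities whose last heartbeat is older than this timeout are considered
+ # stale and become eligible for garbage collection.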
+ identityHeartbeatTimeout: "30m0s" + pprof: + # -- Enable pprof for cilium-operator + enabled: false + # -- Configure pprof listen address for cilium-operator + address: localhost + # -- Configure pprof listen port for cilium-operator + port: 6061 + # -- Enable prometheus metrics for cilium-operator on the configured port at + # /metrics + prometheus: + metricsService: false + enabled: true + port: 9963 + serviceMonitor: + # -- Enable service monitors. + # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + enabled: false + # -- Labels to add to ServiceMonitor cilium-operator + labels: {} + # -- Annotations to add to ServiceMonitor cilium-operator + annotations: {} + # -- jobLabel to add for ServiceMonitor cilium-operator + jobLabel: "" + # -- Interval for scrape metrics. + interval: "10s" + # @schema + # type: [null, array] + # @schema + # -- Relabeling configs for the ServiceMonitor cilium-operator + relabelings: ~ + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor cilium-operator + metricRelabelings: ~ + # -- Grafana dashboards for cilium-operator + # grafana can import dashboards based on the label and value + # ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards + dashboards: + enabled: false + label: grafana_dashboard + # @schema + # type: [null, string] + # @schema + namespace: ~ + labelValue: "1" + annotations: {} + # -- Skip CRDs creation for cilium-operator + skipCRDCreation: false + # -- Remove Cilium node taint from Kubernetes nodes that have a healthy Cilium + # pod running. + removeNodeTaints: true + # @schema + # type: [null, boolean] + # @schema + # -- Taint nodes where Cilium is scheduled but not running. This prevents pods + # from being scheduled to nodes where Cilium is not the default CNI provider. + # @default -- same as removeNodeTaints + setNodeTaints: ~ + # -- Set Node condition NetworkUnavailable to 'false' with the reason + # 'CiliumIsUp' for nodes that have a healthy Cilium pod. + setNodeNetworkStatus: true + unmanagedPodWatcher: + # -- Restart any pod that are not managed by Cilium. + restart: true + # -- Interval, in seconds, to check if there are any pods that are not + # managed by Cilium. + intervalSeconds: 15 +nodeinit: + # -- Enable the node initialization DaemonSet + enabled: false + # -- node-init image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/startup-script" + tag: "1755531540-60ee83e" + digest: "sha256:5bdca3c2dec2c79f58d45a7a560bf1098c2126350c901379fe850b7f78d3d757" + useDigest: true + pullPolicy: "IfNotPresent" + # -- The priority class to use for the nodeinit pod. + priorityClassName: "" + # -- node-init update strategy + updateStrategy: + type: RollingUpdate + # -- Additional nodeinit environment variables. + extraEnv: [] + # -- Additional nodeinit volumes. + extraVolumes: [] + # -- Additional nodeinit volumeMounts. 
+ extraVolumeMounts: [] + # -- Affinity for cilium-nodeinit + affinity: {} + # -- Node labels for nodeinit pod assignment + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for nodeinit scheduling to nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # -- Annotations to be added to all top-level nodeinit objects (resources under templates/cilium-nodeinit) + annotations: {} + # -- Annotations to be added to node-init pods. + podAnnotations: {} + # -- Labels to be added to node-init pods. + podLabels: {} + # -- Security Context for cilium-node-init pods. + podSecurityContext: + # -- AppArmorProfile options for the `cilium-node-init` and init containers + appArmorProfile: + type: "Unconfined" + # -- nodeinit resource limits & requests + # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + requests: + cpu: 100m + memory: 100Mi + # -- Security context to be added to nodeinit pods. + securityContext: + privileged: false + seLinuxOptions: + level: 's0' + # Running with spc_t since we have removed the privileged mode. + # Users can change it to a different type as long as they have the + # type available on the system. + type: 'spc_t' + capabilities: + add: + # Used in iptables. Consider removing once we are iptables-free + - SYS_MODULE + # Used for nsenter + - NET_ADMIN + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + # -- bootstrapFile is the location of the file where the bootstrap timestamp is + # written by the node-init DaemonSet + bootstrapFile: "/tmp/cilium-bootstrap.d/cilium-bootstrap-time" + # -- startup offers way to customize startup nodeinit script (pre and post position) + startup: + preScript: "" + postScript: "" + # -- prestop offers way to customize prestop nodeinit script (pre and post position) + prestop: + preScript: "" + postScript: "" +preflight: + # -- Enable Cilium pre-flight resources (required for upgrade) + enabled: false + # -- Cilium pre-flight image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/cilium" + tag: "v1.17.8" + # cilium-digest + digest: "sha256:6d7ea72ed311eeca4c75a1f17617a3d596fb6038d30d00799090679f82a01636" + useDigest: true + pullPolicy: "IfNotPresent" + # -- The priority class to use for the preflight pod. + priorityClassName: "" + # -- preflight update strategy + updateStrategy: + type: RollingUpdate + # -- Additional preflight environment variables. + extraEnv: [] + # -- Additional preflight volumes. + extraVolumes: [] + # -- Additional preflight volumeMounts. 
+ extraVolumeMounts: [] + # -- Affinity for cilium-preflight + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium + # -- Node labels for preflight pod assignment + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for preflight scheduling to nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # -- Annotations to be added to all top-level preflight objects (resources under templates/cilium-preflight) + annotations: {} + # -- Security context to be added to preflight pods. + podSecurityContext: {} + # -- Annotations to be added to preflight pods + podAnnotations: {} + # -- Labels to be added to the preflight pod. + podLabels: {} + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # @schema + # type: [null, integer, string] + # @schema + # -- Minimum number/percentage of pods that should remain scheduled. + # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # @schema + # type: [null, integer, string] + # @schema + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + # -- preflight resource limits & requests + # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: {} + # limits: + # cpu: 4000m + # memory: 4Gi + # requests: + # cpu: 100m + # memory: 512Mi + + readinessProbe: + # -- For how long kubelet should wait before performing the first probe + initialDelaySeconds: 5 + # -- interval between checks of the readiness probe + periodSeconds: 5 + # -- Security context to be added to preflight pods + securityContext: {} + # runAsUser: 0 + + # -- Path to write the `--tofqdns-pre-cache` file to. + tofqdnsPreCache: "" + # -- Configure termination grace period for preflight Deployment and DaemonSet. + terminationGracePeriodSeconds: 1 + # -- By default we should always validate the installed CNPs before upgrading + # Cilium. This will make sure the user will have the policies deployed in the + # cluster with the right schema. + validateCNPs: true +# -- Explicitly enable or disable priority class. +# .Capabilities.KubeVersion is unsettable in `helm template` calls, +# it depends on k8s libraries version that Helm was compiled against. +# This option allows to explicitly disable setting the priority class, which +# is useful for rendering charts for gke clusters in advance. +enableCriticalPriorityClass: true +# disableEnvoyVersionCheck removes the check for Envoy, which can be useful +# on AArch64 as the images do not currently ship a version of Envoy. +#disableEnvoyVersionCheck: false +clustermesh: + # -- Deploy clustermesh-apiserver for clustermesh + useAPIServer: false + # -- The maximum number of clusters to support in a ClusterMesh. This value + # cannot be changed on running clusters, and all clusters in a ClusterMesh + # must be configured with the same value. Values > 255 will decrease the + # maximum allocatable cluster-local identities. + # Supported values are 255 and 511. 
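+ # As a worked example: the cluster ID is packed into the upper bits of the
+ # 24-bit numeric security identity, so 255 clusters consume 8 bits and 511
+ # consume 9, roughly halving the remaining cluster-local identity space.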
+ maxConnectedClusters: 255 + # -- Enable the synchronization of Kubernetes EndpointSlices corresponding to + # the remote endpoints of appropriately-annotated global services through ClusterMesh + enableEndpointSliceSynchronization: false + # -- Enable Multi-Cluster Services API support + enableMCSAPISupport: false + # -- Annotations to be added to all top-level clustermesh objects (resources under templates/clustermesh-apiserver and templates/clustermesh-config) + annotations: {} + # -- Clustermesh explicit configuration. + config: + # -- Enable the Clustermesh explicit configuration. + enabled: false + # -- Default dns domain for the Clustermesh API servers + # This is used in the case cluster addresses are not provided + # and IPs are used. + domain: mesh.cilium.io + # -- List of clusters to be peered in the mesh. + clusters: [] + # clusters: + # # -- Name of the cluster + # - name: cluster1 + # # -- Address of the cluster, use this if you created DNS records for + # # the cluster Clustermesh API server. + # address: cluster1.mesh.cilium.io + # # -- Port of the cluster Clustermesh API server. + # port: 2379 + # # -- IPs of the cluster Clustermesh API server, use multiple ones when + # # you have multiple IPs to access the Clustermesh API server. + # ips: + # - 172.18.255.201 + # # -- base64 encoded PEM values for the cluster client certificate, private key and certificate authority. + # # These fields can (and should) be omitted in case the CA is shared across clusters. In that case, the + # # "remote" private key and certificate available in the local cluster are automatically used instead. + # tls: + # cert: "" + # key: "" + # caCert: "" + apiserver: + # -- Clustermesh API server image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/clustermesh-apiserver" + tag: "v1.17.8" + # clustermesh-apiserver-digest + digest: "sha256:3ac210d94d37a77ec010f9ac4c705edc8f15f22afa2b9a6f0e2a7d64d2360586" + useDigest: true + pullPolicy: "IfNotPresent" + # -- TCP port for the clustermesh-apiserver health API. + healthPort: 9880 + # -- Configuration for the clustermesh-apiserver readiness probe. + readinessProbe: {} + etcd: + # The etcd binary is included in the clustermesh API server image, so the same image from above is reused. + # Independent override isn't supported, because clustermesh-apiserver is tested against the etcd version it is + # built with. + + # -- Specifies the resources for etcd container in the apiserver + resources: {} + # requests: + # cpu: 200m + # memory: 256Mi + # limits: + # cpu: 1000m + # memory: 256Mi + + # -- Security context to be added to clustermesh-apiserver etcd containers + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + # -- lifecycle setting for the etcd container + lifecycle: {} + init: + # -- Specifies the resources for etcd init container in the apiserver + resources: {} + # requests: + # cpu: 100m + # memory: 100Mi + # limits: + # cpu: 100m + # memory: 100Mi + + # -- Additional arguments to `clustermesh-apiserver etcdinit`. + extraArgs: [] + # -- Additional environment variables to `clustermesh-apiserver etcdinit`. + extraEnv: [] + # @schema + # enum: [Disk, Memory] + # @schema + # -- Specifies whether etcd data is stored in a temporary volume backed by + # the node's default medium, such as disk, SSD or network storage (Disk), or + # RAM (Memory). 
The Memory option enables improved etcd read and write + # performance at the cost of additional memory usage, which counts against + # the memory limits of the container. + storageMedium: Disk + kvstoremesh: + # -- Enable KVStoreMesh. KVStoreMesh caches the information retrieved + # from the remote clusters in the local etcd instance. + enabled: true + # -- TCP port for the KVStoreMesh health API. + healthPort: 9881 + # -- Configuration for the KVStoreMesh readiness probe. + readinessProbe: {} + # -- Additional KVStoreMesh arguments. + extraArgs: [] + # -- Additional KVStoreMesh environment variables. + extraEnv: [] + # -- Resource requests and limits for the KVStoreMesh container + resources: {} + # requests: + # cpu: 100m + # memory: 64Mi + # limits: + # cpu: 1000m + # memory: 1024M + + # -- Additional KVStoreMesh volumeMounts. + extraVolumeMounts: [] + # -- KVStoreMesh Security context + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + # -- lifecycle setting for the KVStoreMesh container + lifecycle: {} + service: + # -- The type of service used for apiserver access. + type: NodePort + # -- Optional port to use as the node port for apiserver access. + # + # WARNING: make sure to configure a different NodePort in each cluster if + # kube-proxy replacement is enabled, as Cilium is currently affected by a known + # bug (#24692) when NodePorts are handled by the KPR implementation. If a service + # with the same NodePort exists both in the local and the remote cluster, all + # traffic originating from inside the cluster and targeting the corresponding + # NodePort will be redirected to a local backend, regardless of whether the + # destination node belongs to the local or the remote cluster. + nodePort: 32379 + # -- Annotations for the clustermesh-apiserver service. + # Example annotations to configure an internal load balancer on different cloud providers: + # * AKS: service.beta.kubernetes.io/azure-load-balancer-internal: "true" + # * EKS: service.beta.kubernetes.io/aws-load-balancer-scheme: "internal" + # * GKE: networking.gke.io/load-balancer-type: "Internal" + annotations: {} + # -- Labels for the clustermesh-apiserver service. + labels: {} + # @schema + # enum: [Local, Cluster] + # @schema + # -- The externalTrafficPolicy of service used for apiserver access. + externalTrafficPolicy: Cluster + # @schema + # enum: [Local, Cluster] + # @schema + # -- The internalTrafficPolicy of service used for apiserver access. + internalTrafficPolicy: Cluster + # @schema + # enum: [HAOnly, Always, Never] + # @schema + # -- Defines when to enable session affinity. + # Each replica in a clustermesh-apiserver deployment runs its own discrete + # etcd cluster. Remote clients connect to one of the replicas through a + # shared Kubernetes Service. A client reconnecting to a different backend + # will require a full resync to ensure data integrity. Session affinity + # can reduce the likelihood of this happening, but may not be supported + # by all cloud providers. + # Possible values: + # - "HAOnly" (default) Only enable session affinity for deployments with more than 1 replica. + # - "Always" Always enable session affinity. + # - "Never" Never enable session affinity. Useful in environments where + # session affinity is not supported, but may lead to slightly + # degraded performance due to more frequent reconnections. + enableSessionAffinity: "HAOnly" + # @schema + # type: [null, string] + # @schema + # -- Configure a loadBalancerClass. 
+ # Allows to configure the loadBalancerClass on the clustermesh-apiserver + # LB service in case the Service type is set to LoadBalancer + # (requires Kubernetes 1.24+). + loadBalancerClass: ~ + # @schema + # type: [null, string] + # @schema + # -- Configure a specific loadBalancerIP. + # Allows to configure a specific loadBalancerIP on the clustermesh-apiserver + # LB service in case the Service type is set to LoadBalancer. + loadBalancerIP: ~ + # -- Configure loadBalancerSourceRanges. + # Allows to configure the source IP ranges allowed to access the + # clustermesh-apiserver LB service in case the Service type is set to LoadBalancer. + loadBalancerSourceRanges: [] + # -- Number of replicas run for the clustermesh-apiserver deployment. + replicas: 1 + # -- lifecycle setting for the apiserver container + lifecycle: {} + # -- terminationGracePeriodSeconds for the clustermesh-apiserver deployment + terminationGracePeriodSeconds: 30 + # -- Additional clustermesh-apiserver arguments. + extraArgs: [] + # -- Additional clustermesh-apiserver environment variables. + extraEnv: [] + # -- Additional clustermesh-apiserver volumes. + extraVolumes: [] + # -- Additional clustermesh-apiserver volumeMounts. + extraVolumeMounts: [] + # -- Security context to be added to clustermesh-apiserver containers + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + # -- Security context to be added to clustermesh-apiserver pods + podSecurityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 + fsGroup: 65532 + # -- Annotations to be added to clustermesh-apiserver pods + podAnnotations: {} + # -- Labels to be added to clustermesh-apiserver pods + podLabels: {} + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # @schema + # type: [null, integer, string] + # @schema + # -- Minimum number/percentage of pods that should remain scheduled. 
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
+ minAvailable: null
+ # @schema
+ # type: [null, integer, string]
+ # @schema
+ # -- Maximum number/percentage of pods that may be made unavailable
+ maxUnavailable: 1
+ # -- Resource requests and limits for the clustermesh-apiserver
+ resources: {}
+ # requests:
+ # cpu: 100m
+ # memory: 64Mi
+ # limits:
+ # cpu: 1000m
+ # memory: 1024M
+
+ # -- Affinity for clustermesh.apiserver
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchLabels:
+ k8s-app: clustermesh-apiserver
+ topologyKey: kubernetes.io/hostname
+ # -- Pod topology spread constraints for clustermesh-apiserver
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Node labels for pod assignment
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector:
+ kubernetes.io/os: linux
+ # -- Node tolerations for pod assignment on nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations: []
+ # -- clustermesh-apiserver update strategy
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ # @schema
+ # type: [integer, string]
+ # @schema
+ maxSurge: 1
+ # @schema
+ # type: [integer, string]
+ # @schema
+ maxUnavailable: 0
+ # -- The priority class to use for clustermesh-apiserver
+ priorityClassName: ""
+ tls:
+ # -- Configure the clustermesh authentication mode.
+ # Supported values:
+ # - legacy: All clusters access remote clustermesh instances with the same
+ # username (i.e., remote). The "remote" certificate must be
+ # generated with CN=remote if provided manually.
+ # - migration: Intermediate mode required to upgrade from legacy to cluster
+ # (and vice versa) with no disruption. Specifically, it enables
+ # the creation of the per-cluster usernames, while still using
+ # the common one for authentication. The "remote" certificate must
+ # be generated with CN=remote if provided manually (same as legacy).
+ # - cluster: Each cluster accesses remote etcd instances with a username
+ # depending on the local cluster name (i.e., remote-<cluster-name>).
+ # The "remote" certificate must be generated with CN=remote-<cluster-name>
+ # if provided manually. Cluster mode is meaningful only when the same
+ # CA is shared across all clusters part of the mesh.
+ authMode: legacy
+ # -- Allow users to provide their own certificates
+ # Users may need to provide their certificates using
+ # a mechanism that requires they provide their own secrets.
+ # This setting does not apply to any of the auto-generated
+ # mechanisms below, it only restricts the creation of secrets
+ # via the `tls-provided` templates.
+ enableSecrets: true
+ # -- Configure automatic TLS certificate generation.
+ # A Kubernetes CronJob is used to generate any
+ # certificates not provided by the user at installation
+ # time.
+ auto:
+ # -- When set to true, automatically generate a CA and certificates to
+ # enable mTLS between clustermesh-apiserver and external workload instances.
+ # If set to false, the certs must be provided by setting appropriate values below.
+ enabled: true
+ # Sets the method to auto-generate certificates. Supported values:
+ # - helm: This method uses Helm to generate all certificates.
+ # - cronJob: This method uses a Kubernetes CronJob to generate any
+ # certificates not provided by the user at installation
+ # time.
+ # - certmanager: This method uses cert-manager to generate & rotate certificates.
+ method: helm
+ # -- Generated certificates' validity duration in days.
+ certValidityDuration: 1095
+ # -- Schedule for certificate regeneration (regardless of their expiration date).
+ # Only used if method is "cronJob". If nil, then no recurring job will be created.
+ # Instead, only the one-shot job is deployed to generate the certificates at
+ # installation time.
+ #
+ # Due to the out-of-band distribution of client certs to external workloads the
+ # CA is (re)generated only if it is not provided as a helm value and the k8s
+ # secret is manually deleted.
+ #
+ # Defaults to none. Commented syntax gives midnight of the first day of every
+ # fourth month. For syntax, see
+ # https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax
+ # schedule: "0 0 1 */4 *"
+
+ # [Example]
+ # certManagerIssuerRef:
+ # group: cert-manager.io
+ # kind: ClusterIssuer
+ # name: ca-issuer
+ # -- certmanager issuer used when clustermesh.apiserver.tls.auto.method=certmanager.
+ certManagerIssuerRef: {}
+ # -- base64 encoded PEM values for the clustermesh-apiserver server certificate and private key.
+ # Used if 'auto' is not enabled.
+ server:
+ cert: ""
+ key: ""
+ # -- Extra DNS names added to certificate when it's auto generated
+ extraDnsNames: []
+ # -- Extra IP addresses added to certificate when it's auto generated
+ extraIpAddresses: []
+ # -- base64 encoded PEM values for the clustermesh-apiserver admin certificate and private key.
+ # Used if 'auto' is not enabled.
+ admin:
+ cert: ""
+ key: ""
+ # -- base64 encoded PEM values for the clustermesh-apiserver client certificate and private key.
+ # Used if 'auto' is not enabled.
+ client:
+ cert: ""
+ key: ""
+ # -- base64 encoded PEM values for the clustermesh-apiserver remote cluster certificate and private key.
+ # Used if 'auto' is not enabled.
+ remote:
+ cert: ""
+ key: ""
+ # clustermesh-apiserver Prometheus metrics configuration
+ metrics:
+ # -- Enables exporting apiserver metrics in OpenMetrics format.
+ enabled: true
+ # -- Configure the port the apiserver metric server listens on.
+ port: 9962
+ kvstoremesh:
+ # -- Enables exporting KVStoreMesh metrics in OpenMetrics format.
+ enabled: true
+ # -- Configure the port the KVStoreMesh metric server listens on.
+ port: 9964
+ etcd:
+ # -- Enables exporting etcd metrics in OpenMetrics format.
+ enabled: true
+ # -- Set level of detail for etcd metrics; specify 'extensive' to include server side gRPC histogram metrics.
+ mode: basic
+ # -- Configure the port the etcd metric server listens on.
+ port: 9963
+ serviceMonitor:
+ # -- Enable service monitor.
+ # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
+ enabled: false
+ # -- Labels to add to ServiceMonitor clustermesh-apiserver
+ labels: {}
+ # -- Annotations to add to ServiceMonitor clustermesh-apiserver
+ annotations: {}
+ # -- Specify the Kubernetes namespace where Prometheus expects to find
+ # service monitors configured.
+ # namespace: "" + + # -- Interval for scrape metrics (apiserver metrics) + interval: "10s" + # @schema + # type: [null, array] + # @schema + # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics) + relabelings: ~ + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics) + metricRelabelings: ~ + kvstoremesh: + # -- Interval for scrape metrics (KVStoreMesh metrics) + interval: "10s" + # @schema + # type: [null, array] + # @schema + # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics) + relabelings: ~ + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics) + metricRelabelings: ~ + etcd: + # -- Interval for scrape metrics (etcd metrics) + interval: "10s" + # @schema + # type: [null, array] + # @schema + # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics) + relabelings: ~ + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics) + metricRelabelings: ~ +# -- Configure external workloads support +externalWorkloads: + # -- Enable support for external workloads, such as VMs (false by default). + enabled: false +# -- Configure cgroup related configuration +cgroup: + autoMount: + # -- Enable auto mount of cgroup2 filesystem. + # When `autoMount` is enabled, cgroup2 filesystem is mounted at + # `cgroup.hostRoot` path on the underlying host and inside the cilium agent pod. + # If users disable `autoMount`, it's expected that users have mounted + # cgroup2 filesystem at the specified `cgroup.hostRoot` volume, and then the + # volume will be mounted inside the cilium agent pod at the same path. + enabled: true + # -- Init Container Cgroup Automount resource limits & requests + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + # -- Configure cgroup root where cgroup2 filesystem is mounted on the host (see also: `cgroup.autoMount`) + hostRoot: /run/cilium/cgroupv2 +# -- Configure sysctl override described in #20072. +sysctlfix: + # -- Enable the sysctl override. When enabled, the init container will mount the /proc of the host so that the `sysctlfix` utility can execute. + enabled: true +# -- Configure whether to enable auto detect of terminating state for endpoints +# in order to support graceful termination. +enableK8sTerminatingEndpoint: true +# -- Configure whether to unload DNS policy rules on graceful shutdown +# dnsPolicyUnloadOnShutdown: false + +# -- Configure the key of the taint indicating that Cilium is not ready on the node. +# When set to a value starting with `ignore-taint.cluster-autoscaler.kubernetes.io/`, the Cluster Autoscaler will ignore the taint on its decisions, allowing the cluster to scale up. +agentNotReadyTaintKey: "node.cilium.io/agent-not-ready" +dnsProxy: + # -- Timeout (in seconds) when closing the connection between the DNS proxy and the upstream server. If set to 0, the connection is closed immediately (with TCP RST). If set to -1, the connection is closed asynchronously in the background. + socketLingerTimeout: 10 + # -- DNS response code for rejecting DNS requests, available options are '[nameError refused]'. 
+ dnsRejectResponseCode: refused
+ # -- Allow the DNS proxy to compress responses to endpoints that are larger than 512 bytes, or larger than the EDNS0 advertised size if present.
+ enableDnsCompression: true
+ # -- Maximum number of IPs to maintain per FQDN name for each endpoint.
+ endpointMaxIpPerHostname: 1000
+ # -- Time during which idle but previously active connections with expired DNS lookups are still considered alive.
+ idleConnectionGracePeriod: 0s
+ # -- Maximum number of IPs to retain for expired DNS lookups with still-active connections.
+ maxDeferredConnectionDeletes: 10000
+ # -- The minimum time, in seconds, to use DNS data for toFQDNs policies. If
+ # the upstream DNS server returns a DNS record with a shorter TTL, Cilium
+ # overwrites the TTL with this value. Setting this value to zero means that
+ # Cilium will honor the TTLs returned by the upstream DNS server.
+ minTtl: 0
+ # -- DNS cache data at this path is preloaded on agent startup.
+ preCache: ""
+ # -- Global port on which the in-agent DNS proxy should listen. The default of 0 means an OS-assigned port.
+ proxyPort: 0
+ # -- The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information.
+ proxyResponseMaxDelay: 100ms
+ # -- DNS proxy operation mode (true/false, or unset to use version dependent defaults)
+ # enableTransparentMode: true
+# -- SCTP Configuration Values
+sctp:
+ # -- Enable SCTP support. NOTE: SCTP support does not currently include port rewriting or multihoming.
+ enabled: false
+# -- Enable Non-Default-Deny policies
+enableNonDefaultDenyPolicies: true
+# Configuration for types of authentication for Cilium (beta)
+authentication:
+ # -- Enable authentication processing and garbage collection.
+ # Note that if disabled, policy enforcement will still block requests that require authentication.
+ # But the resulting authentication requests for these requests will not be processed, therefore the requests will not be allowed.
+ enabled: true
+ # -- Buffer size of the channel Cilium uses to receive authentication events from the signal map.
+ queueSize: 1024
+ # -- Buffer size of the channel Cilium uses to receive certificate expiration events from auth handlers.
+ rotatedIdentitiesQueueSize: 1024
+ # -- Interval for garbage collection of auth map entries.
+ gcInterval: "5m0s"
+ # Configuration for Cilium's service-to-service mutual authentication using TLS handshakes.
+ # Note that this is not full mTLS support without also enabling encryption of some form.
+ # Current encryption options are WireGuard or IPsec, configured in the encryption block above.
+ mutual:
+ # -- Port on the agent where mutual authentication handshakes between agents will be performed
+ port: 4250
+ # -- Timeout for connecting to the remote node TCP socket
+ connectTimeout: 5s
+ # Settings for SPIRE
+ spire:
+ # -- Enable SPIRE integration (beta)
+ enabled: false
+ # -- Annotations to be added to all top-level spire objects (resources under templates/spire)
+ annotations: {}
+ # Settings to control the SPIRE installation and configuration
+ install:
+ # -- Enable SPIRE installation.
+ # This will take effect only if authentication.mutual.spire.enabled is true
+ enabled: true
+ # -- SPIRE namespace to install into
+ namespace: cilium-spire
+ # -- SPIRE namespace already exists. Set to true if Helm should not create, manage, and import the SPIRE namespace.
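+ # For example, set this to true when cluster tooling pre-creates the
+ # "cilium-spire" namespace with its own labels and this Helm release
+ # must not take ownership of it.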
+ existingNamespace: false
+ # -- init container image of SPIRE agent and server
+ initImage:
+ # @schema
+ # type: [null, string]
+ # @schema
+ override: ~
+ repository: "docker.io/library/busybox"
+ tag: "1.37.0"
+ digest: "sha256:d82f458899c9696cb26a7c02d5568f81c8c8223f8661bb2a7988b269c8b9051e"
+ useDigest: true
+ pullPolicy: "IfNotPresent"
+ # SPIRE agent configuration
+ agent:
+ # -- The priority class to use for the spire agent
+ priorityClassName: ""
+ # -- SPIRE agent image
+ image:
+ # @schema
+ # type: [null, string]
+ # @schema
+ override: ~
+ repository: "ghcr.io/spiffe/spire-agent"
+ tag: "1.9.6"
+ digest: "sha256:5106ac601272a88684db14daf7f54b9a45f31f77bb16a906bd5e87756ee7b97c"
+ useDigest: true
+ pullPolicy: "IfNotPresent"
+ # -- SPIRE agent service account
+ serviceAccount:
+ create: true
+ name: spire-agent
+ # -- SPIRE agent annotations
+ annotations: {}
+ # -- SPIRE agent labels
+ labels: {}
+ # -- container resource limits & requests
+ resources: {}
+ # -- SPIRE Workload Attestor kubelet verification.
+ skipKubeletVerification: true
+ # -- SPIRE agent tolerations configuration
+ # By default it follows the same tolerations as the agent itself
+ # to allow the Cilium agent on this node to connect to SPIRE.
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations:
+ - key: node.kubernetes.io/not-ready
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ effect: NoSchedule
+ value: "true"
+ - key: CriticalAddonsOnly
+ operator: "Exists"
+ # -- SPIRE agent affinity configuration
+ affinity: {}
+ # -- SPIRE agent nodeSelector configuration
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector: {}
+ # -- Security context to be added to spire agent pods.
+ # SecurityContext holds pod-level security attributes and common container settings.
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+ podSecurityContext: {}
+ # -- Security context to be added to spire agent containers.
+ # SecurityContext holds pod-level security attributes and common container settings.
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+ securityContext: {}
+ server:
+ # -- The priority class to use for the spire server
+ priorityClassName: ""
+ # -- SPIRE server image
+ image:
+ # @schema
+ # type: [null, string]
+ # @schema
+ override: ~
+ repository: "ghcr.io/spiffe/spire-server"
+ tag: "1.9.6"
+ digest: "sha256:59a0b92b39773515e25e68a46c40d3b931b9c1860bc445a79ceb45a805cab8b4"
+ useDigest: true
+ pullPolicy: "IfNotPresent"
+ # -- SPIRE server service account
+ serviceAccount:
+ create: true
+ name: spire-server
+ # -- SPIRE server init containers
+ initContainers: []
+ # -- SPIRE server annotations
+ annotations: {}
+ # -- SPIRE server labels
+ labels: {}
+ # SPIRE server service configuration
+ # -- container resource limits & requests
+ resources: {}
+ service:
+ # -- Service type for the SPIRE server service
+ type: ClusterIP
+ # -- Annotations to be added to the SPIRE server service
+ annotations: {}
+ # -- Labels to be added to the SPIRE server service
+ labels: {}
+ # -- SPIRE server affinity configuration
+ affinity: {}
+ # -- SPIRE server nodeSelector configuration
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector: {}
+ # -- SPIRE server tolerations configuration
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations: []
+ # SPIRE server datastorage configuration
+ dataStorage:
+ # -- Enable SPIRE server data storage
+ enabled: true
+ # -- Size of the SPIRE server data storage
+ size: 1Gi
+ # -- Access mode of the SPIRE server data storage
+ accessMode: ReadWriteOnce
+ # @schema
+ # type: [null, string]
+ # @schema
+ # -- StorageClass of the SPIRE server data storage
+ storageClass: null
+ # -- Security context to be added to spire server pods.
+ # SecurityContext holds pod-level security attributes and common container settings.
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+ podSecurityContext: {}
+ # -- Security context to be added to spire server containers.
+ # SecurityContext holds pod-level security attributes and common container settings.
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+ securityContext: {}
+ # SPIRE CA configuration
+ ca:
+ # -- SPIRE CA key type
+ # AWS requires the use of RSA. EC cryptography is not supported
+ keyType: "rsa-4096"
+ # -- SPIRE CA Subject
+ subject:
+ country: "US"
+ organization: "SPIRE"
+ commonName: "Cilium SPIRE CA"
+ # @schema
+ # type: [null, string]
+ # @schema
+ # -- SPIRE server address used by Cilium Operator
+ #
+ # If a Kubernetes Service DNS name along with the port number is used (e.g. <service-name>.<namespace>.svc(.*):<port-number> format),
+ # Cilium Operator will resolve its address by looking up the clusterIP from the Service resource.
+ #
+ # Example values: 10.0.0.1:8081, spire-server.cilium-spire.svc:8081
+ serverAddress: ~
+ # -- SPIFFE trust domain to use for fetching certificates
+ trustDomain: spiffe.cilium
+ # -- SPIRE socket path where the SPIRE delegated api agent is listening
+ adminSocketPath: /run/spire/sockets/admin.sock
+ # -- SPIRE socket path where the SPIRE workload agent is listening.
+ # Applies to both the Cilium Agent and Operator
+ agentSocketPath: /run/spire/sockets/agent/agent.sock
+ # -- SPIRE connection timeout
+ connectionTimeout: 30s
+# -- Enable Internal Traffic Policy
+enableInternalTrafficPolicy: true
+# -- Enable LoadBalancer IP Address Management
+enableLBIPAM: true
+
diff --git a/cilium/src/values1.18.2.yaml b/cilium/src/values1.18.2.yaml
new file mode 100644
index 0000000..078b397
--- /dev/null
+++ b/cilium/src/values1.18.2.yaml
@@ -0,0 +1,4012 @@
+# File generated by install/kubernetes/Makefile; DO NOT EDIT.
+# This file is based on install/kubernetes/cilium/*values.yaml.tmpl.
+
+
+# @schema
+# type: [null, string]
+# @schema
+# -- namespaceOverride allows to override the destination namespace for Cilium resources.
+# This property allows to use Cilium as part of an Umbrella Chart with different targets.
+namespaceOverride: ""
+# @schema
+# type: [null, object]
+# @schema
+# -- commonLabels allows users to add common labels for all Cilium resources.
+commonLabels: {}
+# @schema
+# type: [null, string]
+# @schema
+# -- upgradeCompatibility helps users upgrading to ensure that the configMap for
+# Cilium will not change critical values to ensure continued operation
+# This flag is not required for new installations.
+# For example: '1.7', '1.8', '1.9'
+upgradeCompatibility: null
+debug:
+ # -- Enable debug logging
+ enabled: false
+ # @schema
+ # type: [null, string]
+ # @schema
+ # -- Configure verbosity levels for debug logging
+ # This option is used to enable debug messages for operations related to
+ # sub-systems (e.g. kvstore, envoy, datapath or policy), while flow
+ # enables debug messages emitted per request, message and connection.
+ # Multiple values can be set via a space-separated string (e.g. "datapath envoy").
+ #
+ # Applicable values:
+ # - flow
+ # - kvstore
+ # - envoy
+ # - datapath
+ # - policy
+ verbose: ~
+ # -- Set the agent-internal metrics sampling interval. The resulting
+ # samples are available via the "cilium-dbg shell -- metrics -s" command
+ # and are part of the metrics HTML page included in the sysdump.
+ # @schema
+ # type: [null, string]
+ # @schema
+ metricsSamplingInterval: "5m"
+rbac:
+ # -- Enable creation of Role-Based Access Control configuration.
+ create: true
+# -- Configure image pull secrets for pulling container images
+imagePullSecrets: []
+# - name: "image-pull-secret"
+
+# -- Configure iptables --random-fully. Disabled by default. View https://github.com/cilium/cilium/issues/13037 for more information.
+iptablesRandomFully: false
+# -- (string) Kubernetes config path
+# @default -- `"~/.kube/config"`
+kubeConfigPath: ""
+# -- Configure the Kubernetes service endpoint dynamically using a ConfigMap. Mutually exclusive with `k8sServiceHost`.
+k8sServiceHostRef:
+ # @schema
+ # type: [string, null]
+ # @schema
+ # -- (string) Name of the ConfigMap containing the Kubernetes service endpoint
+ name:
+ # @schema
+ # type: [string, null]
+ # @schema
+ # -- (string) Key in the ConfigMap containing the Kubernetes service endpoint
+ key:
+# -- (string) Kubernetes service host - use "auto" for automatic lookup from the cluster-info ConfigMap
+k8sServiceHost: localhost
+# @schema
+# type: [string, integer]
+# @schema
+# -- (string) Kubernetes service port
+k8sServicePort: 7445
+# @schema
+# type: [null, string]
+# @schema
+# -- (string) When `k8sServiceHost=auto`, allows to customize the configMap name. It defaults to `cluster-info`.
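+# For instance, kubeadm-based clusters publish the API server endpoint in a
+# "cluster-info" ConfigMap in "kube-public", which is what these lookup
+# defaults point at.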
+k8sServiceLookupConfigMapName: "" +# @schema +# type: [null, string] +# @schema +# -- (string) When `k8sServiceHost=auto`, allows to customize the namespace that contains `k8sServiceLookupConfigMapName`. It defaults to `kube-public`. +k8sServiceLookupNamespace: "" +# -- Configure the client side rate limit for the agent +# +# If the amount of requests to the Kubernetes API server exceeds the configured +# rate limit, the agent will start to throttle requests by delaying +# them until there is budget or the request times out. +k8sClientRateLimit: + # @schema + # type: [null, integer] + # @schema + # -- (int) The sustained request rate in requests per second. + # @default -- 10 + qps: 20 + # @schema + # type: [null, integer] + # @schema + # -- (int) The burst request rate in requests per second. + # The rate limiter will allow short bursts with a higher rate. + # @default -- 20 + burst: 100 + # -- Configure the client side rate limit for the Cilium Operator + operator: + # @schema + # type: [null, integer] + # @schema + # -- (int) The sustained request rate in requests per second. + # @default -- 100 + qps: + # @schema + # type: [null, integer] + # @schema + # -- (int) The burst request rate in requests per second. + # The rate limiter will allow short bursts with a higher rate. + # @default -- 200 + burst: +# -- Configure exponential backoff for client-go in Cilium agent. +k8sClientExponentialBackoff: + # -- Enable exponential backoff for client-go in Cilium agent. + enabled: true + # -- Configure base (in seconds) for exponential backoff. + backoffBaseSeconds: 1 + # -- Configure maximum duration (in seconds) for exponential backoff. + backoffMaxDurationSeconds: 120 +cluster: + # -- Name of the cluster. Only required for Cluster Mesh and mutual authentication with SPIRE. + # It must respect the following constraints: + # * It must contain at most 32 characters; + # * It must begin and end with a lower case alphanumeric character; + # * It may contain lower case alphanumeric characters and dashes between. + # The "default" name cannot be used if the Cluster ID is different from 0. + name: talos + # -- (int) Unique ID of the cluster. Must be unique across all connected + # clusters and in the range of 1 to 255. Only required for Cluster Mesh, + # may be 0 if Cluster Mesh is not used. + id: 1 +# -- Define serviceAccount names for components. +# @default -- Component's fully qualified name. +serviceAccounts: + cilium: + create: true + name: cilium + automount: true + annotations: {} + nodeinit: + create: true + # -- Enabled is temporary until https://github.com/cilium/cilium-cli/issues/1396 is implemented. + # Cilium CLI doesn't create the SAs for node-init, thus the workaround. Helm is not affected by + # this issue. Name and automount can be configured, if enabled is set to true. + # Otherwise, they are ignored. Enabled can be removed once the issue is fixed. + # Cilium-nodeinit DS must also be fixed. 
+ enabled: false + name: cilium-nodeinit + automount: true + annotations: {} + envoy: + create: true + name: cilium-envoy + automount: true + annotations: {} + operator: + create: true + name: cilium-operator + automount: true + annotations: {} + preflight: + create: true + name: cilium-pre-flight + automount: true + annotations: {} + relay: + create: true + name: hubble-relay + automount: false + annotations: {} + ui: + create: true + name: hubble-ui + automount: true + annotations: {} + clustermeshApiserver: + create: true + name: clustermesh-apiserver + automount: true + annotations: {} + # -- Clustermeshcertgen is used if clustermesh.apiserver.tls.auto.method=cronJob + clustermeshcertgen: + create: true + name: clustermesh-apiserver-generate-certs + automount: true + annotations: {} + # -- Hubblecertgen is used if hubble.tls.auto.method=cronJob + hubblecertgen: + create: true + name: hubble-generate-certs + automount: true + annotations: {} +# -- Configure termination grace period for cilium-agent DaemonSet. +terminationGracePeriodSeconds: 1 +# -- Install the cilium agent resources. +agent: true +# -- Agent daemonset name. +name: cilium +# -- Roll out cilium agent pods automatically when configmap is updated. +rollOutCiliumPods: true +# -- Agent container image. +image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/cilium" + tag: "v1.18.2" + pullPolicy: "IfNotPresent" + # cilium-digest + digest: "sha256:858f807ea4e20e85e3ea3240a762e1f4b29f1cb5bbd0463b8aa77e7b097c0667" + useDigest: true +# -- Scheduling configurations for cilium pods +scheduling: + # @schema + # enum: ["anti-affinity", "kube-scheduler"] + # @schema + # -- Mode specifies how Cilium daemonset pods should be scheduled to Nodes. + # `anti-affinity` mode applies a pod anti-affinity rule to the cilium daemonset. + # Pod anti-affinity may significantly impact scheduling throughput for large clusters. + # See: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + # `kube-scheduler` mode forgoes the anti-affinity rule for full scheduling throughput. + # Kube-scheduler avoids host port conflict when scheduling pods. + # @default -- Defaults to apply a pod anti-affinity rule to the agent pod - `anti-affinity` + mode: anti-affinity +# -- Affinity for cilium-agent. +affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium +# -- Node selector for cilium-agent. +nodeSelector: + kubernetes.io/os: linux +# -- Node tolerations for agent scheduling to nodes with taints +# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ +tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" +# -- The priority class to use for cilium-agent. +priorityClassName: "" +# -- DNS policy for Cilium agent pods. +# Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy +dnsPolicy: "" +# -- Additional containers added to the cilium DaemonSet. +extraContainers: [] +# -- Additional initContainers added to the cilium Daemonset. +extraInitContainers: [] +# -- Additional agent container arguments. +extraArgs: [] +# -- Additional agent container environment variables. +extraEnv: [] +# -- Additional agent hostPath mounts. 
+extraHostPathMounts: []
+# - name: host-mnt-data
+# mountPath: /host/mnt/data
+# hostPath: /mnt/data
+# hostPathType: Directory
+# readOnly: true
+# mountPropagation: HostToContainer
+
+# -- Additional agent volumes.
+extraVolumes: []
+# -- Additional agent volumeMounts.
+extraVolumeMounts: []
+# -- extraConfig allows you to specify additional configuration parameters to be
+# included in the cilium-config configmap.
+extraConfig: {}
+# my-config-a: "1234"
+# my-config-b: |-
+# test 1
+# test 2
+# test 3
+
+# -- Annotations to be added to all top-level cilium-agent objects (resources under templates/cilium-agent)
+annotations: {}
+# -- Security Context for cilium-agent pods.
+podSecurityContext:
+ # -- AppArmorProfile options for the `cilium-agent` and init containers
+ appArmorProfile:
+ type: "Unconfined"
+ seccompProfile:
+ type: "Unconfined"
+# -- Annotations to be added to agent pods
+podAnnotations: {}
+# -- Labels to be added to agent pods
+podLabels: {}
+# -- Agent resource limits & requests
+# ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+resources: {}
+# limits:
+# cpu: 4000m
+# memory: 4Gi
+# requests:
+# cpu: 100m
+# memory: 512Mi
+
+# -- resources & limits for the agent init containers
+initResources: {}
+securityContext:
+ # -- User to run the pod with
+ # runAsUser: 0
+ # -- disable privilege escalation
+ allowPrivilegeEscalation: false
+ # -- Run the pod with elevated privileges
+ privileged: false
+ # -- SELinux options for the `cilium-agent` and init containers
+ seLinuxOptions:
+ level: 's0'
+ # Running with spc_t since we have removed the privileged mode.
+ # Users can change it to a different type as long as they have the
+ # type available on the system.
+ type: 'spc_t'
+ capabilities:
+ # -- Capabilities for the `cilium-agent` container
+ ciliumAgent:
+ # Used to set socket permission
+ - CHOWN
+ # Used to terminate envoy child process
+ - KILL
+ # Used since cilium modifies routing tables, etc...
+ - NET_ADMIN
+ # Used since cilium creates raw sockets, etc...
+ - NET_RAW
+ # Used since cilium monitor uses mmap
+ - IPC_LOCK
+ # Used in iptables. Consider removing once we are iptables-free
+ #- SYS_MODULE
+ # Needed to switch network namespaces (used for health endpoint, socket-LB).
+ # We need it for now but might not need it for >= 5.11 specially
+ # for the 'SYS_RESOURCE'.
+ # In >= 5.8 there's already BPF and PERFMON capabilities
+ - SYS_ADMIN
+ # Could be an alternative for the SYS_ADMIN for the RLIMIT_NPROC
+ - SYS_RESOURCE
+ # Both PERFMON and BPF require kernel 5.8, container runtime
+ # cri-o >= v1.22.0 or containerd >= v1.5.0.
+ # If available, SYS_ADMIN can be removed.
+ #- PERFMON
+ #- BPF
+ # Allow discretionary access control (e.g. required for package installation)
+ - DAC_OVERRIDE
+ # Allow to set Access Control Lists (ACLs) on arbitrary files (e.g. required for package installation)
+ - FOWNER
+ # Allow to execute program that changes GID (e.g. required for package installation)
+ - SETGID
+ # Allow to execute program that changes UID (e.g. required for package installation)
+ - SETUID
+ # -- Capabilities for the `mount-cgroup` init container
+ mountCgroup:
+ # Only used for 'mount' cgroup
+ - SYS_ADMIN
+ # Used for nsenter
+ - SYS_CHROOT
+ - SYS_PTRACE
+ # -- capabilities for the `apply-sysctl-overwrites` init container
+ applySysctlOverwrites:
+ # Required in order to access host's /etc/sysctl.d dir
+ - SYS_ADMIN
+ # Used for nsenter
+ - SYS_CHROOT
+ - SYS_PTRACE
+ # -- Capabilities for the `clean-cilium-state` init container
+ cleanCiliumState:
+ # Most of the capabilities here are the same ones used in the
+ # cilium-agent's container because this container can be used to
+ # uninstall all Cilium resources, and therefore it is likely that
+ # it will need the same capabilities.
+ # Used since cilium modifies routing tables, etc...
+ - NET_ADMIN
+ # Used in iptables. Consider removing once we are iptables-free
+ #- SYS_MODULE
+ # We need it for now but might not need it for >= 5.11 specially
+ # for the 'SYS_RESOURCE'.
+ # In >= 5.8 there's already BPF and PERFMON capabilities
+ - SYS_ADMIN
+ # Could be an alternative for the SYS_ADMIN for the RLIMIT_NPROC
+ - SYS_RESOURCE
+ # Both PERFMON and BPF require kernel 5.8, container runtime
+ # cri-o >= v1.22.0 or containerd >= v1.5.0.
+ # If available, SYS_ADMIN can be removed.
+ #- PERFMON
+ #- BPF
+# -- Cilium agent update strategy
+updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ # @schema
+ # type: [integer, string]
+ # @schema
+ maxUnavailable: 2
+# Configuration Values for cilium-agent
+aksbyocni:
+ # -- Enable AKS BYOCNI integration.
+ # Note that this is incompatible with AKS clusters not created in BYOCNI mode:
+ # use Azure integration (`azure.enabled`) instead.
+ enabled: false
+# @schema
+# type: [boolean, string]
+# @schema
+# -- Enable installation of PodCIDR routes between worker
+# nodes if worker nodes share a common L2 network segment.
+autoDirectNodeRoutes: false
+# -- Enable skipping of PodCIDR routes between worker
+# nodes if the worker nodes are in a different L2 network segment.
+directRoutingSkipUnreachable: false
+# -- Annotate k8s node upon initialization with Cilium's metadata.
+annotateK8sNode: false
+azure:
+ # -- Enable Azure integration.
+ # Note that this is incompatible with AKS clusters created in BYOCNI mode: use
+ # AKS BYOCNI integration (`aksbyocni.enabled`) instead.
+ enabled: false
+ # usePrimaryAddress: false
+ # resourceGroup: group1
+ # subscriptionID: 00000000-0000-0000-0000-000000000000
+ # tenantID: 00000000-0000-0000-0000-000000000000
+ # clientID: 00000000-0000-0000-0000-000000000000
+ # clientSecret: 00000000-0000-0000-0000-000000000000
+ # userAssignedIdentityID: 00000000-0000-0000-0000-000000000000
+alibabacloud:
+ # -- Enable AlibabaCloud ENI integration
+ enabled: false
+# -- Enable bandwidth manager to optimize TCP and UDP workloads and allow
+# for rate-limiting traffic from individual Pods with EDT (Earliest Departure
+# Time) through the "kubernetes.io/egress-bandwidth" Pod annotation.
+bandwidthManager:
+ # -- Enable bandwidth manager infrastructure (also a prerequisite for BBR)
+ enabled: false
+ # -- Activate BBR TCP congestion control for Pods
+ bbr: false
+ # -- Activate BBR TCP congestion control for Pods in the host namespace only.
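+ # As a usage sketch, a Pod opting in to egress shaping would carry (the
+ # value here is illustrative):
+ #   metadata:
+ #     annotations:
+ #       kubernetes.io/egress-bandwidth: "10M"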
+ bbrHostNamespaceOnly: false +# -- Configure standalone NAT46/NAT64 gateway +nat46x64Gateway: + # -- Enable RFC6052-prefixed translation + enabled: false +# -- Configure L2 announcements +l2announcements: + # -- Enable L2 announcements + enabled: true + # -- If a lease is not renewed for X duration, the current leader is considered dead, a new leader is picked + # leaseDuration: 15s + # -- The interval at which the leader will renew the lease + # leaseRenewDeadline: 5s + # -- The timeout between retries if renewal fails + # leaseRetryPeriod: 2s +# -- Configure L2 pod announcements +l2podAnnouncements: + # -- Enable L2 pod announcements + enabled: false + # -- Interface used for sending Gratuitous ARP pod announcements + interface: "eth0" + # -- A regular expression matching interfaces used for sending Gratuitous ARP pod announcements + # interfacePattern: "" +# -- This feature set enables virtual BGP routers to be created via +# CiliumBGPPeeringPolicy CRDs. +bgpControlPlane: + # -- Enables the BGP control plane. + enabled: false + # -- SecretsNamespace is the namespace which BGP support will retrieve secrets from. + secretsNamespace: + # -- Create secrets namespace for BGP secrets. + create: false + # -- The name of the secret namespace to which Cilium agents are given read access + name: kube-system + # -- Status reporting settings (BGPv2 only) + statusReport: + # -- Enable/Disable BGPv2 status reporting + # It is recommended to enable status reporting in general, but if you have any issue + # such as high API server load, you can disable it by setting this to false. + enabled: true + # -- BGP router-id allocation mode + routerIDAllocation: + # -- BGP router-id allocation mode. In default mode, the router-id is derived from the IPv4 address if it is available, or else it is determined by the lower 32 bits of the MAC address. + mode: "default" + # -- IP pool to allocate the BGP router-id from when the mode is ip-pool. + ipPool: "" + # -- Legacy BGP ORIGIN attribute settings (BGPv2 only) + legacyOriginAttribute: + # -- Enable/Disable advertising LoadBalancerIP routes with the legacy + # BGP ORIGIN attribute value INCOMPLETE (2) instead of the default IGP (0). + # Enable for compatibility with the legacy behavior of MetalLB integration. + enabled: false +pmtuDiscovery: + # -- Enable path MTU discovery to send ICMP fragmentation-needed replies to + # the client. + enabled: false +bpf: + autoMount: + # -- Enable automatic mount of BPF filesystem + # When `autoMount` is enabled, the BPF filesystem is mounted at + # `bpf.root` path on the underlying host and inside the cilium agent pod. + # If users disable `autoMount`, it's expected that users have mounted + # bpffs filesystem at the specified `bpf.root` volume, and then the + # volume will be mounted inside the cilium agent pod at the same path. + enabled: true + # -- Configure the mount point for the BPF filesystem + root: /sys/fs/bpf + # -- Enables pre-allocation of eBPF map values. This increases + # memory usage but can reduce latency. + preallocateMaps: false + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of entries in auth map. + # @default -- `524288` + authMapMax: ~ + # -- Enable CT accounting for packets and bytes + ctAccounting: false + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of entries in the TCP connection tracking + # table. 
+ # @default -- `524288` + ctTcpMax: ~ + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of entries for the non-TCP connection + # tracking table. + # @default -- `262144` + ctAnyMax: ~ + # -- Control to use a distributed per-CPU backend memory for the core BPF LRU maps + # which Cilium uses. This improves performance significantly, but it is also + # recommended to increase BPF map sizing along with that. + distributedLRU: + # -- Enable distributed LRU backend memory. For compatibility with existing + # installations it is off by default. + enabled: false + # -- Control events generated by the Cilium datapath exposed to Cilium monitor and Hubble. + # Helm configuration for BPF events map rate limiting is experimental and might change + # in upcoming releases. + events: + # -- Default settings for all types of events except dbg and pcap. + default: + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the limit of messages per second that can be written to + # BPF events map. The number of messages is averaged, meaning that if no messages + # were written to the map over 5 seconds, it's possible to write more events + # in the 6th second. If rateLimit is greater than 0, non-zero value for burstLimit must + # also be provided lest the configuration is considered invalid. Setting both burstLimit + # and rateLimit to 0 disables BPF events rate limiting. + # @default -- `0` + rateLimit: ~ + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of messages that can be written to BPF events + # map in 1 second. If burstLimit is greater than 0, non-zero value for rateLimit must + # also be provided lest the configuration is considered invalid. Setting both burstLimit + # and rateLimit to 0 disables BPF events rate limiting. + # @default -- `0` + burstLimit: ~ + drop: + # -- Enable drop events. + enabled: true + policyVerdict: + # -- Enable policy verdict events. + enabled: true + trace: + # -- Enable trace events. + enabled: true + # @schema + # type: [null, integer] + # @schema + # -- Configure the maximum number of service entries in the + # load balancer maps. + lbMapMax: 65536 + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of entries for the NAT table. + # @default -- `524288` + natMax: ~ + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of entries for the neighbor table. + # @default -- `524288` + neighMax: ~ + # @schema + # type: [null, integer] + # @schema + # @default -- `16384` + # -- (int) Configures the maximum number of entries for the node table. + nodeMapMax: ~ + # -- Configure the maximum number of entries in endpoint policy map (per endpoint). + # @schema + # type: [null, integer] + # @schema + policyMapMax: 16384 + # -- Configure the maximum number of entries in global policy stats map. + # @schema + # type: [null, integer] + # @schema + policyStatsMapMax: 65536 + # @schema + # type: [null, number, string] + # @schema + # -- (float64) Configure auto-sizing for all BPF maps based on available memory. + # ref: https://docs.cilium.io/en/stable/network/ebpf/maps/ + # @default -- `0.0025` + mapDynamicSizeRatio: ~ + # -- Configure the level of aggregation for monitor notifications. + # Valid options are none, low, medium, maximum. + monitorAggregation: medium + # -- Configure the typical time between monitor notifications for + # active connections. 
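+ # For example, at "medium" aggregation, repeat trace events on an active
+ # connection are suppressed until this interval has elapsed.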
+ monitorInterval: "5s"
+ # -- Configure which TCP flags trigger notifications when seen for the
+ # first time in a connection.
+ monitorFlags: "all"
+ # -- (bool) Allow cluster external access to ClusterIP services.
+ # @default -- `false`
+ lbExternalClusterIP: false
+ # -- (bool) Enable loadBalancerSourceRanges CIDR filtering for all service
+ # types, not just LoadBalancer services. The corresponding NodePort and
+ # ClusterIP (if enabled for cluster-external traffic) will also apply the
+ # CIDR filter.
+ # @default -- `false`
+ lbSourceRangeAllTypes: false
+ # -- (bool) Enable the option to define the load balancing algorithm on
+ # a per-service basis through service.cilium.io/lb-algorithm annotation.
+ # @default -- `false`
+ lbAlgorithmAnnotation: false
+ # -- (bool) Enable the option to define the load balancing mode (SNAT or DSR)
+ # on a per-service basis through service.cilium.io/forwarding-mode annotation.
+ # @default -- `false`
+ lbModeAnnotation: false
+ # @schema
+ # type: [null, boolean]
+ # @schema
+ # -- (bool) Enable native IP masquerade support in eBPF
+ # @default -- `false`
+ masquerade: ~
+ # @schema
+ # type: [null, boolean]
+ # @schema
+ # -- (bool) Configure whether direct routing mode should route traffic via
+ # host stack (true) or directly and more efficiently out of BPF (false) if
+ # the kernel supports it. The latter has the implication that it will also
+ # bypass netfilter in the host namespace.
+ # @default -- `false`
+ hostLegacyRouting: true
+ # @schema
+ # type: [null, boolean]
+ # @schema
+ # -- (bool) Configure the eBPF-based TPROXY (beta) to reduce reliance on iptables rules
+ # for implementing Layer 7 policy.
+ # @default -- `false`
+ tproxy: ~
+ # @schema
+ # type: [null, array]
+ # @schema
+ # -- (list) Configure explicitly allowed VLAN IDs for bpf logic bypass.
+ # [0] will allow all VLAN IDs without any filtering.
+ # @default -- `[]`
+ vlanBypass: ~
+ # -- (bool) Disable ExternalIP mitigation (CVE-2020-8554)
+ # @default -- `false`
+ disableExternalIPMitigation: false
+ # -- (bool) Attach endpoint programs using tcx instead of legacy tc hooks on
+ # supported kernels.
+ # @default -- `true`
+ enableTCX: true
+ # -- (string) Mode for Pod devices for the core datapath (veth, netkit, netkit-l2)
+ # @default -- `veth`
+ datapathMode: veth
+# -- Enable BPF clock source probing for more efficient tick retrieval.
+bpfClockProbe: false
+# -- Clean all eBPF datapath state from the initContainer of the cilium-agent
+# DaemonSet.
+#
+# WARNING: Use with care!
+cleanBpfState: false
+# -- Clean all local Cilium state from the initContainer of the cilium-agent
+# DaemonSet. Implies cleanBpfState: true.
+#
+# WARNING: Use with care!
+cleanState: false
+# -- Wait for KUBE-PROXY-CANARY iptables rule to appear in "wait-for-kube-proxy"
+# init container before launching cilium-agent.
+# More context can be found in the commit message of the PR below:
+# https://github.com/cilium/cilium/pull/20123
+waitForKubeProxy: false
+cni:
+ # -- Install the CNI configuration and binary files into the filesystem.
+ install: true
+ # -- Remove the CNI configuration and binary files on agent shutdown. Enable this
+ # if you're removing Cilium from the cluster. Disable this to prevent the CNI
+ # configuration file from being removed during agent upgrade, which can cause
+ # nodes to become unmanageable.
+ uninstall: false
+ # @schema
+ # type: [null, string]
+ # @schema
+ # -- Configure chaining on top of other CNI plugins. Possible values:
Possible values:
+  # - none
+  # - aws-cni
+  # - flannel
+  # - generic-veth
+  # - portmap
+  chainingMode: ~
+  # @schema
+  # type: [null, string]
+  # @schema
+  # -- A CNI network name into which the Cilium plugin should be added as a chained plugin.
+  # This will cause the agent to watch for a CNI network with this network name. When it is
+  # found, this will be used as the basis for Cilium's CNI configuration file. If this is
+  # set, it assumes a chaining mode of generic-veth. As a special case, a chaining mode
+  # of aws-cni implies a chainingTarget of aws-cni.
+  chainingTarget: ~
+  # -- Make Cilium take ownership over the `/etc/cni/net.d` directory on the
+  # node, renaming all non-Cilium CNI configurations to `*.cilium_bak`.
+  # This ensures no Pods can be scheduled using other CNI plugins during Cilium
+  # agent downtime.
+  exclusive: true
+  # -- Configure the log file for CNI logging with a retention policy of 7 days.
+  # Disable CNI file logging by setting this field to empty explicitly.
+  logFile: /var/run/cilium/cilium-cni.log
+  # -- Skip writing of the CNI configuration. This can be used if
+  # writing of the CNI configuration is performed by external automation.
+  customConf: false
+  # -- Configure the path to the CNI configuration directory on the host.
+  confPath: /etc/cni/net.d
+  # -- Configure the path to the CNI binary directory on the host.
+  binPath: /opt/cni/bin
+  # -- Specify the path to a CNI config to read from on agent start.
+  # This can be useful if you want to manage your CNI
+  # configuration outside of a Kubernetes environment. This parameter is
+  # mutually exclusive with the 'cni.configMap' parameter. The agent will
+  # write this to 05-cilium.conflist on startup.
+  # readCniConf: /host/etc/cni/net.d/05-sample.conflist.input
+
+  # -- When defined, configMap will mount the provided value as ConfigMap and
+  # interpret the 'cni.configMapKey' value as CNI configuration file and write it
+  # when the agent starts up.
+  configMap: ""
+  # -- Configure the key in the CNI ConfigMap to read the contents of
+  # the CNI configuration from. For this to be effective, the 'cni.configMap'
+  # parameter must be specified too.
+  # Note that the 'cni.configMap' parameter is the name of the ConfigMap, while
+  # 'cni.configMapKey' is the name of the key in the ConfigMap data containing
+  # the actual configuration.
+  configMapKey: cni-config
+  # -- Configure the path to where to mount the ConfigMap inside the agent pod.
+  confFileMountPath: /tmp/cni-configuration
+  # -- Configure the path to where the CNI configuration directory is mounted
+  # inside the agent pod.
+  hostConfDirMountPath: /host/etc/cni/net.d
+  # -- Specifies the resources for the cni initContainer
+  resources:
+    requests:
+      cpu: 100m
+      memory: 10Mi
+  # -- Enable route MTU for pod netns when CNI chaining is used
+  enableRouteMTUForCNIChaining: false
+  # -- Enable the removal of iptables rules created by the AWS CNI VPC plugin.
+  iptablesRemoveAWSRules: true
+# @schema
+# type: [null, number]
+# @schema
+# -- (float64) Ratio of the connectivity probe frequency vs resource usage, a float in
+# [0, 1]. 0 will give more frequent probing, 1 will give less frequent probing. Probing
+# frequency is dynamically adjusted based on the cluster size.
+# @default -- `0.5`
+connectivityProbeFrequencyRatio: ~
+# -- (string) Configure how frequently garbage collection should occur for the datapath
+# connection tracking table.
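+# For instance (assumed value, not a chart default), to force a fixed GC
+# interval instead of the automatically computed one:
+#
+#   conntrackGCInterval: "12h0m0s"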
+# @default -- `"0s"` +conntrackGCInterval: "" +# -- (string) Configure the maximum frequency for the garbage collection of the +# connection tracking table. Only affects the automatic computation for the frequency +# and has no effect when 'conntrackGCInterval' is set. This can be set to more frequently +# clean up unused identities created from ToFQDN policies. +conntrackGCMaxInterval: "" +# -- (string) Configure timeout in which Cilium will exit if CRDs are not available +# @default -- `"5m"` +crdWaitTimeout: "" +# -- Tail call hooks for custom eBPF programs. +customCalls: + # -- Enable tail call hooks for custom eBPF programs. + enabled: false +daemon: + # -- Configure where Cilium runtime state should be stored. + runPath: "/var/run/cilium" + # @schema + # type: [null, string] + # @schema + # -- Configure a custom list of possible configuration override sources + # The default is "config-map:cilium-config,cilium-node-config". For supported + # values, see the help text for the build-config subcommand. + # Note that this value should be a comma-separated string. + configSources: ~ + # @schema + # type: [null, string] + # @schema + # -- allowedConfigOverrides is a list of config-map keys that can be overridden. + # That is to say, if this value is set, config sources (excepting the first one) can + # only override keys in this list. + # + # This takes precedence over blockedConfigOverrides. + # + # By default, all keys may be overridden. To disable overrides, set this to "none" or + # change the configSources variable. + allowedConfigOverrides: ~ + # @schema + # type: [null, string] + # @schema + # -- blockedConfigOverrides is a list of config-map keys that may not be overridden. + # In other words, if any of these keys appear in a configuration source excepting the + # first one, they will be ignored + # + # This is ignored if allowedConfigOverrides is set. + # + # By default, all keys may be overridden. + blockedConfigOverrides: ~ + # @schema + # type: [null, boolean] + # @schema + # -- enableSourceIPVerification is a boolean flag to enable or disable the Source IP verification + # of endpoints. This flag is useful when Cilium is chained with other CNIs. + # + # By default, this functionality is enabled + enableSourceIPVerification: true +# -- Specify which network interfaces can run the eBPF datapath. This means +# that a packet sent from a pod to a destination outside the cluster will be +# masqueraded (to an output device IPv4 address), if the output device runs the +# program. When not specified, probing will automatically detect devices that have +# a non-local route. This should be used only when autodetection is not suitable. +devices: eth+ + +# -- Forces the auto-detection of devices, even if specific devices are explicitly listed +forceDeviceDetection: false +# -- Chains to ignore when installing feeder rules. +# disableIptablesFeederRules: "" + +# -- Limit iptables-based egress masquerading to interface selector. +# egressMasqueradeInterfaces: "" + +# -- Enable setting identity mark for local traffic. +# enableIdentityMark: true + +# -- Enable Kubernetes EndpointSlice feature in Cilium if the cluster supports it. +# enableK8sEndpointSlice: true + +# -- CiliumEndpointSlice configuration options. +ciliumEndpointSlice: + # -- Enable Cilium EndpointSlice feature. + enabled: false + # -- List of rate limit options to be used for the CiliumEndpointSlice controller. 
+  # Each object in the list must have the following fields:
+  # nodes: Count of nodes at which to apply the rate limit.
+  # limit: The sustained request rate in requests per second. The maximum rate that can be configured is 50.
+  # burst: The burst request rate in requests per second. The maximum burst that can be configured is 100.
+  rateLimits:
+    - nodes: 0
+      limit: 10
+      burst: 20
+    - nodes: 100
+      limit: 50
+      burst: 100
+# @schema
+# enum: ["agent", "operator", "both"]
+# @schema
+# -- Control whether CiliumIdentities are created by the agent ("agent"), the operator ("operator") or both ("both").
+# "Both" should be used only to migrate between "agent" and "operator".
+# Operator-managed identities are a beta feature.
+identityManagementMode: "agent"
+envoyConfig:
+  # -- Enable CiliumEnvoyConfig CRD
+  # CiliumEnvoyConfig CRD can also be implicitly enabled by other options.
+  enabled: false
+  # -- SecretsNamespace is the namespace in which envoy SDS will retrieve secrets from.
+  secretsNamespace:
+    # -- Create secrets namespace for CiliumEnvoyConfig CRDs.
+    create: true
+    # -- The name of the secret namespace to which Cilium agents are given read access.
+    name: cilium-secrets
+  # -- Interval in which an attempt is made to reconcile failed EnvoyConfigs. If the duration is zero, the retry is deactivated.
+  retryInterval: 15s
+ingressController:
+  # -- Enable cilium ingress controller
+  # This will automatically set enable-envoy-config as well.
+  enabled: true
+  # -- Set cilium ingress controller to be the default ingress controller
+  # This will let the cilium ingress controller route Ingress entries that have no ingress class set
+  default: true
+  # -- Default ingress load balancer mode
+  # Supported values: shared, dedicated
+  # For granular control, use the following annotations on the ingress resource:
+  # "ingress.cilium.io/loadbalancer-mode: dedicated" (or "shared").
+  loadbalancerMode: shared
+  # -- Enforce HTTPS for hosts that have a matching TLS host in the Ingress.
+  # Incoming traffic on the HTTP listener is answered with a 308 permanent redirect
+  # whose Location header points at the corresponding HTTPS URL.
+  enforceHttps: true
+  # -- Enable proxy protocol for all Ingress listeners. Note that _only_ Proxy protocol traffic will be accepted once this is enabled.
+  enableProxyProtocol: false
+  # -- IngressLBAnnotations are the annotation and label prefixes, which are used to filter annotations and/or labels to propagate from Ingress to the Load Balancer service
+  ingressLBAnnotationPrefixes: ['lbipam.cilium.io', 'nodeipam.cilium.io', 'service.beta.kubernetes.io', 'service.kubernetes.io', 'cloud.google.com']
+  # @schema
+  # type: [null, string]
+  # @schema
+  # -- Default secret namespace for ingresses without .spec.tls[].secretName set.
+  defaultSecretNamespace:
+  # @schema
+  # type: [null, string]
+  # @schema
+  # -- Default secret name for ingresses without .spec.tls[].secretName set.
+  defaultSecretName:
+  # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from.
+  secretsNamespace:
+    # -- Create secrets namespace for Ingress.
+    create: true
+    # -- Name of Ingress secret namespace.
+    name: cilium-secrets
+    # -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name.
+    # If disabled, TLS secrets must be maintained externally.
+    sync: true
+  # -- Load-balancer service in shared mode.
+  # This is a single load-balancer service for all Ingress resources.
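+  # Note: in this values file the shared service below is pinned to a fixed
+  # LB-IPAM address via the `io.cilium/lb-ipam-ips` annotation. That address
+  # is cluster-specific and assumes a CiliumLoadBalancerIPPool covering it
+  # exists, e.g. (illustrative sketch; check the CRD version shipped with
+  # your Cilium release):
+  #
+  #   apiVersion: cilium.io/v2
+  #   kind: CiliumLoadBalancerIPPool
+  #   metadata:
+  #     name: default-pool
+  #   spec:
+  #     blocks:
+  #       - cidr: "192.168.0.128/25"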
+ service: + # -- Service name + name: cilium-ingress + # -- Labels to be added for the shared LB service + labels: {} + # -- Annotations to be added for the shared LB service + annotations: + io.cilium/lb-ipam-ips: 192.168.0.180 + # -- Service type for the shared LB service + type: LoadBalancer + # @schema + # type: [null, integer] + # @schema + # -- Configure a specific nodePort for insecure HTTP traffic on the shared LB service + insecureNodePort: ~ + # @schema + # type: [null, integer] + # @schema + # -- Configure a specific nodePort for secure HTTPS traffic on the shared LB service + secureNodePort: ~ + # @schema + # type: [null, string] + # @schema + # -- Configure a specific loadBalancerClass on the shared LB service (requires Kubernetes 1.24+) + loadBalancerClass: ~ + # @schema + # type: [null, string] + # @schema + # -- Configure a specific loadBalancerIP on the shared LB service + loadBalancerIP: ~ + # @schema + # type: [null, boolean] + # @schema + # -- Configure if node port allocation is required for LB service + # ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation + allocateLoadBalancerNodePorts: ~ + # -- Control how traffic from external sources is routed to the LoadBalancer Kubernetes Service for Cilium Ingress in shared mode. + # Valid values are "Cluster" and "Local". + # ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#external-traffic-policy + externalTrafficPolicy: Cluster + # Host Network related configuration + hostNetwork: + # -- Configure whether the Envoy listeners should be exposed on the host network. + enabled: false + # -- Configure a specific port on the host network that gets used for the shared listener. + sharedListenerPort: 8080 + # Specify the nodes where the Ingress listeners should be exposed + nodes: + # -- Specify the labels of the nodes where the Ingress listeners should be exposed + # + # matchLabels: + # kubernetes.io/os: linux + # kubernetes.io/hostname: kind-worker + matchLabels: {} +gatewayAPI: + # -- Enable support for Gateway API in cilium + # This will automatically set enable-envoy-config as well. + enabled: true + # -- Enable proxy protocol for all GatewayAPI listeners. Note that _only_ Proxy protocol traffic will be accepted once this is enabled. + enableProxyProtocol: false + # -- Enable Backend Protocol selection support (GEP-1911) for Gateway API via appProtocol. + enableAppProtocol: false + # -- Enable ALPN for all listeners configured with Gateway API. ALPN will attempt HTTP/2, then HTTP 1.1. + # Note that this will also enable `appProtocol` support, and services that wish to use HTTP/2 will need to indicate that via their `appProtocol`. + enableAlpn: false + # -- The number of additional GatewayAPI proxy hops from the right side of the HTTP header to trust when determining the origin client's IP address. + xffNumTrustedHops: 0 + # -- Control how traffic from external sources is routed to the LoadBalancer Kubernetes Service for all Cilium GatewayAPI Gateway instances. Valid values are "Cluster" and "Local". + # Note that this value will be ignored when `hostNetwork.enabled == true`. + # ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#external-traffic-policy + externalTrafficPolicy: Cluster + gatewayClass: + # -- Enable creation of GatewayClass resource + # The default value is 'auto' which decides according to presence of gateway.networking.k8s.io/v1/GatewayClass in the cluster. 
+ # Other possible values are 'true' and 'false', which will either always or never create the GatewayClass, respectively. + create: auto + # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from. + secretsNamespace: + # -- Create secrets namespace for Gateway API. + create: true + # -- Name of Gateway API secret namespace. + name: cilium-secrets + # -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name. + # If disabled, TLS secrets must be maintained externally. + sync: true + # Host Network related configuration + hostNetwork: + # -- Configure whether the Envoy listeners should be exposed on the host network. + enabled: false + # Specify the nodes where the Ingress listeners should be exposed + nodes: + # -- Specify the labels of the nodes where the Ingress listeners should be exposed + # + # matchLabels: + # kubernetes.io/os: linux + # kubernetes.io/hostname: kind-worker + matchLabels: {} +# -- Enables the fallback compatibility solution for when the xt_socket kernel +# module is missing and it is needed for the datapath L7 redirection to work +# properly. See documentation for details on when this can be disabled: +# https://docs.cilium.io/en/stable/operations/system_requirements/#linux-kernel. +enableXTSocketFallback: true +encryption: + # -- Enable transparent network encryption. + enabled: false + # -- Encryption method. Can be either ipsec or wireguard. + type: ipsec + # -- Enable encryption for pure node to node traffic. + # This option is only effective when encryption.type is set to "wireguard". + nodeEncryption: false + # -- Configure the WireGuard Pod2Pod strict mode. + strictMode: + # -- Enable WireGuard Pod2Pod strict mode. + enabled: false + # -- CIDR for the WireGuard Pod2Pod strict mode. + cidr: "" + # -- Allow dynamic lookup of remote node identities. + # This is required when tunneling is used or direct routing is used and the node CIDR and pod CIDR overlap. + allowRemoteNodeIdentities: false + ipsec: + # -- Name of the key file inside the Kubernetes secret configured via secretName. + keyFile: keys + # -- Path to mount the secret inside the Cilium pod. + mountPath: /etc/ipsec + # -- Name of the Kubernetes secret containing the encryption keys. + secretName: cilium-ipsec-keys + # -- The interface to use for encrypted traffic. + interface: "" + # -- Enable the key watcher. If disabled, a restart of the agent will be + # necessary on key rotations. + keyWatcher: true + # -- Maximum duration of the IPsec key rotation. The previous key will be + # removed after that delay. + keyRotationDuration: "5m" + # -- Enable IPsec encrypted overlay + encryptedOverlay: false + wireguard: + # -- Controls WireGuard PersistentKeepalive option. Set 0s to disable. + persistentKeepalive: 0s +endpointHealthChecking: + # -- Enable connectivity health checking between virtual endpoints. + enabled: true +endpointRoutes: + # @schema + # type: [boolean, string] + # @schema + # -- Enable use of per endpoint routes instead of routing via + # the cilium_host interface. + enabled: false +k8sNetworkPolicy: + # -- Enable support for K8s NetworkPolicy + enabled: true +# -- Enable endpoint lockdown on policy map overflow. +endpointLockdownOnMapOverflow: false +eni: + # -- Enable Elastic Network Interface (ENI) integration. 
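+  # A minimal sketch for an AWS/EKS-style setup (assumptions, not defaults):
+  # ENI mode is normally combined with ENI IPAM and native routing, e.g.:
+  #
+  #   eni:
+  #     enabled: true
+  #   ipam:
+  #     mode: eni
+  #   routingMode: native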
+  enabled: false
+  # -- Release IPs not used from the ENI
+  awsReleaseExcessIPs: false
+  # -- Enable ENI prefix delegation
+  awsEnablePrefixDelegation: false
+  # -- EC2 API endpoint to use
+  ec2APIEndpoint: ""
+  # -- Tags to apply to the newly created ENIs
+  eniTags: {}
+  # -- Interval for garbage collection of unattached ENIs. Set to "0s" to disable.
+  # @default -- `"5m"`
+  gcInterval: ""
+  # -- Additional tags attached to ENIs created by Cilium.
+  # Dangling ENIs with this tag will be garbage collected
+  # @default -- `{"io.cilium/cilium-managed":"true","io.cilium/cluster-name":""}`
+  gcTags: {}
+  # -- If an IAM role for Service Accounts (IRSA) is used, Cilium will not try to
+  # inject identity values from the cilium-aws Kubernetes secret.
+  # Adds an annotation to the service account if managed by Helm.
+  # See https://github.com/aws/amazon-eks-pod-identity-webhook
+  iamRole: ""
+  # -- Filter via subnet IDs which will dictate which subnets are going to be used to create new ENIs
+  # Important note: This requires that each instance has an ENI with a matching subnet attached
+  # when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium,
+  # use the CNI configuration file settings (cni.customConf) instead.
+  subnetIDsFilter: []
+  # -- Filter via tags (k=v) which will dictate which subnets are going to be used to create new ENIs
+  # Important note: This requires that each instance has an ENI with a matching subnet attached
+  # when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium,
+  # use the CNI configuration file settings (cni.customConf) instead.
+  subnetTagsFilter: []
+  # -- Filter via AWS EC2 Instance tags (k=v) which will dictate which AWS EC2 Instances
+  # are going to be used to create new ENIs
+  instanceTagsFilter: []
+# fragmentTracking enables IPv4 fragment tracking support in the datapath.
+# fragmentTracking: true
+gke:
+  # -- Enable Google Kubernetes Engine integration
+  enabled: false
+# -- Enable connectivity health checking.
+healthChecking: true
+# -- TCP port for the agent health API. This is not the port for cilium-health.
+healthPort: 9879
+# -- Number of ICMP requests sent for each health check before marking a node or endpoint unreachable.
+healthCheckICMPFailureThreshold: 3
+# -- Configure the host firewall.
+hostFirewall:
+  # -- Enables the enforcement of host policies in the eBPF datapath.
+  enabled: false
+# -- Configure socket LB
+socketLB:
+  # -- Enable socket LB
+  enabled: false
+  # -- Disable socket lb for non-root ns. This is used to enable Istio routing rules.
+  # hostNamespaceOnly: false
+  # -- Enable terminating pod connections to deleted service backends.
+  # terminatePodConnections: true
+  # -- Enables tracing for socket-based load balancing.
+  # tracing: true
+# -- Configure certificate generation for Hubble integration.
+# If hubble.tls.auto.method=cronJob, these values are used
+# for the Kubernetes CronJob which will be scheduled regularly to
+# (re)generate any certificates not provided manually.
+certgen:
+  # -- When set to true the certificate authority secret is created.
+ generateCA: true + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/certgen" + tag: "v0.2.4" + digest: "sha256:de7b97b1d19a34b674d0c4bc1da4db999f04ae355923a9a994ac3a81e1a1b5ff" + useDigest: true + pullPolicy: "IfNotPresent" + # -- Seconds after which the completed job pod will be deleted + ttlSecondsAfterFinished: 1800 + # -- Labels to be added to hubble-certgen pods + podLabels: {} + # -- Annotations to be added to the hubble-certgen initial Job and CronJob + annotations: + job: {} + cronJob: {} + # -- Node selector for certgen + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: {} + # -- Priority class for certgen + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass + priorityClassName: "" + # -- Node tolerations for pod assignment on nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: [] + # -- Resource limits for certgen + # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers + resources: {} + # -- Additional certgen volumes. + extraVolumes: [] + # -- Additional certgen volumeMounts. + extraVolumeMounts: [] + # -- Affinity for certgen + affinity: {} +hubble: + # -- Enable Hubble (true by default). + enabled: true + # -- Annotations to be added to all top-level hubble objects (resources under templates/hubble) + annotations: {} + # -- Buffer size of the channel Hubble uses to receive monitor events. If this + # value is not set, the queue size is set to the default monitor queue size. + # eventQueueSize: "" + + # -- Number of recent flows for Hubble to cache. Defaults to 4095. + # Possible values are: + # 1, 3, 7, 15, 31, 63, 127, 255, 511, 1023, + # 2047, 4095, 8191, 16383, 32767, 65535 + # eventBufferCapacity: "4095" + + # -- Hubble metrics configuration. + # See https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics + # for more comprehensive documentation about Hubble metrics. + metrics: + # @schema + # type: [null, array] + # @schema + # -- Configures the list of metrics to collect. If empty or null, metrics + # are disabled. + # Example: + # + # enabled: + # - dns:query;ignoreAAAA + # - drop + # - tcp + # - flow + # - icmp + # - http + # + # You can specify the list of metrics from the helm CLI: + # + # --set hubble.metrics.enabled="{dns:query;ignoreAAAA,drop,tcp,flow,icmp,http}" + # + enabled: ~ + # -- Enables exporting hubble metrics in OpenMetrics format. + enableOpenMetrics: false + # -- Configure the port the hubble metric server listens on. + port: 9965 + tls: + # Enable hubble metrics server TLS. + enabled: false + # Configure hubble metrics server TLS. + server: + # -- Name of the Secret containing the certificate and key for the Hubble metrics server. + # If specified, cert and key are ignored. + existingSecret: "" + # -- base64 encoded PEM values for the Hubble metrics server certificate (deprecated). + # Use existingSecret instead. + cert: "" + # -- base64 encoded PEM values for the Hubble metrics server key (deprecated). + # Use existingSecret instead. + key: "" + # -- Extra DNS names added to certificate when it's auto generated + extraDnsNames: [] + # -- Extra IP addresses added to certificate when it's auto generated + extraIpAddresses: [] + # -- Configure mTLS for the Hubble metrics server. + mtls: + # When set to true enforces mutual TLS between Hubble Metrics server and its clients. 
+          # False allows non-mutual TLS connections.
+          # This option has no effect when TLS is disabled.
+          enabled: false
+          useSecret: false
+          # -- Name of the ConfigMap containing the CA to validate client certificates against.
+          # If mTLS is enabled and this is unspecified, it will default to the
+          # same CA used for Hubble metrics server certificates.
+          name: ~
+          # -- Entry of the ConfigMap containing the CA.
+          key: ca.crt
+    # -- Annotations to be added to hubble-metrics service.
+    serviceAnnotations: {}
+    serviceMonitor:
+      # -- Create ServiceMonitor resources for Prometheus Operator.
+      # This requires the prometheus CRDs to be available.
+      # ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
+      enabled: false
+      # -- Labels to add to ServiceMonitor hubble
+      labels: {}
+      # -- Annotations to add to ServiceMonitor hubble
+      annotations: {}
+      # -- jobLabel to add for ServiceMonitor hubble
+      jobLabel: ""
+      # -- Interval for scrape metrics.
+      interval: "10s"
+      # @schema
+      # type: [null, string]
+      # @schema
+      # -- Timeout after which scrape is considered to be failed.
+      scrapeTimeout: ~
+      # -- Relabeling configs for the ServiceMonitor hubble
+      relabelings:
+        - sourceLabels:
+            - __meta_kubernetes_pod_node_name
+          targetLabel: node
+          action: replace
+          replacement: ${1}
+      # @schema
+      # type: [null, array]
+      # @schema
+      # -- Metrics relabeling configs for the ServiceMonitor hubble
+      metricRelabelings: ~
+      # Configure TLS for the ServiceMonitor.
+      # Note, when using TLS you will either need to specify
+      # tlsConfig.insecureSkipVerify or specify a CA to use.
+      tlsConfig: {}
+    # -- Grafana dashboards for hubble
+    # grafana can import dashboards based on the label and value
+    # ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards
+    dashboards:
+      enabled: false
+      label: grafana_dashboard
+      # @schema
+      # type: [null, string]
+      # @schema
+      namespace: ~
+      labelValue: "1"
+      annotations: {}
+    # Dynamic metrics may be reconfigured without the need for agent restarts.
+    dynamic:
+      enabled: false
+      config:
+        # ---- Name of configmap with configuration that may be altered to reconfigure metric handlers within a running agent.
+        configMapName: cilium-dynamic-metrics-config
+        # ---- True if helm installer should create config map.
+        # Switch to false if you want to self-maintain the file content.
+        createConfigMap: true
+        # ---- Exporters configuration in YAML format.
+        content: []
+        # - name: dns
+        #   contextOptions: []
+        #   includeFilters: []
+        #   excludeFilters: []
+  # -- Unix domain socket path to listen to when Hubble is enabled.
+  socketPath: /var/run/cilium/hubble.sock
+  # -- Enables network policy correlation of Hubble flows, i.e. populating `egress_allowed_by`, `ingress_denied_by` fields with policy information.
+  networkPolicyCorrelation:
+    # @default -- `true`
+    enabled: true
+  # -- Enables redacting sensitive information present in Layer 7 flows.
+  redact:
+    enabled: false
+    http:
+      # -- Enables redacting URL query (GET) parameters.
+      # Example:
+      #
+      # redact:
+      #   enabled: true
+      #   http:
+      #     urlQuery: true
+      #
+      # You can specify the options from the helm CLI:
+      #
+      # --set hubble.redact.enabled="true"
+      # --set hubble.redact.http.urlQuery="true"
+      urlQuery: false
+      # -- Enables redacting user info, e.g., password when basic auth is used.
+ # Example: + # + # redact: + # enabled: true + # http: + # userInfo: true + # + # You can specify the options from the helm CLI: + # + # --set hubble.redact.enabled="true" + # --set hubble.redact.http.userInfo="true" + userInfo: true + headers: + # -- List of HTTP headers to allow: headers not matching will be redacted. Note: `allow` and `deny` lists cannot be used both at the same time, only one can be present. + # Example: + # redact: + # enabled: true + # http: + # headers: + # allow: + # - traceparent + # - tracestate + # - Cache-Control + # + # You can specify the options from the helm CLI: + # --set hubble.redact.enabled="true" + # --set hubble.redact.http.headers.allow="traceparent,tracestate,Cache-Control" + allow: [] + # -- List of HTTP headers to deny: matching headers will be redacted. Note: `allow` and `deny` lists cannot be used both at the same time, only one can be present. + # Example: + # redact: + # enabled: true + # http: + # headers: + # deny: + # - Authorization + # - Proxy-Authorization + # + # You can specify the options from the helm CLI: + # --set hubble.redact.enabled="true" + # --set hubble.redact.http.headers.deny="Authorization,Proxy-Authorization" + deny: [] + kafka: + # -- Enables redacting Kafka's API key. + # Example: + # + # redact: + # enabled: true + # kafka: + # apiKey: true + # + # You can specify the options from the helm CLI: + # + # --set hubble.redact.enabled="true" + # --set hubble.redact.kafka.apiKey="true" + apiKey: true + # -- An additional address for Hubble to listen to. + # Set this field ":4244" if you are enabling Hubble Relay, as it assumes that + # Hubble is listening on port 4244. + listenAddress: ":4244" + # -- Whether Hubble should prefer to announce IPv6 or IPv4 addresses if both are available. + preferIpv6: false + # @schema + # type: [null, boolean] + # @schema + # -- (bool) Skip Hubble events with unknown cgroup ids + # @default -- `true` + skipUnknownCGroupIDs: ~ + peerService: + # -- Service Port for the Peer service. + # If not set, it is dynamically assigned to port 443 if TLS is enabled and to + # port 80 if not. + # servicePort: 80 + # -- Target Port for the Peer service, must match the hubble.listenAddress' + # port. + targetPort: 4244 + # -- The cluster domain to use to query the Hubble Peer service. It should + # be the local cluster. + clusterDomain: cluster.local + # -- TLS configuration for Hubble + tls: + # -- Enable mutual TLS for listenAddress. Setting this value to false is + # highly discouraged as the Hubble API provides access to potentially + # sensitive network flow metadata and is exposed on the host network. + enabled: true + # -- Configure automatic TLS certificates generation. + auto: + # -- Auto-generate certificates. + # When set to true, automatically generate a CA and certificates to + # enable mTLS between Hubble server and Hubble Relay instances. If set to + # false, the certs for Hubble server need to be provided by setting + # appropriate values below. + enabled: true + # -- Set the method to auto-generate certificates. Supported values: + # - helm: This method uses Helm to generate all certificates. + # - cronJob: This method uses a Kubernetes CronJob the generate any + # certificates not provided by the user at installation + # time. + # - certmanager: This method use cert-manager to generate & rotate certificates. + method: helm + # -- Generated certificates validity duration in days. 
+      #
+      # Defaults to 365 days (1 year) because macOS does not accept
+      # self-signed certificates with expirations > 825 days.
+      certValidityDuration: 365
+      # -- Schedule for certificates regeneration (regardless of their expiration date).
+      # Only used if method is "cronJob". If nil, then no recurring job will be created.
+      # Instead, only the one-shot job is deployed to generate the certificates at
+      # installation time.
+      #
+      # Defaults to midnight of the first day of every fourth month. For syntax, see
+      # https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax
+      schedule: "0 0 1 */4 *"
+      # [Example]
+      # certManagerIssuerRef:
+      #   group: cert-manager.io
+      #   kind: ClusterIssuer
+      #   name: ca-issuer
+      # -- certmanager issuer used when hubble.tls.auto.method=certmanager.
+      certManagerIssuerRef: {}
+    # -- The Hubble server certificate and private key
+    server:
+      # -- Name of the Secret containing the certificate and key for the Hubble server.
+      # If specified, cert and key are ignored.
+      existingSecret: ""
+      # -- base64 encoded PEM values for the Hubble server certificate (deprecated).
+      # Use existingSecret instead.
+      cert: ""
+      # -- base64 encoded PEM values for the Hubble server key (deprecated).
+      # Use existingSecret instead.
+      key: ""
+      # -- Extra DNS names added to certificate when it's auto generated
+      extraDnsNames: []
+      # -- Extra IP addresses added to certificate when it's auto generated
+      extraIpAddresses: []
+  relay:
+    # -- Enable Hubble Relay (requires hubble.enabled=true)
+    enabled: true
+    # -- Roll out Hubble Relay pods automatically when configmap is updated.
+    rollOutPods: true
+    # -- Hubble-relay container image.
+    image:
+      # @schema
+      # type: [null, string]
+      # @schema
+      override: ~
+      repository: "quay.io/cilium/hubble-relay"
+      tag: "v1.18.2"
+      # hubble-relay-digest
+      digest: "sha256:6079308ee15e44dff476fb522612732f7c5c4407a1017bc3470916242b0405ac"
+      useDigest: true
+      pullPolicy: "IfNotPresent"
+    # -- Specifies the resources for the hubble-relay pods
+    resources: {}
+    # -- Number of replicas run for the hubble-relay deployment.
+    replicas: 1
+    # -- Affinity for hubble-relay
+    affinity:
+      podAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          - topologyKey: kubernetes.io/hostname
+            labelSelector:
+              matchLabels:
+                k8s-app: cilium
+    # -- Pod topology spread constraints for hubble-relay
+    topologySpreadConstraints: []
+    # - maxSkew: 1
+    #   topologyKey: topology.kubernetes.io/zone
+    #   whenUnsatisfiable: DoNotSchedule
+
+    # -- Node labels for pod assignment
+    # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+    nodeSelector:
+      kubernetes.io/os: linux
+    # -- Node tolerations for pod assignment on nodes with taints
+    # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+    tolerations: []
+    # -- Additional hubble-relay environment variables.
+    extraEnv: []
+    # -- Annotations to be added to all top-level hubble-relay objects (resources under templates/hubble-relay)
+    annotations: {}
+    # -- Annotations to be added to hubble-relay pods
+    podAnnotations: {}
+    # -- Labels to be added to hubble-relay pods
+    podLabels: {}
+    # PodDisruptionBudget settings
+    podDisruptionBudget:
+      # -- enable PodDisruptionBudget
+      # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+      enabled: false
+      # @schema
+      # type: [null, integer, string]
+      # @schema
+      # -- Minimum number/percentage of pods that should remain scheduled.
+      # When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
+      minAvailable: null
+      # @schema
+      # type: [null, integer, string]
+      # @schema
+      # -- Maximum number/percentage of pods that may be made unavailable
+      maxUnavailable: 1
+      # @schema
+      # type: [null, string]
+      # @schema
+      # -- How are unhealthy, but running, pods counted for eviction
+      unhealthyPodEvictionPolicy: null
+    # -- The priority class to use for hubble-relay
+    priorityClassName: ""
+    # -- Configure termination grace period for hubble relay Deployment.
+    terminationGracePeriodSeconds: 1
+    # -- hubble-relay update strategy
+    updateStrategy:
+      type: RollingUpdate
+      rollingUpdate:
+        # @schema
+        # type: [integer, string]
+        # @schema
+        maxUnavailable: 1
+    # -- Additional hubble-relay volumes.
+    extraVolumes: []
+    # -- Additional hubble-relay volumeMounts.
+    extraVolumeMounts: []
+    # -- hubble-relay pod security context
+    podSecurityContext:
+      fsGroup: 65532
+      seccompProfile:
+        type: RuntimeDefault
+    # -- hubble-relay container security context
+    securityContext:
+      # readOnlyRootFilesystem: true
+      allowPrivilegeEscalation: false
+      runAsNonRoot: true
+      runAsUser: 65532
+      runAsGroup: 65532
+      seccompProfile:
+        type: RuntimeDefault
+      capabilities:
+        drop:
+          - ALL
+    # -- hubble-relay service configuration.
+    service:
+      # --- The type of service used for Hubble Relay access, either ClusterIP, NodePort or LoadBalancer.
+      type: ClusterIP
+      # --- The port to use when the service type is set to NodePort.
+      nodePort: 31234
+    # -- Host to listen to. Specify an empty string to bind to all the interfaces.
+    listenHost: ""
+    # -- Port to listen to.
+    listenPort: "4245"
+    # -- TLS configuration for Hubble Relay
+    tls:
+      # -- The hubble-relay client certificate and private key.
+      # This keypair is presented to Hubble server instances for mTLS
+      # authentication and is required when hubble.tls.enabled is true.
+      # These values need to be set manually if hubble.tls.auto.enabled is false.
+      client:
+        # -- Name of the Secret containing the certificate and key for the Hubble Relay client.
+        # If specified, cert and key are ignored.
+        existingSecret: ""
+        # -- base64 encoded PEM values for the Hubble relay client certificate (deprecated).
+        # Use existingSecret instead.
+        cert: ""
+        # -- base64 encoded PEM values for the Hubble relay client key (deprecated).
+        # Use existingSecret instead.
+        key: ""
+      # -- The hubble-relay server certificate and private key
+      server:
+        # When set to true, enable TLS for the Hubble Relay server
+        # (i.e. for clients connecting to the Hubble Relay API).
+        enabled: false
+        # When set to true enforces mutual TLS between the Hubble Relay server and its clients.
+        # False allows non-mutual TLS connections.
+        # This option has no effect when TLS is disabled.
+        mtls: false
+        # -- Name of the Secret containing the certificate and key for the Hubble relay server.
+        # If specified, cert and key are ignored.
+        existingSecret: ""
+        # -- base64 encoded PEM values for the Hubble relay server certificate (deprecated).
+        # Use existingSecret instead.
+        cert: ""
+        # -- base64 encoded PEM values for the Hubble relay server key (deprecated).
+        # Use existingSecret instead.
+        key: ""
+        # -- Extra DNS names added to certificate when it's auto-generated
+        extraDnsNames: []
+        # -- Extra IP addresses added to certificate when it's auto-generated
+        extraIpAddresses: []
+        # DNS name used by the backend to connect to the relay
+        # This is a simple workaround as the relay certificates are currently hardcoded to
+        # *.hubble-relay.cilium.io
+        # See https://github.com/cilium/cilium/pull/28709#discussion_r1371792546
+        # For GKE Dataplane V2 this should be set to relay.kube-system.svc.cluster.local
+        relayName: "ui.hubble-relay.cilium.io"
+    # @schema
+    # type: [null, string]
+    # @schema
+    # -- Backoff duration to retry connecting to the local hubble instance in case of failure (e.g. "30s").
+    retryTimeout: ~
+    # @schema
+    # type: [null, integer]
+    # @schema
+    # -- (int) Max number of flows that can be buffered for sorting before being sent to the
+    # client (per request) (e.g. 100).
+    sortBufferLenMax: ~
+    # @schema
+    # type: [null, string]
+    # @schema
+    # -- When the per-request flows sort buffer is not full, a flow is drained every
+    # time this timeout is reached (only affects requests in follow-mode) (e.g. "1s").
+    sortBufferDrainTimeout: ~
+    # -- Port to use for the k8s service backed by hubble-relay pods.
+    # If not set, it is dynamically assigned to port 443 if TLS is enabled and to
+    # port 80 if not.
+    # servicePort: 80
+
+    # -- Enable prometheus metrics for hubble-relay on the configured port at
+    # /metrics
+    prometheus:
+      enabled: false
+      port: 9966
+      serviceMonitor:
+        # -- Enable service monitors.
+        # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
+        enabled: false
+        # -- Labels to add to ServiceMonitor hubble-relay
+        labels: {}
+        # -- Annotations to add to ServiceMonitor hubble-relay
+        annotations: {}
+        # -- Interval for scrape metrics.
+        interval: "10s"
+        # @schema
+        # type: [null, string]
+        # @schema
+        # -- Timeout after which scrape is considered to be failed.
+        scrapeTimeout: ~
+        # -- Specify the Kubernetes namespace where Prometheus expects to find
+        # service monitors configured.
+        # namespace: ""
+        # @schema
+        # type: [null, array]
+        # @schema
+        # -- Relabeling configs for the ServiceMonitor hubble-relay
+        relabelings: ~
+        # @schema
+        # type: [null, array]
+        # @schema
+        # -- Metrics relabeling configs for the ServiceMonitor hubble-relay
+        metricRelabelings: ~
+    gops:
+      # -- Enable gops for hubble-relay
+      enabled: true
+      # -- Configure gops listen port for hubble-relay
+      port: 9893
+    pprof:
+      # -- Enable pprof for hubble-relay
+      enabled: false
+      # -- Configure pprof listen address for hubble-relay
+      address: localhost
+      # -- Configure pprof listen port for hubble-relay
+      port: 6062
+  ui:
+    # -- Whether to enable the Hubble UI.
+    enabled: true
+    standalone:
+      # -- When true, it will allow installing the Hubble UI only, without checking dependencies.
+      # It is useful if a cluster already has cilium and Hubble relay installed and you just
+      # want Hubble UI to be deployed.
+      # When installed via helm, installing UI should be done via `helm upgrade` and when installed via the cilium cli, then `cilium hubble enable --ui`
+      enabled: false
+      tls:
+        # -- When deploying Hubble UI in standalone, with tls enabled for Hubble relay, it is required
+        # to provide a volume for mounting the client certificates.
+ certsVolume: {} + # projected: + # defaultMode: 0400 + # sources: + # - secret: + # name: hubble-ui-client-certs + # items: + # - key: tls.crt + # path: client.crt + # - key: tls.key + # path: client.key + # - key: ca.crt + # path: hubble-relay-ca.crt + # -- Roll out Hubble-ui pods automatically when configmap is updated. + rollOutPods: true + tls: + client: + # -- Name of the Secret containing the client certificate and key for Hubble UI + # If specified, cert and key are ignored. + existingSecret: "" + # -- base64 encoded PEM values for the Hubble UI client certificate (deprecated). + # Use existingSecret instead. + cert: "" + # -- base64 encoded PEM values for the Hubble UI client key (deprecated). + # Use existingSecret instead. + key: "" + backend: + # -- Hubble-ui backend image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/hubble-ui-backend" + tag: "v0.13.3" + digest: "sha256:db1454e45dc39ca41fbf7cad31eec95d99e5b9949c39daaad0fa81ef29d56953" + useDigest: true + pullPolicy: "IfNotPresent" + # -- Hubble-ui backend security context. + securityContext: + allowPrivilegeEscalation: false + # -- Additional hubble-ui backend environment variables. + extraEnv: [] + # -- Additional hubble-ui backend volumes. + extraVolumes: [] + # -- Additional hubble-ui backend volumeMounts. + extraVolumeMounts: [] + livenessProbe: + # -- Enable liveness probe for Hubble-ui backend (requires Hubble-ui 0.12+) + enabled: false + readinessProbe: + # -- Enable readiness probe for Hubble-ui backend (requires Hubble-ui 0.12+) + enabled: false + # -- Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment. + resources: {} + # limits: + # cpu: 1000m + # memory: 1024M + # requests: + # cpu: 100m + # memory: 64Mi + frontend: + # -- Hubble-ui frontend image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/hubble-ui" + tag: "v0.13.3" + digest: "sha256:661d5de7050182d495c6497ff0b007a7a1e379648e60830dd68c4d78ae21761d" + useDigest: true + pullPolicy: "IfNotPresent" + # -- Hubble-ui frontend security context. + securityContext: + allowPrivilegeEscalation: false + # -- Additional hubble-ui frontend environment variables. + extraEnv: [] + # -- Additional hubble-ui frontend volumes. + extraVolumes: [] + # -- Additional hubble-ui frontend volumeMounts. + extraVolumeMounts: [] + # -- Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment. + resources: {} + # limits: + # cpu: 1000m + # memory: 1024M + # requests: + # cpu: 100m + # memory: 64Mi + server: + # -- Controls server listener for ipv6 + ipv6: + enabled: true + # -- The number of replicas of Hubble UI to deploy. + replicas: 1 + # -- Annotations to be added to all top-level hubble-ui objects (resources under templates/hubble-ui) + annotations: {} + # -- Additional labels to be added to 'hubble-ui' deployment object + labels: {} + # -- Annotations to be added to hubble-ui pods + podAnnotations: {} + # -- Labels to be added to hubble-ui pods + podLabels: {} + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # @schema + # type: [null, integer, string] + # @schema + # -- Minimum number/percentage of pods that should remain scheduled. 
+      # When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
+      minAvailable: null
+      # @schema
+      # type: [null, integer, string]
+      # @schema
+      # -- Maximum number/percentage of pods that may be made unavailable
+      maxUnavailable: 1
+      # @schema
+      # type: [null, string]
+      # @schema
+      # -- How are unhealthy, but running, pods counted for eviction
+      unhealthyPodEvictionPolicy: null
+    # -- Affinity for hubble-ui
+    affinity: {}
+    # -- Pod topology spread constraints for hubble-ui
+    topologySpreadConstraints: []
+    # - maxSkew: 1
+    #   topologyKey: topology.kubernetes.io/zone
+    #   whenUnsatisfiable: DoNotSchedule
+
+    # -- Node labels for pod assignment
+    # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+    nodeSelector:
+      kubernetes.io/os: linux
+    # -- Node tolerations for pod assignment on nodes with taints
+    # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+    tolerations: []
+    # -- The priority class to use for hubble-ui
+    priorityClassName: ""
+    # -- hubble-ui update strategy.
+    updateStrategy:
+      type: RollingUpdate
+      rollingUpdate:
+        # @schema
+        # type: [integer, string]
+        # @schema
+        maxUnavailable: 1
+    # -- Security context to be added to Hubble UI pods
+    securityContext:
+      runAsUser: 1001
+      runAsGroup: 1001
+      fsGroup: 1001
+    # -- hubble-ui service configuration.
+    service:
+      # -- Annotations to be added for the Hubble UI service
+      annotations: {}
+      # -- Labels to be added for the Hubble UI service
+      labels: {}
+      # --- The type of service used for Hubble UI access, either ClusterIP or NodePort.
+      type: ClusterIP
+      # --- The port to use when the service type is set to NodePort.
+      nodePort: 31235
+    # -- Defines base url prefix for all hubble-ui http requests.
+    # It needs to be changed if the ingress for hubble-ui is configured under a sub-path.
+    # A trailing `/` is required for a custom path, e.g. `/service-map/`.
+    baseUrl: "/"
+    # -- hubble-ui ingress configuration.
+    ingress:
+      enabled: false
+      annotations: {}
+      # kubernetes.io/ingress.class: nginx
+      # kubernetes.io/tls-acme: "true"
+      className: ""
+      hosts:
+        - chart-example.local
+      labels: {}
+      tls: []
+      # - secretName: chart-example-tls
+      #   hosts:
+      #     - chart-example.local
+  # -- Hubble flows export.
+  export:
+    # --- Static exporter configuration.
+    # Static exporter is bound to agent lifecycle.
+    static:
+      enabled: false
+      filePath: /var/run/cilium/hubble/events.log
+      fieldMask: []
+      # - time
+      # - source
+      # - destination
+      # - verdict
+      allowList: []
+      # - '{"verdict":["DROPPED","ERROR"]}'
+      denyList: []
+      # - '{"source_pod":["kube-system/"]}'
+      # - '{"destination_pod":["kube-system/"]}'
+      # --- Defines max file size of output file before it gets rotated.
+      fileMaxSizeMb: 10
+      # --- Defines max number of backup/rotated files.
+      fileMaxBackups: 5
+      # --- Enable compression of rotated files.
+      fileCompress: false
+    # --- Dynamic exporters configuration.
+    # Dynamic exporters may be reconfigured without the need for agent restarts.
+    dynamic:
+      enabled: false
+      config:
+        # ---- Name of configmap with configuration that may be altered to reconfigure exporters within a running agent.
+        configMapName: cilium-flowlog-config
+        # ---- True if helm installer should create config map.
+        # Switch to false if you want to self-maintain the file content.
+        createConfigMap: true
+        # ---- Exporters configuration in YAML format.
+ content: + - name: all + fieldMask: [] + includeFilters: [] + excludeFilters: [] + filePath: "/var/run/cilium/hubble/events.log" + fileMaxSizeMb: 10 + fileMaxBackups: 5 + fileCompress: false + # - name: "test002" + # filePath: "/var/log/network/flow-log/pa/test002.log" + # fieldMask: ["source.namespace", "source.pod_name", "destination.namespace", "destination.pod_name", "verdict"] + # includeFilters: + # - source_pod: ["default/"] + # event_type: + # - type: 1 + # - destination_pod: ["frontend/nginx-975996d4c-7hhgt"] + # excludeFilters: [] + # fileMaxSizeMb: 1 + # fileMaxBackups: 10 + # fileCompress: true + # end: "2023-10-09T23:59:59-07:00" + # -- Emit v1.Events related to pods on detection of packet drops. + # This feature is alpha, please provide feedback at https://github.com/cilium/cilium/issues/33975. + dropEventEmitter: + enabled: false + # --- Minimum time between emitting same events. + interval: 2m + # --- Drop reasons to emit events for. + # ref: https://docs.cilium.io/en/stable/_api/v1/flow/README/#dropreason + reasons: + - auth_required + - policy_denied +# -- Method to use for identity allocation (`crd`, `kvstore` or `doublewrite-readkvstore` / `doublewrite-readcrd` for migrating between identity backends). +identityAllocationMode: "crd" +# -- (string) Time to wait before using new identity on endpoint identity change. +# @default -- `"5s"` +identityChangeGracePeriod: "" +# -- Install Iptables rules to skip netfilter connection tracking on all pod +# traffic. This option is only effective when Cilium is running in direct +# routing and full KPR mode. Moreover, this option cannot be enabled when Cilium +# is running in a managed Kubernetes environment or in a chained CNI setup. +installNoConntrackIptablesRules: false +ipam: + # -- Configure IP Address Management mode. + # ref: https://docs.cilium.io/en/stable/network/concepts/ipam/ + mode: kubernetes + # -- Maximum rate at which the CiliumNode custom resource is updated. + ciliumNodeUpdateRate: "15s" + # -- Pre-allocation settings for IPAM in Multi-Pool mode + multiPoolPreAllocation: "" + # -- Install ingress/egress routes through uplink on host for Pods when working with delegated IPAM plugin. + installUplinkRoutesForDelegatedIPAM: false + operator: + # @schema + # type: [array, string] + # @schema + # -- IPv4 CIDR list range to delegate to individual nodes for IPAM. + clusterPoolIPv4PodCIDRList: ["10.0.0.0/8"] + # -- IPv4 CIDR mask size to delegate to individual nodes for IPAM. + clusterPoolIPv4MaskSize: 24 + # @schema + # type: [array, string] + # @schema + # -- IPv6 CIDR list range to delegate to individual nodes for IPAM. + clusterPoolIPv6PodCIDRList: ["fd00::/104"] + # -- IPv6 CIDR mask size to delegate to individual nodes for IPAM. + clusterPoolIPv6MaskSize: 120 + # -- IP pools to auto-create in multi-pool IPAM mode. + autoCreateCiliumPodIPPools: {} + # default: + # ipv4: + # cidrs: + # - 10.10.0.0/8 + # maskSize: 24 + # other: + # ipv6: + # cidrs: + # - fd00:100::/80 + # maskSize: 96 + # @schema + # type: [null, integer] + # @schema + # -- (int) The maximum burst size when rate limiting access to external APIs. + # Also known as the token bucket capacity. + # @default -- `20` + externalAPILimitBurstSize: ~ + # @schema + # type: [null, number] + # @schema + # -- (float) The maximum queries per second when rate limiting access to + # external APIs. Also known as the bucket refill rate, which is used to + # refill the bucket up to the burst size capacity. 
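+    # Worked example (assumed values): with a burst size of 20 and 4.0 QPS,
+    # up to 20 calls may be issued at once, after which the bucket refills at
+    # 4 tokens per second, i.e. a fully drained bucket is full again in ~5s:
+    #
+    #   externalAPILimitBurstSize: 20
+    #   externalAPILimitQPS: 4.0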
+ # @default -- `4.0` + externalAPILimitQPS: ~ +# -- defaultLBServiceIPAM indicates the default LoadBalancer Service IPAM when +# no LoadBalancer class is set. Applicable values: lbipam, nodeipam, none +# @schema +# type: [string] +# @schema +defaultLBServiceIPAM: lbipam +nodeIPAM: + # -- Configure Node IPAM + # ref: https://docs.cilium.io/en/stable/network/node-ipam/ + enabled: false +# @schema +# type: [null, string] +# @schema +# -- The api-rate-limit option can be used to overwrite individual settings of the default configuration for rate limiting calls to the Cilium Agent API +apiRateLimit: ~ +# -- Configure the eBPF-based ip-masq-agent +ipMasqAgent: + enabled: false +# the config of nonMasqueradeCIDRs +# config: +# nonMasqueradeCIDRs: [] +# masqLinkLocal: false +# masqLinkLocalIPv6: false + +# iptablesLockTimeout defines the iptables "--wait" option when invoked from Cilium. +# iptablesLockTimeout: "5s" +ipv4: + # -- Enable IPv4 support. + enabled: true +ipv6: + # -- Enable IPv6 support. + enabled: false +# -- Configure Kubernetes specific configuration +k8s: + # -- requireIPv4PodCIDR enables waiting for Kubernetes to provide the PodCIDR + # range via the Kubernetes node resource + requireIPv4PodCIDR: false + # -- requireIPv6PodCIDR enables waiting for Kubernetes to provide the PodCIDR + # range via the Kubernetes node resource + requireIPv6PodCIDR: false + # -- A space separated list of Kubernetes API server URLs to use with the client. + # For example "https://192.168.0.1:6443 https://192.168.0.2:6443" + # apiServerURLs: "" +# -- Keep the deprecated selector labels when deploying Cilium DaemonSet. +keepDeprecatedLabels: false +# -- Keep the deprecated probes when deploying Cilium DaemonSet +keepDeprecatedProbes: false +startupProbe: + # -- failure threshold of startup probe. + # Allow Cilium to take up to 600s to start up (300 attempts with 2s between attempts). + failureThreshold: 300 + # -- interval between checks of the startup probe + periodSeconds: 2 +livenessProbe: + # -- failure threshold of liveness probe + failureThreshold: 10 + # -- interval between checks of the liveness probe + periodSeconds: 30 + # -- whether to require k8s connectivity as part of the check. + requireK8sConnectivity: false +readinessProbe: + # -- failure threshold of readiness probe + failureThreshold: 3 + # -- interval between checks of the readiness probe + periodSeconds: 30 +# -- Configure the kube-proxy replacement in Cilium BPF datapath +# Valid options are "true" or "false". +# ref: https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/ +kubeProxyReplacement: true + +# -- healthz server bind address for the kube-proxy replacement. +# To enable set the value to '0.0.0.0:10256' for all ipv4 +# addresses and this '[::]:10256' for all ipv6 addresses. +# By default it is disabled. +kubeProxyReplacementHealthzBindAddr: "" +l2NeighDiscovery: + # -- Enable L2 neighbor discovery in the agent + enabled: false +# -- Enable Layer 7 network policy. +l7Proxy: true +# -- Enable Local Redirect Policy (deprecated, please use 'localRedirectPolicies.enabled' instead) +localRedirectPolicy: false +localRedirectPolicies: + # -- Enable local redirect policies. + enabled: false + # -- Limit the allowed addresses in Address Matcher rule of + # Local Redirect Policies to the given CIDRs. + # @schema@ + # type: [null, array] + # @schema@ + addressMatcherCIDRs: ~ +# To include or exclude matched resources from cilium identity evaluation +# labels: "" + +# logOptions allows you to define logging options. 
e.g.:
+# logOptions:
+#   format: json
+
+# -- Enables periodic logging of system load
+logSystemLoad: false
+# -- Configure maglev consistent hashing
+maglev: {}
+# -- tableSize is the size (parameter M) for the backend table of one
+# service entry
+# tableSize:
+
+# -- hashSeed is the cluster-wide base64 encoded seed for the hashing
+# hashSeed:
+
+# @schema
+# type: [null, boolean]
+# @schema
+# -- (bool) Enables masquerading of IPv4 traffic leaving the node from endpoints.
+# @default -- `true` unless ipam eni mode is active
+enableIPv4Masquerade: ~
+# -- Enables masquerading of IPv6 traffic leaving the node from endpoints.
+enableIPv6Masquerade: true
+# -- Enables masquerading to the source of the route for traffic leaving the node from endpoints.
+enableMasqueradeRouteSource: false
+# -- Enables IPv4 BIG TCP support which increases maximum IPv4 GSO/GRO limits for nodes and pods
+enableIPv4BIGTCP: false
+# -- Enables IPv6 BIG TCP support which increases maximum IPv6 GSO/GRO limits for nodes and pods
+enableIPv6BIGTCP: false
+nat:
+  # -- Number of the top-k SNAT map connections to track in Cilium statedb.
+  mapStatsEntries: 32
+  # -- Interval between how often SNAT map is counted for stats.
+  mapStatsInterval: 30s
+egressGateway:
+  # -- Enables egress gateway to redirect and SNAT the traffic that leaves the
+  # cluster.
+  enabled: false
+  # -- Time between triggers of egress gateway state reconciliations
+  reconciliationTriggerInterval: 1s
+  # -- Maximum number of entries in egress gateway policy map
+  # maxPolicyEntries: 16384
+vtep:
+  # -- Enables VXLAN Tunnel Endpoint (VTEP) Integration (beta) to allow
+  # Cilium-managed pods to talk to third party VTEP devices over Cilium tunnel.
+  enabled: false
+  # -- A space separated list of VTEP device endpoint IPs, for example "1.1.1.1 1.1.2.1"
+  endpoint: ""
+  # -- A space separated list of VTEP device CIDRs, for example "1.1.1.0/24 1.1.2.0/24"
+  cidr: ""
+  # -- VTEP CIDRs Mask that applies to all VTEP CIDRs, for example "255.255.255.0"
+  mask: ""
+  # -- A space separated list of VTEP device MAC addresses (VTEP MAC), for example "x:x:x:x:x:x y:y:y:y:y:y"
+  mac: ""
+# -- (string) Allows to explicitly specify the IPv4 CIDR for native routing.
+# When specified, Cilium assumes networking for this CIDR is preconfigured and
+# hands traffic destined for that range to the Linux network stack without
+# applying any SNAT.
+# Generally speaking, specifying a native routing CIDR implies that Cilium can
+# depend on the underlying networking stack to route packets to their
+# destination. To offer a concrete example, if Cilium is configured to use
+# direct routing and the Kubernetes CIDR is included in the native routing CIDR,
+# the user must configure the routes to reach pods, either manually or by
+# setting the auto-direct-node-routes flag.
+ipv4NativeRoutingCIDR: ""
+# -- (string) Allows to explicitly specify the IPv6 CIDR for native routing.
+# When specified, Cilium assumes networking for this CIDR is preconfigured and
+# hands traffic destined for that range to the Linux network stack without
+# applying any SNAT.
+# Generally speaking, specifying a native routing CIDR implies that Cilium can
+# depend on the underlying networking stack to route packets to their
+# destination. To offer a concrete example, if Cilium is configured to use
+# direct routing and the Kubernetes CIDR is included in the native routing CIDR,
+# the user must configure the routes to reach pods, either manually or by
+# setting the auto-direct-node-routes flag.
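+# Example (illustrative ULA prefix, adjust to your environment):
+#
+#   ipv6NativeRoutingCIDR: "fd00::/48"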
+ipv6NativeRoutingCIDR: "" +# -- cilium-monitor sidecar. +monitor: + # -- Enable the cilium-monitor sidecar. + enabled: false +# -- Configure service load balancing +loadBalancer: + # -- standalone enables the standalone L4LB which does not connect to + # kube-apiserver. + # standalone: false + + # -- algorithm is the name of the load balancing algorithm for backend + # selection e.g. random or maglev + algorithm: maglev + + # -- mode is the operation mode of load balancing for remote backends + # e.g. snat, dsr, hybrid + # mode: snat + + # -- acceleration is the option to accelerate service handling via XDP + # Applicable values can be: disabled (do not use XDP), native (XDP BPF + # program is run directly out of the networking driver's early receive + # path), or best-effort (use native mode XDP acceleration on devices + # that support it). + acceleration: disabled + # -- dsrDispatch configures whether IP option (opt), IPIP encapsulation (ipip), + # Geneve Class Option (geneve) used to pass a service IP and port to remote backend + # dsrDispatch: opt + + # -- serviceTopology enables K8s Topology Aware Hints -based service + # endpoints filtering + # serviceTopology: false + + # -- L7 LoadBalancer + l7: + # -- Enable L7 service load balancing via envoy proxy. + # The request to a k8s service, which has specific annotation e.g. service.cilium.io/lb-l7, + # will be forwarded to the local backend proxy to be load balanced to the service endpoints. + # Please refer to docs for supported annotations for more configuration. + # + # Applicable values: + # - envoy: Enable L7 load balancing via envoy proxy. This will automatically set enable-envoy-config as well. + # - disabled: Disable L7 load balancing by way of service annotation. + backend: disabled + # -- List of ports from service to be automatically redirected to above backend. + # Any service exposing one of these ports will be automatically redirected. + # Fine-grained control can be achieved by using the service annotation. + ports: [] + # -- Default LB algorithm + # The default LB algorithm to be used for services, which can be overridden by the + # service annotation (e.g. service.cilium.io/lb-l7-algorithm) + # Applicable values: round_robin, least_request, random + algorithm: round_robin +# -- Configure N-S k8s service loadbalancing +nodePort: + # -- Enable the Cilium NodePort service implementation. + enabled: false + # -- Port range to use for NodePort services. + # range: "30000,32767" + + # @schema + # type: [null, string, array] + # @schema + # -- List of CIDRs for choosing which IP addresses assigned to native devices are used for NodePort load-balancing. + # By default this is empty and the first suitable, preferably private, IPv4 and IPv6 address assigned to each device is used. + # + # Example: + # + # addresses: ["192.168.1.0/24", "2001::/64"] + # + addresses: ~ + # -- Set to true to prevent applications binding to service ports. + bindProtection: true + # -- Append NodePort range to ip_local_reserved_ports if clash with ephemeral + # ports is detected. + autoProtectPortRange: true + # -- Enable healthcheck nodePort server for NodePort services + enableHealthCheck: true + # -- Enable access of the healthcheck nodePort on the LoadBalancerIP. Needs + # EnableHealthCheck to be enabled + enableHealthCheckLoadBalancerIP: false +# policyAuditMode: false + +# -- The agent can be put into one of the three policy enforcement modes: +# default, always and never. 
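+# As an illustration, "always" enforces policy (default deny) even for endpoints
+# that no rule selects, while "never" disables enforcement entirely; see the ref below.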
+# ref: https://docs.cilium.io/en/stable/security/policy/intro/#policy-enforcement-modes +policyEnforcementMode: "default" +# @schema +# type: [null, string, array] +# @schema +# -- policyCIDRMatchMode is a list of entities that may be selected by CIDR selector. +# The possible value is "nodes". +policyCIDRMatchMode: +pprof: + # -- Enable pprof for cilium-agent + enabled: false + # -- Configure pprof listen address for cilium-agent + address: localhost + # -- Configure pprof listen port for cilium-agent + port: 6060 +# -- Configure prometheus metrics on the configured port at /metrics +prometheus: + metricsService: false + enabled: false + port: 9962 + serviceMonitor: + # -- Enable service monitors. + # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + enabled: false + # -- Labels to add to ServiceMonitor cilium-agent + labels: {} + # -- Annotations to add to ServiceMonitor cilium-agent + annotations: {} + # -- jobLabel to add for ServiceMonitor cilium-agent + jobLabel: "" + # -- Interval for scrape metrics. + interval: "10s" + # @schema + # type: [null, string] + # @schema + # -- Timeout after which scrape is considered to be failed. + scrapeTimeout: ~ + # -- Specify the Kubernetes namespace where Prometheus expects to find + # service monitors configured. + # namespace: "" + # -- Relabeling configs for the ServiceMonitor cilium-agent + relabelings: + - sourceLabels: + - __meta_kubernetes_pod_node_name + targetLabel: node + action: replace + replacement: ${1} + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor cilium-agent + metricRelabelings: ~ + # -- Set to `true` and helm will not check for monitoring.coreos.com/v1 CRDs before deploying + trustCRDsExist: false + # @schema + # type: [null, array] + # @schema + # -- Metrics that should be enabled or disabled from the default metric list. + # The list is expected to be separated by a space. (+metric_foo to enable + # metric_foo , -metric_bar to disable metric_bar). + # ref: https://docs.cilium.io/en/stable/observability/metrics/ + metrics: ~ + # --- Enable controller group metrics for monitoring specific Cilium + # subsystems. The list is a list of controller group names. The special + # values of "all" and "none" are supported. The set of controller + # group names is not guaranteed to be stable between Cilium versions. + controllerGroupMetrics: + - write-cni-file + - sync-host-ips + - sync-lb-maps-with-k8s-services +# -- Grafana dashboards for cilium-agent +# grafana can import dashboards based on the label and value +# ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards +dashboards: + enabled: false + label: grafana_dashboard + # @schema + # type: [null, string] + # @schema + namespace: ~ + labelValue: "1" + annotations: {} +# Configure Cilium Envoy options. +envoy: + # @schema + # type: [null, boolean] + # @schema + # -- Enable Envoy Proxy in standalone DaemonSet. + # This field is enabled by default for new installation. + # @default -- `true` for new installation + enabled: ~ + # -- (int) + # Set Envoy'--base-id' to use when allocating shared memory regions. + # Only needs to be changed if multiple Envoy instances will run on the same node and may have conflicts. Supported values: 0 - 4294967295. 
Defaults to '0' + baseID: 0 + log: + # @schema + # type: [null, string] + # @schema + # -- The format string to use for laying out the log message metadata of Envoy. If specified, Envoy will use text format output. + # This setting is mutually exclusive with envoy.log.format_json. + format: "[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v" + # @schema + # type: [null, object] + # @schema + # -- The JSON logging format to use for Envoy. This setting is mutually exclusive with envoy.log.format. + # ref: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/bootstrap/v3/bootstrap.proto#envoy-v3-api-field-config-bootstrap-v3-bootstrap-applicationlogconfig-logformat-json-format + format_json: null + # date: "%Y-%m-%dT%T.%e" + # thread_id: "%t" + # source_line: "%s:%#" + # level: "%l" + # logger: "%n" + # message: "%j" + # -- Path to a separate Envoy log file, if any. Defaults to /dev/stdout. + path: "" + # @schema + # oneOf: + # - type: [null] + # - enum: [trace,debug,info,warning,error,critical,off] + # @schema + # -- Default log level of Envoy application log that is configured if Cilium debug / verbose logging isn't enabled. + # This option allows to have a different log level than the Cilium Agent - e.g. lower it to `critical`. + # Possible values: trace, debug, info, warning, error, critical, off + # @default -- Defaults to the default log level of the Cilium Agent - `info` + defaultLevel: ~ + # @schema + # type: [null, integer] + # @schema + # -- Size of the Envoy access log buffer created within the agent in bytes. + # Tune this value up if you encounter "Envoy: Discarded truncated access log message" errors. + # Large request/response header sizes (e.g. 16KiB) will require a larger buffer size. + accessLogBufferSize: 4096 + # -- Time in seconds after which a TCP connection attempt times out + connectTimeoutSeconds: 2 + # -- Time in seconds after which the initial fetch on an xDS stream is considered timed out + initialFetchTimeoutSeconds: 30 + # -- Maximum number of concurrent retries on Envoy clusters + maxConcurrentRetries: 128 + # -- Maximum number of retries for each HTTP request + httpRetryCount: 3 + # -- ProxyMaxRequestsPerConnection specifies the max_requests_per_connection setting for Envoy + maxRequestsPerConnection: 0 + # -- Set Envoy HTTP option max_connection_duration seconds. Default 0 (disable) + maxConnectionDurationSeconds: 0 + # -- Set Envoy upstream HTTP idle connection timeout seconds. + # Does not apply to connections with pending requests. Default 60s + idleTimeoutDurationSeconds: 60 + # -- Set Envoy the amount of time that the connection manager will allow a stream to exist with no upstream or downstream activity. + # default 5 minutes + streamIdleTimeoutDurationSeconds: 300 + # -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the ingress L7 policy enforcement Envoy listeners. + xffNumTrustedHopsL7PolicyIngress: 0 + # -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the egress L7 policy enforcement Envoy listeners. + xffNumTrustedHopsL7PolicyEgress: 0 + # @schema + # type: [null, string] + # @schema + # -- Max duration to wait for endpoint policies to be restored on restart. Default "3m". + policyRestoreTimeoutDuration: null + # -- Time in seconds to block Envoy worker thread while an upstream HTTP connection is closing. If set to 0, the connection is closed immediately (with TCP RST). If set to -1, the connection is closed asynchronously in the background. 
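+  # A hypothetical override (not the chart default): close upstream connections
+  # immediately with TCP RST.
+  # httpUpstreamLingerTimeout: 0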
+ httpUpstreamLingerTimeout: null + # -- Envoy container image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/cilium-envoy" + tag: "v1.34.7-1757592137-1a52bb680a956879722f48c591a2ca90f7791324" + pullPolicy: "IfNotPresent" + digest: "sha256:7932d656b63f6f866b6732099d33355184322123cfe1182e6f05175a3bc2e0e0" + useDigest: true + # -- Additional containers added to the cilium Envoy DaemonSet. + extraContainers: [] + # -- Additional envoy container arguments. + extraArgs: [] + # -- Additional envoy container environment variables. + extraEnv: [] + # -- Additional envoy hostPath mounts. + extraHostPathMounts: [] + # - name: host-mnt-data + # mountPath: /host/mnt/data + # hostPath: /mnt/data + # hostPathType: Directory + # readOnly: true + # mountPropagation: HostToContainer + + # -- Additional envoy volumes. + extraVolumes: [] + # -- Additional envoy volumeMounts. + extraVolumeMounts: [] + # -- Configure termination grace period for cilium-envoy DaemonSet. + terminationGracePeriodSeconds: 1 + # -- TCP port for the health API. + healthPort: 9878 + # -- cilium-envoy update strategy + # ref: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/#updating-a-daemonset + updateStrategy: + type: RollingUpdate + rollingUpdate: + # @schema + # type: [integer, string] + # @schema + maxUnavailable: 2 + # -- Roll out cilium envoy pods automatically when configmap is updated. + rollOutPods: false + # -- ADVANCED OPTION: Bring your own custom Envoy bootstrap ConfigMap. Provide the name of a ConfigMap with a `bootstrap-config.json` key. + # When specified, Envoy will use this ConfigMap instead of the default provided by the chart. + # WARNING: Use of this setting has the potential to prevent cilium-envoy from starting up, and can cause unexpected behavior (e.g. due to + # syntax error or semantically incorrect configuration). Before submitting an issue, please ensure you have disabled this feature, as support + # cannot be provided for custom Envoy bootstrap configs. + # @schema + # type: [null, string] + # @schema + bootstrapConfigMap: ~ + # -- Annotations to be added to all top-level cilium-envoy objects (resources under templates/cilium-envoy) + annotations: {} + # -- Security Context for cilium-envoy pods. + podSecurityContext: + # -- AppArmorProfile options for the `cilium-agent` and init containers + appArmorProfile: + type: "Unconfined" + # -- Annotations to be added to envoy pods + podAnnotations: {} + # -- Labels to be added to envoy pods + podLabels: {} + # -- Envoy resource limits & requests + # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: {} + # limits: + # cpu: 4000m + # memory: 4Gi + # requests: + # cpu: 100m + # memory: 512Mi + + startupProbe: + # -- Enable startup probe for cilium-envoy + enabled: true + # -- failure threshold of startup probe. 
+ # 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s) + failureThreshold: 105 + # -- interval between checks of the startup probe + periodSeconds: 2 + livenessProbe: + # -- Enable liveness probe for cilium-envoy + enabled: true + # -- failure threshold of liveness probe + failureThreshold: 10 + # -- interval between checks of the liveness probe + periodSeconds: 30 + readinessProbe: + # -- failure threshold of readiness probe + failureThreshold: 3 + # -- interval between checks of the readiness probe + periodSeconds: 30 + securityContext: + # -- User to run the pod with + # runAsUser: 0 + # -- Run the pod with elevated privileges + privileged: false + # -- SELinux options for the `cilium-envoy` container + seLinuxOptions: + level: 's0' + # Running with spc_t since we have removed the privileged mode. + # Users can change it to a different type as long as they have the + # type available on the system. + type: 'spc_t' + capabilities: + # -- Capabilities for the `cilium-envoy` container. + # Even though granted to the container, the cilium-envoy-starter wrapper drops + # all capabilities after forking the actual Envoy process. + # `NET_BIND_SERVICE` is the only capability that can be passed to the Envoy process by + # setting `envoy.securityContext.capabilities.keepNetBindService=true` (in addition to granting the + # capability to the container). + # Note: In case of embedded envoy, the capability must be granted to the cilium-agent container. + envoy: + # Used since cilium proxy uses setting IPPROTO_IP/IP_TRANSPARENT + - NET_ADMIN + # We need it for now but might not need it for >= 5.11 specially + # for the 'SYS_RESOURCE'. + # In >= 5.8 there's already BPF and PERMON capabilities + #- SYS_ADMIN + # Both PERFMON and BPF requires kernel 5.8, container runtime + # cri-o >= v1.22.0 or containerd >= v1.5.0. + # If available, SYS_ADMIN can be removed. + - PERFMON + - BPF + # -- Keep capability `NET_BIND_SERVICE` for Envoy process. + keepCapNetBindService: true + # -- Affinity for cilium-envoy. + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium-envoy + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: cilium.io/no-schedule + operator: NotIn + values: + - "true" + # -- Node selector for cilium-envoy. + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for envoy scheduling to nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # @schema + # type: [null, string] + # @schema + # -- The priority class to use for cilium-envoy. + priorityClassName: ~ + # @schema + # type: [null, string] + # @schema + # -- DNS policy for Cilium envoy pods. + # Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + dnsPolicy: ~ + debug: + admin: + # -- Enable admin interface for cilium-envoy. + # This is useful for debugging and should not be enabled in production. + enabled: false + # -- Port number (bound to loopback interface). 
+      # kubectl port-forward can be used to access the admin interface.
+      port: 9901
+  # -- Configure Cilium Envoy Prometheus options.
+  # Note that some of these apply to either cilium-agent or cilium-envoy.
+  prometheus:
+    # -- Enable prometheus metrics for cilium-envoy
+    enabled: true
+    serviceMonitor:
+      # -- Enable service monitors.
+      # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
+      # Note that this setting applies to both cilium-envoy _and_ cilium-agent
+      # with Envoy enabled.
+      enabled: false
+      # -- Labels to add to ServiceMonitor cilium-envoy
+      labels: {}
+      # -- Annotations to add to ServiceMonitor cilium-envoy
+      annotations: {}
+      # -- Interval for scrape metrics.
+      interval: "10s"
+      # @schema
+      # type: [null, string]
+      # @schema
+      # -- Timeout after which scrape is considered to be failed.
+      scrapeTimeout: ~
+      # -- Specify the Kubernetes namespace where Prometheus expects to find
+      # service monitors configured.
+      # namespace: ""
+      # -- Relabeling configs for the ServiceMonitor cilium-envoy
+      # or for cilium-agent with Envoy configured.
+      relabelings:
+        - sourceLabels:
+            - __meta_kubernetes_pod_node_name
+          targetLabel: node
+          action: replace
+          replacement: ${1}
+      # @schema
+      # type: [null, array]
+      # @schema
+      # -- Metrics relabeling configs for the ServiceMonitor cilium-envoy
+      # or for cilium-agent with Envoy configured.
+      metricRelabelings: ~
+    # -- Serve prometheus metrics for cilium-envoy on the configured port
+    port: "9964"
+# -- Enable/Disable use of node label based identity
+nodeSelectorLabels: false
+# To include or exclude matched resources from cilium node identity evaluation
+# List of labels just like --labels flag (.Values.labels)
+# nodeLabels: ""
+
+# -- Enable resource quotas for priority classes used in the cluster.
+resourceQuotas:
+  enabled: false
+  cilium:
+    hard:
+      # 5k nodes * 2 DaemonSets (Cilium and cilium node init)
+      pods: "10k"
+  operator:
+    hard:
+      # 15 "clusterwide" Cilium Operator pods for HA
+      pods: "15"
+# sessionAffinity: false
+
+# -- Annotations to be added to all cilium-secret namespaces (resources under templates/cilium-secrets-namespace)
+secretsNamespaceAnnotations: {}
+# -- Do not run Cilium agent when running with clean mode. Useful to completely
+# uninstall Cilium as it will stop Cilium from starting and creating artifacts
+# in the node.
+sleepAfterInit: false
+# -- Enable check of service source ranges (currently, only for LoadBalancer).
+svcSourceRangeCheck: true
+# -- Synchronize Kubernetes nodes to kvstore and perform CNP GC.
+synchronizeK8sNodes: true
+# -- Configure TLS configuration in the agent.
+tls:
+  # @schema
+  # type: [null, string]
+  # @schema
+  # -- This configures how the Cilium agent loads the secrets used by TLS-aware CiliumNetworkPolicies
+  # (namely the secrets referenced by terminatingTLS and originatingTLS).
+  # This value is DEPRECATED and will be removed in a future version.
+  # Use `tls.readSecretsOnlyFromSecretsNamespace` instead.
+  # Possible values:
+  #   - local
+  #   - k8s
+  secretsBackend: ~
+  # @schema
+  # type: [null, boolean]
+  # @schema
+  # -- Configure if the Cilium Agent will only look in `tls.secretsNamespace` for
+  # CiliumNetworkPolicy relevant Secrets.
+  # If false, the Cilium Agent will be granted READ (GET/LIST/WATCH) access
+  # to _all_ secrets in the entire cluster. This is not recommended and is
+  # included for backwards compatibility.
+  # This value obsoletes `tls.secretsBackend`, with `true` == `local` in the old
+  # setting, and `false` == `k8s`.
+  readSecretsOnlyFromSecretsNamespace: ~
+  # -- Configures where secrets used in CiliumNetworkPolicies will be looked for
+  secretsNamespace:
+    # -- Create secrets namespace for TLS Interception secrets.
+    create: true
+    # -- Name of TLS Interception secret namespace.
+    name: cilium-secrets
+  # -- Configures settings for synchronization of TLS Interception Secrets
+  secretSync:
+    # @schema
+    # type: [null, boolean]
+    # @schema
+    # -- Enable synchronization of Secrets for TLS Interception. If disabled and
+    # tls.readSecretsOnlyFromSecretsNamespace is set to 'false', then secrets will be read directly by the agent.
+    enabled: ~
+  # -- Base64 encoded PEM values for the CA certificate and private key.
+  # This can be used as common CA to generate certificates used by hubble and clustermesh components.
+  # It is neither required nor used when cert-manager is used to generate the certificates.
+  ca:
+    # -- Optional CA cert. If it is provided, it will be used by cilium to
+    # generate all other certificates. Otherwise, an ephemeral CA is generated.
+    cert: ""
+    # -- Optional CA private key. If it is provided, it will be used by cilium to
+    # generate all other certificates. Otherwise, an ephemeral CA is generated.
+    key: ""
+    # -- Generated certificates validity duration in days. This will be used for auto generated CA.
+    certValidityDuration: 1095
+  # -- Configure the CA trust bundle used for the validation of the certificates
+  # leveraged by hubble and clustermesh. When enabled, it overrides the content of the
+  # 'ca.crt' field of the respective certificates, allowing for CA rotation with no down-time.
+  caBundle:
+    # -- Enable the use of the CA trust bundle.
+    enabled: false
+    # -- Name of the ConfigMap containing the CA trust bundle.
+    name: cilium-root-ca.crt
+    # -- Entry of the ConfigMap containing the CA trust bundle.
+    key: ca.crt
+    # -- Use a Secret instead of a ConfigMap.
+    useSecret: false
+    # If uncommented, creates the ConfigMap and fills it with the specified content.
+    # Otherwise, the ConfigMap is assumed to be already present in .Release.Namespace.
+    #
+    # content: |
+    #   -----BEGIN CERTIFICATE-----
+    #   ...
+    #   -----END CERTIFICATE-----
+    #   -----BEGIN CERTIFICATE-----
+    #   ...
+    #   -----END CERTIFICATE-----
+# -- Tunneling protocol to use in tunneling mode and for ad-hoc tunnels.
+# Possible values:
+#   - ""
+#   - vxlan
+#   - geneve
+# @default -- `"vxlan"`
+tunnelProtocol: ""
+# -- IP family for the underlay.
+# @default -- `"ipv4"`
+underlayProtocol: ""
+# -- Enable native-routing mode or tunneling mode.
+# Possible values:
+#   - ""
+#   - native
+#   - tunnel
+# @default -- `"tunnel"`
+routingMode: ""
+# -- Configure VXLAN and Geneve tunnel port.
+# @default -- Port 8472 for VXLAN, Port 6081 for Geneve
+tunnelPort: 0
+# -- Configure VXLAN and Geneve tunnel source port range hint.
+# @default -- 0-0 to let the kernel driver decide the range
+tunnelSourcePortRange: 0-0
+# -- Configure what the response should be to traffic for a service without backends.
+# Possible values:
+#   - reject (default)
+#   - drop
+serviceNoBackendResponse: reject
+# -- Configure the underlying network MTU to overwrite auto-detected MTU.
+# This value doesn't change the host network interface MTU, i.e. eth0 or ens0.
+# It changes the MTU for cilium_net@cilium_host, cilium_host@cilium_net, +# cilium_vxlan and lxc_health interfaces. +MTU: 0 +# -- Disable the usage of CiliumEndpoint CRD. +disableEndpointCRD: false +wellKnownIdentities: + # -- Enable the use of well-known identities. + enabled: false +etcd: + # -- Enable etcd mode for the agent. + enabled: false + # -- List of etcd endpoints + endpoints: + - https://CHANGE-ME:2379 + # -- Enable use of TLS/SSL for connectivity to etcd. + ssl: false +operator: + # -- Enable the cilium-operator component (required). + enabled: true + # -- Roll out cilium-operator pods automatically when configmap is updated. + rollOutPods: true + # -- cilium-operator image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/operator" + tag: "v1.18.2" + # operator-generic-digest + genericDigest: "sha256:cb4e4ffc5789fd5ff6a534e3b1460623df61cba00f5ea1c7b40153b5efb81805" + # operator-azure-digest + azureDigest: "sha256:9696e9b8219b9a5c16987e072eda2da378d42a32f9305375e56d7380a0c2ba8e" + # operator-aws-digest + awsDigest: "sha256:1cb856fbe265dfbcfe816bd6aa4acaf006ecbb22dcc989116a1a81bb269ea328" + # operator-alibabacloud-digest + alibabacloudDigest: "sha256:612b1d94c179cd8ae239e571e96ebd95662bb5cccb62aacfdf79355aa9cdddc8" + useDigest: true + pullPolicy: "IfNotPresent" + suffix: "" + # -- Number of replicas to run for the cilium-operator deployment + replicas: 2 + # -- The priority class to use for cilium-operator + priorityClassName: "" + # -- DNS policy for Cilium operator pods. + # Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + dnsPolicy: "" + # -- cilium-operator update strategy + updateStrategy: + type: RollingUpdate + rollingUpdate: + # @schema + # type: [integer, string] + # @schema + maxSurge: 25% + # @schema + # type: [integer, string] + # @schema + maxUnavailable: 50% + # -- Affinity for cilium-operator + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + io.cilium/app: operator + # -- Pod topology spread constraints for cilium-operator + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Node labels for cilium-operator pod assignment + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for cilium-operator scheduling to nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + # Toleration for agentNotReadyTaintKey taint is always added to cilium-operator pods. + # @schema + # type: [null, array] + # @schema + tolerations: + - key: "node-role.kubernetes.io/control-plane" + operator: Exists + - key: "node-role.kubernetes.io/master" #deprecated + operator: Exists + - key: "node.kubernetes.io/not-ready" + operator: Exists + - key: "node.cloudprovider.kubernetes.io/uninitialized" + operator: Exists + # -- Additional cilium-operator container arguments. + extraArgs: [] + # -- Additional cilium-operator environment variables. + extraEnv: [] + # -- Additional cilium-operator hostPath mounts. + extraHostPathMounts: [] + # - name: host-mnt-data + # mountPath: /host/mnt/data + # hostPath: /mnt/data + # hostPathType: Directory + # readOnly: true + # mountPropagation: HostToContainer + + # -- Additional cilium-operator volumes. 
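+  # Illustrative sketch only (volume and ConfigMap names are assumptions, not chart defaults):
+  # extraVolumes:
+  #   - name: custom-ca
+  #     configMap:
+  #       name: custom-ca-bundle
+  # extraVolumeMounts:
+  #   - name: custom-ca
+  #     mountPath: /etc/ssl/custom
+  #     readOnly: true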
+ extraVolumes: [] + # -- Additional cilium-operator volumeMounts. + extraVolumeMounts: [] + # -- Annotations to be added to all top-level cilium-operator objects (resources under templates/cilium-operator) + annotations: {} + # -- HostNetwork setting + hostNetwork: true + # -- Security context to be added to cilium-operator pods + podSecurityContext: + seccompProfile: + type: RuntimeDefault + # -- Annotations to be added to cilium-operator pods + podAnnotations: {} + # -- Labels to be added to cilium-operator pods + podLabels: {} + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # @schema + # type: [null, integer, string] + # @schema + # -- Minimum number/percentage of pods that should remain scheduled. + # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # @schema + # type: [null, integer, string] + # @schema + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + # @schema + # type: [null, string] + # @schema + # -- How are unhealthy, but running, pods counted for eviction + unhealthyPodEvictionPolicy: null + # -- cilium-operator resource limits & requests + # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: {} + # limits: + # cpu: 1000m + # memory: 1Gi + # requests: + # cpu: 100m + # memory: 128Mi + + # -- Security context to be added to cilium-operator pods + securityContext: + capabilities: + drop: + - ALL + allowPrivilegeEscalation: false + # runAsUser: 0 + + # -- Interval for endpoint garbage collection. + endpointGCInterval: "5m0s" + # -- Interval for cilium node garbage collection. + nodeGCInterval: "5m0s" + # -- Interval for identity garbage collection. + identityGCInterval: "15m0s" + # -- Timeout for identity heartbeats. + identityHeartbeatTimeout: "30m0s" + pprof: + # -- Enable pprof for cilium-operator + enabled: false + # -- Configure pprof listen address for cilium-operator + address: localhost + # -- Configure pprof listen port for cilium-operator + port: 6061 + # -- Enable prometheus metrics for cilium-operator on the configured port at + # /metrics + prometheus: + metricsService: false + enabled: true + port: 9963 + serviceMonitor: + # -- Enable service monitors. + # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + enabled: false + # -- Labels to add to ServiceMonitor cilium-operator + labels: {} + # -- Annotations to add to ServiceMonitor cilium-operator + annotations: {} + # -- jobLabel to add for ServiceMonitor cilium-operator + jobLabel: "" + # -- Interval for scrape metrics. + interval: "10s" + # @schema + # type: [null, string] + # @schema + # -- Timeout after which scrape is considered to be failed. 
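+      # e.g. scrapeTimeout: "10s" (assumption: Prometheus requires the timeout to be at most the scrape interval)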
+ scrapeTimeout: ~ + # @schema + # type: [null, array] + # @schema + # -- Relabeling configs for the ServiceMonitor cilium-operator + relabelings: ~ + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor cilium-operator + metricRelabelings: ~ + # -- Grafana dashboards for cilium-operator + # grafana can import dashboards based on the label and value + # ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards + dashboards: + enabled: false + label: grafana_dashboard + # @schema + # type: [null, string] + # @schema + namespace: ~ + labelValue: "1" + annotations: {} + # -- Skip CRDs creation for cilium-operator + skipCRDCreation: false + # -- Remove Cilium node taint from Kubernetes nodes that have a healthy Cilium + # pod running. + removeNodeTaints: true + # @schema + # type: [null, boolean] + # @schema + # -- Taint nodes where Cilium is scheduled but not running. This prevents pods + # from being scheduled to nodes where Cilium is not the default CNI provider. + # @default -- same as removeNodeTaints + setNodeTaints: ~ + # -- Set Node condition NetworkUnavailable to 'false' with the reason + # 'CiliumIsUp' for nodes that have a healthy Cilium pod. + setNodeNetworkStatus: true + unmanagedPodWatcher: + # -- Restart any pod that are not managed by Cilium. + restart: true + # -- Interval, in seconds, to check if there are any pods that are not + # managed by Cilium. + intervalSeconds: 15 +nodeinit: + # -- Enable the node initialization DaemonSet + enabled: false + # -- node-init image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/startup-script" + tag: "1755531540-60ee83e" + digest: "sha256:0c91245afb3a4ff78b5cc8c09226806e94a9a10eb0adb74a85e0eeed2a5cae8c" + useDigest: true + pullPolicy: "IfNotPresent" + # -- The priority class to use for the nodeinit pod. + priorityClassName: "" + # -- node-init update strategy + updateStrategy: + type: RollingUpdate + # -- Additional nodeinit environment variables. + extraEnv: [] + # -- Additional nodeinit volumes. + extraVolumes: [] + # -- Additional nodeinit volumeMounts. + extraVolumeMounts: [] + # -- Affinity for cilium-nodeinit + affinity: {} + # -- Node labels for nodeinit pod assignment + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for nodeinit scheduling to nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # -- Annotations to be added to all top-level nodeinit objects (resources under templates/cilium-nodeinit) + annotations: {} + # -- Annotations to be added to node-init pods. + podAnnotations: {} + # -- Labels to be added to node-init pods. + podLabels: {} + # -- Security Context for cilium-node-init pods. + podSecurityContext: + # -- AppArmorProfile options for the `cilium-node-init` and init containers + appArmorProfile: + type: "Unconfined" + # -- nodeinit resource limits & requests + # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + requests: + cpu: 100m + memory: 100Mi + # -- Security context to be added to nodeinit pods. 
+ securityContext: + allowPrivilegeEscalation: false + privileged: false + seLinuxOptions: + level: 's0' + # Running with spc_t since we have removed the privileged mode. + # Users can change it to a different type as long as they have the + # type available on the system. + type: 'spc_t' + capabilities: + add: + # Used in iptables. Consider removing once we are iptables-free + - SYS_MODULE + # Used for nsenter + - NET_ADMIN + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + # -- bootstrapFile is the location of the file where the bootstrap timestamp is + # written by the node-init DaemonSet + bootstrapFile: "/tmp/cilium-bootstrap.d/cilium-bootstrap-time" + # -- startup offers way to customize startup nodeinit script (pre and post position) + startup: + preScript: "" + postScript: "" + # -- prestop offers way to customize prestop nodeinit script (pre and post position) + prestop: + preScript: "" + postScript: "" +preflight: + # -- Enable Cilium pre-flight resources (required for upgrade) + enabled: false + # -- Cilium pre-flight image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/cilium" + tag: "v1.18.2" + # cilium-digest + digest: "sha256:858f807ea4e20e85e3ea3240a762e1f4b29f1cb5bbd0463b8aa77e7b097c0667" + useDigest: true + pullPolicy: "IfNotPresent" + envoy: + # -- Envoy pre-flight image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/cilium-envoy" + tag: "v1.34.7-1757592137-1a52bb680a956879722f48c591a2ca90f7791324" + pullPolicy: "IfNotPresent" + digest: "sha256:7932d656b63f6f866b6732099d33355184322123cfe1182e6f05175a3bc2e0e0" + useDigest: true + # -- The priority class to use for the preflight pod. + priorityClassName: "" + # -- preflight update strategy + updateStrategy: + type: RollingUpdate + # -- Additional preflight environment variables. + extraEnv: [] + # -- Additional preflight volumes. + extraVolumes: [] + # -- Additional preflight volumeMounts. + extraVolumeMounts: [] + # -- Affinity for cilium-preflight + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium + # -- Node labels for preflight pod assignment + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for preflight scheduling to nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # -- Annotations to be added to all top-level preflight objects (resources under templates/cilium-preflight) + annotations: {} + # -- Security context to be added to preflight pods. + podSecurityContext: {} + # -- Annotations to be added to preflight pods + podAnnotations: {} + # -- Labels to be added to the preflight pod. + podLabels: {} + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # @schema + # type: [null, integer, string] + # @schema + # -- Minimum number/percentage of pods that should remain scheduled. 
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # @schema + # type: [null, integer, string] + # @schema + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + # @schema + # type: [null, string] + # @schema + # -- How are unhealthy, but running, pods counted for eviction + unhealthyPodEvictionPolicy: null + # -- preflight resource limits & requests + # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: {} + # limits: + # cpu: 4000m + # memory: 4Gi + # requests: + # cpu: 100m + # memory: 512Mi + + readinessProbe: + # -- For how long kubelet should wait before performing the first probe + initialDelaySeconds: 5 + # -- interval between checks of the readiness probe + periodSeconds: 5 + # -- Security context to be added to preflight pods + securityContext: + allowPrivilegeEscalation: false + # runAsUser: 0 + + # -- Path to write the `--tofqdns-pre-cache` file to. + tofqdnsPreCache: "" + # -- Configure termination grace period for preflight Deployment and DaemonSet. + terminationGracePeriodSeconds: 1 + # -- By default we should always validate the installed CNPs before upgrading + # Cilium. This will make sure the user will have the policies deployed in the + # cluster with the right schema. + validateCNPs: true +# -- Explicitly enable or disable priority class. +# .Capabilities.KubeVersion is unsettable in `helm template` calls, +# it depends on k8s libraries version that Helm was compiled against. +# This option allows to explicitly disable setting the priority class, which +# is useful for rendering charts for gke clusters in advance. +enableCriticalPriorityClass: true +# disableEnvoyVersionCheck removes the check for Envoy, which can be useful +# on AArch64 as the images do not currently ship a version of Envoy. +#disableEnvoyVersionCheck: false +clustermesh: + # -- Deploy clustermesh-apiserver for clustermesh + useAPIServer: false + # -- The maximum number of clusters to support in a ClusterMesh. This value + # cannot be changed on running clusters, and all clusters in a ClusterMesh + # must be configured with the same value. Values > 255 will decrease the + # maximum allocatable cluster-local identities. + # Supported values are 255 and 511. + maxConnectedClusters: 255 + # -- Enable the synchronization of Kubernetes EndpointSlices corresponding to + # the remote endpoints of appropriately-annotated global services through ClusterMesh + enableEndpointSliceSynchronization: false + # -- Enable Multi-Cluster Services API support + enableMCSAPISupport: false + # -- Control whether policy rules assume by default the local cluster if not explicitly selected + policyDefaultLocalCluster: false + # -- Annotations to be added to all top-level clustermesh objects (resources under templates/clustermesh-apiserver and templates/clustermesh-config) + annotations: {} + # -- Clustermesh explicit configuration. + config: + # -- Enable the Clustermesh explicit configuration. + enabled: false + # -- Default dns domain for the Clustermesh API servers + # This is used in the case cluster addresses are not provided + # and IPs are used. + domain: mesh.cilium.io + # -- List of clusters to be peered in the mesh. + clusters: [] + # clusters: + # # -- Name of the cluster + # - name: cluster1 + # # -- Address of the cluster, use this if you created DNS records for + # # the cluster Clustermesh API server. 
+ # address: cluster1.mesh.cilium.io + # # -- Port of the cluster Clustermesh API server. + # port: 2379 + # # -- IPs of the cluster Clustermesh API server, use multiple ones when + # # you have multiple IPs to access the Clustermesh API server. + # ips: + # - 172.18.255.201 + # # -- base64 encoded PEM values for the cluster client certificate, private key and certificate authority. + # # These fields can (and should) be omitted in case the CA is shared across clusters. In that case, the + # # "remote" private key and certificate available in the local cluster are automatically used instead. + # tls: + # cert: "" + # key: "" + # caCert: "" + apiserver: + # -- Clustermesh API server image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/clustermesh-apiserver" + tag: "v1.18.2" + # clustermesh-apiserver-digest + digest: "sha256:cd689a07bfc7622e812fef023cb277fdc695b60a960d36f32f93614177a7a0f6" + useDigest: true + pullPolicy: "IfNotPresent" + # -- TCP port for the clustermesh-apiserver health API. + healthPort: 9880 + # -- Configuration for the clustermesh-apiserver readiness probe. + readinessProbe: {} + etcd: + # The etcd binary is included in the clustermesh API server image, so the same image from above is reused. + # Independent override isn't supported, because clustermesh-apiserver is tested against the etcd version it is + # built with. + + # -- Specifies the resources for etcd container in the apiserver + resources: {} + # requests: + # cpu: 200m + # memory: 256Mi + # limits: + # cpu: 1000m + # memory: 256Mi + + # -- Security context to be added to clustermesh-apiserver etcd containers + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + # -- lifecycle setting for the etcd container + lifecycle: {} + init: + # -- Specifies the resources for etcd init container in the apiserver + resources: {} + # requests: + # cpu: 100m + # memory: 100Mi + # limits: + # cpu: 100m + # memory: 100Mi + + # -- Additional arguments to `clustermesh-apiserver etcdinit`. + extraArgs: [] + # -- Additional environment variables to `clustermesh-apiserver etcdinit`. + extraEnv: [] + # @schema + # enum: [Disk, Memory] + # @schema + # -- Specifies whether etcd data is stored in a temporary volume backed by + # the node's default medium, such as disk, SSD or network storage (Disk), or + # RAM (Memory). The Memory option enables improved etcd read and write + # performance at the cost of additional memory usage, which counts against + # the memory limits of the container. + storageMedium: Disk + kvstoremesh: + # -- Enable KVStoreMesh. KVStoreMesh caches the information retrieved + # from the remote clusters in the local etcd instance (deprecated - KVStoreMesh will always be enabled once the option is removed). + enabled: true + # -- TCP port for the KVStoreMesh health API. + healthPort: 9881 + # -- Configuration for the KVStoreMesh readiness probe. + readinessProbe: {} + # -- Additional KVStoreMesh arguments. + extraArgs: [] + # -- Additional KVStoreMesh environment variables. + extraEnv: [] + # -- Resource requests and limits for the KVStoreMesh container + resources: {} + # requests: + # cpu: 100m + # memory: 64Mi + # limits: + # cpu: 1000m + # memory: 1024M + + # -- Additional KVStoreMesh volumeMounts. 
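+      # Hypothetical mount (volume name and path are placeholders):
+      # extraVolumeMounts:
+      #   - name: extra-ca
+      #     mountPath: /etc/extra-ca
+      #     readOnly: true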
+ extraVolumeMounts: [] + # -- KVStoreMesh Security context + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + # -- lifecycle setting for the KVStoreMesh container + lifecycle: {} + # -- Specify the KVStore mode when running KVStoreMesh + # Supported values: + # - "internal": remote cluster identities are cached in etcd that runs as a sidecar within ``clustermesh-apiserver`` pod. + # - "external": ``clustermesh-apiserver`` will sync remote cluster information to the etcd used as kvstore. This can't be enabled with crd identity allocation mode. + kvstoreMode: "internal" + service: + # -- The type of service used for apiserver access. + type: NodePort + # -- Optional port to use as the node port for apiserver access. + # + # WARNING: make sure to configure a different NodePort in each cluster if + # kube-proxy replacement is enabled, as Cilium is currently affected by a known + # bug (#24692) when NodePorts are handled by the KPR implementation. If a service + # with the same NodePort exists both in the local and the remote cluster, all + # traffic originating from inside the cluster and targeting the corresponding + # NodePort will be redirected to a local backend, regardless of whether the + # destination node belongs to the local or the remote cluster. + nodePort: 32379 + # -- Annotations for the clustermesh-apiserver service. + # Example annotations to configure an internal load balancer on different cloud providers: + # * AKS: service.beta.kubernetes.io/azure-load-balancer-internal: "true" + # * EKS: service.beta.kubernetes.io/aws-load-balancer-scheme: "internal" + # * GKE: networking.gke.io/load-balancer-type: "Internal" + annotations: {} + # -- Labels for the clustermesh-apiserver service. + labels: {} + # @schema + # enum: [Local, Cluster] + # @schema + # -- The externalTrafficPolicy of service used for apiserver access. + externalTrafficPolicy: Cluster + # @schema + # enum: [Local, Cluster] + # @schema + # -- The internalTrafficPolicy of service used for apiserver access. + internalTrafficPolicy: Cluster + # @schema + # enum: [HAOnly, Always, Never] + # @schema + # -- Defines when to enable session affinity. + # Each replica in a clustermesh-apiserver deployment runs its own discrete + # etcd cluster. Remote clients connect to one of the replicas through a + # shared Kubernetes Service. A client reconnecting to a different backend + # will require a full resync to ensure data integrity. Session affinity + # can reduce the likelihood of this happening, but may not be supported + # by all cloud providers. + # Possible values: + # - "HAOnly" (default) Only enable session affinity for deployments with more than 1 replica. + # - "Always" Always enable session affinity. + # - "Never" Never enable session affinity. Useful in environments where + # session affinity is not supported, but may lead to slightly + # degraded performance due to more frequent reconnections. + enableSessionAffinity: "HAOnly" + # @schema + # type: [null, string] + # @schema + # -- Configure a loadBalancerClass. + # Allows to configure the loadBalancerClass on the clustermesh-apiserver + # LB service in case the Service type is set to LoadBalancer + # (requires Kubernetes 1.24+). + loadBalancerClass: ~ + # @schema + # type: [null, string] + # @schema + # -- Configure a specific loadBalancerIP. + # Allows to configure a specific loadBalancerIP on the clustermesh-apiserver + # LB service in case the Service type is set to LoadBalancer. 
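+      # Sketch only (the address is a documentation placeholder; support is provider-dependent):
+      # type: LoadBalancer
+      # loadBalancerIP: "203.0.113.10"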
+ loadBalancerIP: ~ + # -- Configure loadBalancerSourceRanges. + # Allows to configure the source IP ranges allowed to access the + # clustermesh-apiserver LB service in case the Service type is set to LoadBalancer. + loadBalancerSourceRanges: [] + # -- Number of replicas run for the clustermesh-apiserver deployment. + replicas: 1 + # -- lifecycle setting for the apiserver container + lifecycle: {} + # -- terminationGracePeriodSeconds for the clustermesh-apiserver deployment + terminationGracePeriodSeconds: 30 + # -- Additional clustermesh-apiserver arguments. + extraArgs: [] + # -- Additional clustermesh-apiserver environment variables. + extraEnv: [] + # -- Additional clustermesh-apiserver volumes. + extraVolumes: [] + # -- Additional clustermesh-apiserver volumeMounts. + extraVolumeMounts: [] + # -- Security context to be added to clustermesh-apiserver containers + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + # -- Security context to be added to clustermesh-apiserver pods + podSecurityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 + fsGroup: 65532 + # -- Annotations to be added to clustermesh-apiserver pods + podAnnotations: {} + # -- Labels to be added to clustermesh-apiserver pods + podLabels: {} + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # @schema + # type: [null, integer, string] + # @schema + # -- Minimum number/percentage of pods that should remain scheduled. + # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # @schema + # type: [null, integer, string] + # @schema + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + # @schema + # type: [null, string] + # @schema + # -- How are unhealthy, but running, pods counted for eviction + unhealthyPodEvictionPolicy: null + # -- Resource requests and limits for the clustermesh-apiserver + resources: {} + # requests: + # cpu: 100m + # memory: 64Mi + # limits: + # cpu: 1000m + # memory: 1024M + + # -- Affinity for clustermesh.apiserver + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + k8s-app: clustermesh-apiserver + topologyKey: kubernetes.io/hostname + # -- Pod topology spread constraints for clustermesh-apiserver + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Node labels for pod assignment + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for pod assignment on nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: [] + # -- clustermesh-apiserver update strategy + updateStrategy: + type: RollingUpdate + rollingUpdate: + # @schema + # type: [integer, string] + # @schema + maxSurge: 1 + # @schema + # type: [integer, string] + # @schema + maxUnavailable: 0 + # -- The priority class to use for clustermesh-apiserver + priorityClassName: "" + tls: + # -- Configure the clustermesh authentication mode. + # Supported values: + # - legacy: All clusters access remote clustermesh instances with the same + # username (i.e., remote). 
The "remote" certificate must be + # generated with CN=remote if provided manually. + # - migration: Intermediate mode required to upgrade from legacy to cluster + # (and vice versa) with no disruption. Specifically, it enables + # the creation of the per-cluster usernames, while still using + # the common one for authentication. The "remote" certificate must + # be generated with CN=remote if provided manually (same as legacy). + # - cluster: Each cluster accesses remote etcd instances with a username + # depending on the local cluster name (i.e., remote-). + # The "remote" certificate must be generated with CN=remote- + # if provided manually. Cluster mode is meaningful only when the same + # CA is shared across all clusters part of the mesh. + authMode: legacy + # -- Allow users to provide their own certificates + # Users may need to provide their certificates using + # a mechanism that requires they provide their own secrets. + # This setting does not apply to any of the auto-generated + # mechanisms below, it only restricts the creation of secrets + # via the `tls-provided` templates. + enableSecrets: true + # -- Configure automatic TLS certificates generation. + # A Kubernetes CronJob is used the generate any + # certificates not provided by the user at installation + # time. + auto: + # -- When set to true, automatically generate a CA and certificates to + # enable mTLS between clustermesh-apiserver and external workload instances. + # If set to false, the certs to be provided by setting appropriate values below. + enabled: true + # Sets the method to auto-generate certificates. Supported values: + # - helm: This method uses Helm to generate all certificates. + # - cronJob: This method uses a Kubernetes CronJob the generate any + # certificates not provided by the user at installation + # time. + # - certmanager: This method use cert-manager to generate & rotate certificates. + method: helm + # -- Generated certificates validity duration in days. + certValidityDuration: 1095 + # -- Schedule for certificates regeneration (regardless of their expiration date). + # Only used if method is "cronJob". If nil, then no recurring job will be created. + # Instead, only the one-shot job is deployed to generate the certificates at + # installation time. + # + # Due to the out-of-band distribution of client certs to external workloads the + # CA is (re)regenerated only if it is not provided as a helm value and the k8s + # secret is manually deleted. + # + # Defaults to none. Commented syntax gives midnight of the first day of every + # fourth month. For syntax, see + # https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax + # schedule: "0 0 1 */4 *" + + # [Example] + # certManagerIssuerRef: + # group: cert-manager.io + # kind: ClusterIssuer + # name: ca-issuer + # -- certmanager issuer used when clustermesh.apiserver.tls.auto.method=certmanager. + certManagerIssuerRef: {} + # -- base64 encoded PEM values for the clustermesh-apiserver server certificate and private key. + # Used if 'auto' is not enabled. + server: + cert: "" + key: "" + # -- Extra DNS names added to certificate when it's auto generated + extraDnsNames: [] + # -- Extra IP addresses added to certificate when it's auto generated + extraIpAddresses: [] + # -- base64 encoded PEM values for the clustermesh-apiserver admin certificate and private key. + # Used if 'auto' is not enabled. 
+ admin: + cert: "" + key: "" + # -- base64 encoded PEM values for the clustermesh-apiserver client certificate and private key. + # Used if 'auto' is not enabled. + client: + cert: "" + key: "" + # -- base64 encoded PEM values for the clustermesh-apiserver remote cluster certificate and private key. + # Used if 'auto' is not enabled. + remote: + cert: "" + key: "" + # clustermesh-apiserver Prometheus metrics configuration + metrics: + # -- Enables exporting apiserver metrics in OpenMetrics format. + enabled: true + # -- Configure the port the apiserver metric server listens on. + port: 9962 + kvstoremesh: + # -- Enables exporting KVStoreMesh metrics in OpenMetrics format. + enabled: true + # -- Configure the port the KVStoreMesh metric server listens on. + port: 9964 + etcd: + # -- Enables exporting etcd metrics in OpenMetrics format. + enabled: true + # -- Set level of detail for etcd metrics; specify 'extensive' to include server side gRPC histogram metrics. + mode: basic + # -- Configure the port the etcd metric server listens on. + port: 9963 + serviceMonitor: + # -- Enable service monitor. + # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + enabled: false + # -- Labels to add to ServiceMonitor clustermesh-apiserver + labels: {} + # -- Annotations to add to ServiceMonitor clustermesh-apiserver + annotations: {} + # -- Specify the Kubernetes namespace where Prometheus expects to find + # service monitors configured. + # namespace: "" + + # -- Interval for scrape metrics (apiserver metrics) + interval: "10s" + # @schema + # type: [null, string] + # @schema + # -- Timeout after which scrape is considered to be failed. + scrapeTimeout: ~ + # @schema + # type: [null, array] + # @schema + # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics) + relabelings: ~ + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics) + metricRelabelings: ~ + kvstoremesh: + # -- Interval for scrape metrics (KVStoreMesh metrics) + interval: "10s" + # @schema + # type: [null, string] + # @schema + # -- Timeout after which scrape is considered to be failed. + scrapeTimeout: ~ + # @schema + # type: [null, array] + # @schema + # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics) + relabelings: ~ + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics) + metricRelabelings: ~ + etcd: + # -- Interval for scrape metrics (etcd metrics) + interval: "10s" + # @schema + # type: [null, string] + # @schema + # -- Timeout after which scrape is considered to be failed. + scrapeTimeout: ~ + # @schema + # type: [null, array] + # @schema + # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics) + relabelings: ~ + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics) + metricRelabelings: ~ +# -- Configure cgroup related configuration +cgroup: + autoMount: + # -- Enable auto mount of cgroup2 filesystem. + # When `autoMount` is enabled, cgroup2 filesystem is mounted at + # `cgroup.hostRoot` path on the underlying host and inside the cilium agent pod. 
+# -- Configure cgroup related configuration
+cgroup:
+  autoMount:
+    # -- Enable auto mount of cgroup2 filesystem.
+    # When `autoMount` is enabled, cgroup2 filesystem is mounted at
+    # `cgroup.hostRoot` path on the underlying host and inside the cilium agent pod.
+    # If users disable `autoMount`, it's expected that users have mounted
+    # cgroup2 filesystem at the specified `cgroup.hostRoot` volume, and then the
+    # volume will be mounted inside the cilium agent pod at the same path.
+    enabled: false
+    # -- Init Container Cgroup Automount resource limits & requests
+    resources: {}
+    # limits:
+    #   cpu: 100m
+    #   memory: 128Mi
+    # requests:
+    #   cpu: 100m
+    #   memory: 128Mi
+  # -- Configure cgroup root where cgroup2 filesystem is mounted on the host (see also: `cgroup.autoMount`)
+  hostRoot: /sys/fs/cgroup
+# -- Configure sysctl override described in #20072.
+sysctlfix:
+  # -- Enable the sysctl override. When enabled, the init container will mount the /proc of the host so that the `sysctlfix` utility can execute.
+  enabled: true
+# -- Configure whether to unload DNS policy rules on graceful shutdown
+# dnsPolicyUnloadOnShutdown: false
+
+# -- Configure the key of the taint indicating that Cilium is not ready on the node.
+# When set to a value starting with `ignore-taint.cluster-autoscaler.kubernetes.io/`, the Cluster Autoscaler will ignore the taint in its decisions, allowing the cluster to scale up.
+agentNotReadyTaintKey: "node.cilium.io/agent-not-ready"
+dnsProxy:
+  # -- Timeout (in seconds) when closing the connection between the DNS proxy and the upstream server. If set to 0, the connection is closed immediately (with TCP RST). If set to -1, the connection is closed asynchronously in the background.
+  socketLingerTimeout: 10
+  # -- DNS response code for rejecting DNS requests, available options are '[nameError refused]'.
+  dnsRejectResponseCode: refused
+  # -- Allow the DNS proxy to compress responses to endpoints when they exceed 512 bytes, or the limit advertised via the EDNS0 option if present.
+  enableDnsCompression: true
+  # -- Maximum number of IPs to maintain per FQDN name for each endpoint.
+  endpointMaxIpPerHostname: 1000
+  # -- Time during which idle but previously active connections with expired DNS lookups are still considered alive.
+  idleConnectionGracePeriod: 0s
+  # -- Maximum number of IPs to retain for expired DNS lookups with still-active connections.
+  maxDeferredConnectionDeletes: 10000
+  # -- The minimum time, in seconds, to use DNS data for toFQDNs policies. If
+  # the upstream DNS server returns a DNS record with a shorter TTL, Cilium
+  # overwrites the TTL with this value. Setting this value to zero means that
+  # Cilium will honor the TTLs returned by the upstream DNS server.
+  minTtl: 0
+  # -- DNS cache data at this path is preloaded on agent startup.
+  preCache: ""
+  # -- Global port on which the in-agent DNS proxy should listen. The default of 0 means an OS-assigned port.
+  proxyPort: 0
+  # -- The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information.
+  proxyResponseMaxDelay: 100ms
+  # -- DNS proxy operation mode (true/false, or unset to use version dependent defaults)
+  # enableTransparentMode: true
+  # -- Pre-allocate ToFQDN identities. This reduces DNS proxy tail latency, at the potential cost of some
+  # unnecessary policymap entries. Disable this if you have a large (200+) number of unique ToFQDN selectors.
+  preAllocateIdentities: true
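+  # [Example] An illustrative sketch of pinning the proxy to a fixed port and
+  # enforcing a one-hour minimum TTL for toFQDNs data; both values are
+  # hypothetical, not recommendations:
+  # proxyPort: 10001
+  # minTtl: 3600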
+# -- SCTP Configuration Values
+sctp:
+  # -- Enable SCTP support. NOTE: Currently, SCTP support does not include rewriting ports or multihoming.
+  enabled: false
+# -- Enable Non-Default-Deny policies
+enableNonDefaultDenyPolicies: true
+# Configuration for types of authentication for Cilium (beta)
+authentication:
+  # -- Enable authentication processing and garbage collection.
+  # Note that if disabled, policy enforcement will still block requests that require authentication,
+  # but the resulting authentication requests will not be processed, so those requests will never be allowed.
+  enabled: true
+  # -- Buffer size of the channel Cilium uses to receive authentication events from the signal map.
+  queueSize: 1024
+  # -- Buffer size of the channel Cilium uses to receive certificate expiration events from auth handlers.
+  rotatedIdentitiesQueueSize: 1024
+  # -- Interval for garbage collection of auth map entries.
+  gcInterval: "5m0s"
+  # Configuration for Cilium's service-to-service mutual authentication using TLS handshakes.
+  # Note that this is not full mTLS support without also enabling encryption of some form.
+  # Current encryption options are WireGuard or IPsec, configured in the encryption block above.
+  mutual:
+    # -- Port on the agent where mutual authentication handshakes between agents will be performed
+    port: 4250
+    # -- Timeout for connecting to the remote node TCP socket
+    connectTimeout: 5s
+    # Settings for SPIRE
+    spire:
+      # -- Enable SPIRE integration (beta)
+      enabled: false
+      # -- Annotations to be added to all top-level spire objects (resources under templates/spire)
+      annotations: {}
+      # Settings to control the SPIRE installation and configuration
+      install:
+        # -- Enable SPIRE installation.
+        # This will take effect only if authentication.mutual.spire.enabled is true.
+        enabled: true
+        # -- SPIRE namespace to install into
+        namespace: cilium-spire
+        # -- SPIRE namespace already exists. Set to true if Helm should not create, manage, and import the SPIRE namespace.
+        existingNamespace: false
+        # -- init container image of SPIRE agent and server
+        initImage:
+          # @schema
+          # type: [null, string]
+          # @schema
+          override: ~
+          repository: "docker.io/library/busybox"
+          tag: "1.37.0"
+          digest: "sha256:ab33eacc8251e3807b85bb6dba570e4698c3998eca6f0fc2ccb60575a563ea74"
+          useDigest: true
+          pullPolicy: "IfNotPresent"
+        # SPIRE agent configuration
+        agent:
+          # -- The priority class to use for the spire agent
+          priorityClassName: ""
+          # -- SPIRE agent image
+          image:
+            # @schema
+            # type: [null, string]
+            # @schema
+            override: ~
+            repository: "ghcr.io/spiffe/spire-agent"
+            tag: "1.12.4"
+            digest: "sha256:163970884fba18860cac93655dc32b6af85a5dcf2ebb7e3e119a10888eff8fcd"
+            useDigest: true
+            pullPolicy: "IfNotPresent"
+          # -- SPIRE agent service account
+          serviceAccount:
+            create: true
+            name: spire-agent
+          # -- SPIRE agent annotations
+          annotations: {}
+          # -- SPIRE agent labels
+          labels: {}
+          # -- container resource limits & requests
+          resources: {}
+          # -- SPIRE Workload Attestor kubelet verification.
+          skipKubeletVerification: true
+          # -- SPIRE agent tolerations configuration
+          # By default it follows the same tolerations as the agent itself
+          # to allow the Cilium agent on this node to connect to SPIRE.
+          # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+          tolerations:
+            - key: node.kubernetes.io/not-ready
+              effect: NoSchedule
+            - key: node-role.kubernetes.io/master
+              effect: NoSchedule
+            - key: node-role.kubernetes.io/control-plane
+              effect: NoSchedule
+            - key: node.cloudprovider.kubernetes.io/uninitialized
+              effect: NoSchedule
+              value: "true"
+            - key: CriticalAddonsOnly
+              operator: "Exists"
+          # -- SPIRE agent affinity configuration
+          affinity: {}
+          # -- SPIRE agent nodeSelector configuration
+          # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+          nodeSelector: {}
+          # -- Security context to be added to spire agent pods.
+          # SecurityContext holds pod-level security attributes and common container settings.
+          # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+          podSecurityContext: {}
+          # -- Security context to be added to spire agent containers.
+          # SecurityContext holds container-level security attributes and common container settings.
+          # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+          securityContext: {}
+        server:
+          # -- The priority class to use for the spire server
+          priorityClassName: ""
+          # -- SPIRE server image
+          image:
+            # @schema
+            # type: [null, string]
+            # @schema
+            override: ~
+            repository: "ghcr.io/spiffe/spire-server"
+            tag: "1.12.4"
+            digest: "sha256:34147f27066ab2be5cc10ca1d4bfd361144196467155d46c45f3519f41596e49"
+            useDigest: true
+            pullPolicy: "IfNotPresent"
+          # -- SPIRE server service account
+          serviceAccount:
+            create: true
+            name: spire-server
+          # -- SPIRE server init containers
+          initContainers: []
+          # -- SPIRE server annotations
+          annotations: {}
+          # -- SPIRE server labels
+          labels: {}
+          # -- container resource limits & requests
+          resources: {}
+          # SPIRE server service configuration
+          service:
+            # -- Service type for the SPIRE server service
+            type: ClusterIP
+            # -- Annotations to be added to the SPIRE server service
+            annotations: {}
+            # -- Labels to be added to the SPIRE server service
+            labels: {}
+          # -- SPIRE server affinity configuration
+          affinity: {}
+          # -- SPIRE server nodeSelector configuration
+          # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+          nodeSelector: {}
+          # -- SPIRE server tolerations configuration
+          # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+          tolerations: []
+          # SPIRE server data storage configuration
+          dataStorage:
+            # -- Enable SPIRE server data storage
+            enabled: true
+            # -- Size of the SPIRE server data storage
+            size: 1Gi
+            # -- Access mode of the SPIRE server data storage
+            accessMode: ReadWriteOnce
+            # @schema
+            # type: [null, string]
+            # @schema
+            # -- StorageClass of the SPIRE server data storage
+            storageClass: null
+          # -- Security context to be added to spire server pods.
+          # SecurityContext holds pod-level security attributes and common container settings.
+          # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+          podSecurityContext: {}
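+          # [Example] A hedged sketch of pinning the SPIRE server datastore to
+          # a named StorageClass; "fast-ssd" is a hypothetical class that must
+          # already exist in the cluster:
+          # dataStorage:
+          #   enabled: true
+          #   size: 1Gi
+          #   storageClass: fast-ssd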
+          # -- Security context to be added to spire server containers.
+          # SecurityContext holds container-level security attributes and common container settings.
+          # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+          securityContext: {}
+          # SPIRE CA configuration
+          ca:
+            # -- SPIRE CA key type
+            # AWS requires the use of RSA. EC cryptography is not supported.
+            keyType: "rsa-4096"
+            # -- SPIRE CA Subject
+            subject:
+              country: "US"
+              organization: "SPIRE"
+              commonName: "Cilium SPIRE CA"
+      # @schema
+      # type: [null, string]
+      # @schema
+      # -- SPIRE server address used by Cilium Operator
+      #
+      # If a k8s Service DNS name along with a port number is used (i.e., the <service-name>.<namespace>.svc(.*):<port-number> format),
+      # Cilium Operator will resolve its address by looking up the clusterIP from the Service resource.
+      #
+      # Example values: 10.0.0.1:8081, spire-server.cilium-spire.svc:8081
+      serverAddress: ~
+      # -- SPIFFE trust domain to use for fetching certificates
+      trustDomain: spiffe.cilium
+      # -- SPIRE socket path where the SPIRE delegated api agent is listening
+      adminSocketPath: /run/spire/sockets/admin.sock
+      # -- SPIRE socket path where the SPIRE workload agent is listening.
+      # Applies to both the Cilium Agent and Operator
+      agentSocketPath: /run/spire/sockets/agent/agent.sock
+      # -- SPIRE connection timeout
+      connectionTimeout: 30s
+# -- Enable Internal Traffic Policy
+enableInternalTrafficPolicy: true
+# -- Enable LoadBalancer IP Address Management
+enableLBIPAM: true
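+# [Example] A minimal sketch of enabling mutual authentication backed by the
+# bundled SPIRE install configured above; illustrative only, assuming the
+# chart-default namespace and socket paths rather than a production setup:
+# authentication:
+#   enabled: true
+#   mutual:
+#     spire:
+#       enabled: true
+#       install:
+#         enabled: true
+#         namespace: cilium-spire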