cilium 1.18.2

This commit is contained in:
Philip Haupt
2025-10-24 22:23:43 +02:00
parent 92c95645af
commit d796c11b41
5 changed files with 473 additions and 150 deletions

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -3,7 +3,6 @@ apiVersion: cilium.io/v2alpha1
kind: CiliumLoadBalancerIPPool
metadata:
name: ip-pool
namespace: kube-system
spec:
blocks:
- start: 192.168.0.129

View File

@@ -10,7 +10,7 @@ resources:
helmCharts:
- name: cilium
repo: https://helm.cilium.io
version: 1.17.8
version: 1.18.2
releaseName: cilium
includeCRDs: true
namespace: kube-system

View File

@@ -20,7 +20,7 @@ commonLabels: {}
# Cilium will not change critical values to ensure continued operation
# This flag is not required for new installations.
# For example: '1.7', '1.8', '1.9'
upgradeCompatibility: 1.17.1
upgradeCompatibility: 1.17.8
debug:
# -- Enable debug logging
enabled: false
@@ -40,6 +40,14 @@ debug:
# - datapath
# - policy
verbose: ~
# -- Set the agent-internal metrics sampling frequency. This sets the
# frequency of the internal sampling of the agent metrics. These are
# available via the "cilium-dbg shell -- metrics -s" command and are
# part of the metrics HTML page included in the sysdump.
# @schema
# type: [null, string]
# @schema
metricsSamplingInterval: "5m"
rbac:
# -- Enable creation of Resource-Based Access Control configuration.
create: true
@@ -52,6 +60,18 @@ iptablesRandomFully: false
# -- (string) Kubernetes config path
# @default -- `"~/.kube/config"`
kubeConfigPath: ""
# -- Configure the Kubernetes service endpoint dynamically using a ConfigMap. Mutually exclusive with `k8sServiceHost`.
k8sServiceHostRef:
# @schema
# type: [string, null]
# @schema
# -- (string) name of the ConfigMap containing the Kubernetes service endpoint
name:
# @schema
# type: [string, null]
# @schema
# -- (string) Key in the ConfigMap containing the Kubernetes service endpoint
key:
# -- (string) Kubernetes service host - use "auto" for automatic lookup from the cluster-info ConfigMap
k8sServiceHost: localhost
# @schema
@@ -103,6 +123,14 @@ k8sClientRateLimit:
# The rate limiter will allow short bursts with a higher rate.
# @default -- 200
burst:
# -- Configure exponential backoff for client-go in Cilium agent.
k8sClientExponentialBackoff:
# -- Enable exponential backoff for client-go in Cilium agent.
enabled: true
# -- Configure base (in seconds) for exponential backoff.
backoffBaseSeconds: 1
# -- Configure maximum duration (in seconds) for exponential backoff.
backoffMaxDurationSeconds: 120
cluster:
# -- Name of the cluster. Only required for Cluster Mesh and mutual authentication with SPIRE.
# It must respect the following constraints:
@@ -180,7 +208,7 @@ serviceAccounts:
terminationGracePeriodSeconds: 1
# -- Install the cilium agent resources.
agent: true
# -- Agent container name.
# -- Agent daemonset name.
name: cilium
# -- Roll out cilium agent pods automatically when configmap is updated.
rollOutCiliumPods: true
@@ -191,10 +219,10 @@ image:
# @schema
override: ~
repository: "quay.io/cilium/cilium"
tag: "v1.17.8"
tag: "v1.18.2"
pullPolicy: "IfNotPresent"
# cilium-digest
digest: "sha256:6d7ea72ed311eeca4c75a1f17617a3d596fb6038d30d00799090679f82a01636"
digest: "sha256:858f807ea4e20e85e3ea3240a762e1f4b29f1cb5bbd0463b8aa77e7b097c0667"
useDigest: true
# -- Scheduling configurations for cilium pods
scheduling:
@@ -291,6 +319,8 @@ initResources: {}
securityContext:
# -- User to run the pod with
# runAsUser: 0
# -- disable privilege escalation
allowPrivilegeEscalation: false
# -- Run the pod with elevated privileges
privileged: false
# -- SELinux options for the `cilium-agent` and init containers
@@ -418,15 +448,11 @@ bandwidthManager:
enabled: false
# -- Activate BBR TCP congestion control for Pods
bbr: false
# -- Activate BBR TCP congestion control for Pods in the host namespace only.
bbrHostNamespaceOnly: false
# -- Configure standalone NAT46/NAT64 gateway
nat46x64Gateway:
# -- Enable RFC8215-prefixed translation
enabled: false
# -- EnableHighScaleIPcache enables the special ipcache mode for high scale
# clusters. The ipcache content will be reduced to the strict minimum and
# traffic will be encapsulated to carry security identities.
highScaleIPcache:
# -- Enable the high scale mode for the ipcache.
# -- Enable RFC6052-prefixed translation
enabled: false
# -- Configure L2 announcements
l2announcements:
@@ -444,6 +470,8 @@ l2podAnnouncements:
enabled: false
# -- Interface used for sending Gratuitous ARP pod announcements
interface: "eth0"
# -- A regular expression matching interfaces used for sending Gratuitous ARP pod announcements
# interfacePattern: ""
# -- This feature set enables virtual BGP routers to be created via
# CiliumBGPPeeringPolicy CRDs.
bgpControlPlane:
@@ -461,6 +489,18 @@ bgpControlPlane:
# It is recommended to enable status reporting in general, but if you have any issue
# such as high API server load, you can disable it by setting this to false.
enabled: true
# -- BGP router-id allocation mode
routerIDAllocation:
# -- BGP router-id allocation mode. In default mode, the router-id is derived from the IPv4 address if it is available, or else it is determined by the lower 32 bits of the MAC address.
mode: "default"
# -- IP pool to allocate the BGP router-id from when the mode is ip-pool.
ipPool: ""
# -- Legacy BGP ORIGIN attribute settings (BGPv2 only)
legacyOriginAttribute:
# -- Enable/Disable advertising LoadBalancerIP routes with the legacy
# BGP ORIGIN attribute value INCOMPLETE (2) instead of the default IGP (0).
# Enable for compatibility with the legacy behavior of MetalLB integration.
enabled: false
pmtuDiscovery:
# -- Enable path MTU discovery to send ICMP fragmentation-needed replies to
# the client.
@@ -572,6 +612,11 @@ bpf:
# type: [null, integer]
# @schema
policyMapMax: 16384
# -- Configure the maximum number of entries in global policy stats map.
# @schema
# type: [null, integer]
# @schema
policyStatsMapMax: 65536
# @schema
# type: [null, number, string]
# @schema
@@ -641,7 +686,7 @@ bpf:
# supported kernels.
# @default -- `true`
enableTCX: true
# -- (string) Mode for Pod devices for the core datapath (veth, netkit, netkit-l2, lb-only)
# -- (string) Mode for Pod devices for the core datapath (veth, netkit, netkit-l2)
# @default -- `veth`
datapathMode: veth
# -- Enable BPF clock source probing for more efficient tick retrieval.
@@ -711,12 +756,15 @@ cni:
# readCniConf: /host/etc/cni/net.d/05-sample.conflist.input
# -- When defined, configMap will mount the provided value as ConfigMap and
# interpret the cniConf variable as CNI configuration file and write it
# when the agent starts up
# configMap: cni-configuration
# interpret the 'cni.configMapKey' value as CNI configuration file and write it
# when the agent starts up.
configMap: ""
# -- Configure the key in the CNI ConfigMap to read the contents of
# the CNI configuration from.
# the CNI configuration from. For this to be effective, the 'cni.configMap'
# parameter must be specified too.
# Note that the 'cni.configMap' parameter is the name of the ConfigMap, while
# 'cni.configMapKey' is the name of the key in the ConfigMap data containing
# the actual configuration.
configMapKey: cni-config
# -- Configure the path to where to mount the ConfigMap inside the agent pod.
confFileMountPath: /tmp/cni-configuration
@@ -730,6 +778,16 @@ cni:
memory: 10Mi
# -- Enable route MTU for pod netns when CNI chaining is used
enableRouteMTUForCNIChaining: false
# -- Enable the removal of iptables rules created by the AWS CNI VPC plugin.
iptablesRemoveAWSRules: true
# @schema
# type: [null, number]
# @schema
# -- (float64) Ratio of the connectivity probe frequency vs resource usage, a float in
# [0, 1]. 0 will give more frequent probing, 1 will give less frequent probing. Probing
# frequency is dynamically adjusted based on the cluster size.
# @default -- `0.5`
connectivityProbeFrequencyRatio: ~
# -- (string) Configure how frequently garbage collection should occur for the datapath
# connection tracking table.
# @default -- `"0s"`
@@ -795,13 +853,6 @@ daemon:
# a non-local route. This should be used only when autodetection is not suitable.
devices: eth+
# -- Enables experimental support for the detection of new and removed datapath
# devices. When devices change the eBPF datapath is reloaded and services updated.
# If "devices" is set then only those devices, or devices matching a wildcard will
# be considered.
#
# This option has been deprecated and is a no-op.
enableRuntimeDeviceDetection: true
# -- Forces the auto-detection of devices, even if specific devices are explicitly listed
forceDeviceDetection: false
# -- Chains to ignore when installing feeder rules.
@@ -816,8 +867,7 @@ forceDeviceDetection: false
# -- Enable Kubernetes EndpointSlice feature in Cilium if the cluster supports it.
# enableK8sEndpointSlice: true
# -- Enable CiliumEndpointSlice feature (deprecated, please use `ciliumEndpointSlice.enabled` instead).
enableCiliumEndpointSlice: false
# -- CiliumEndpointSlice configuration options.
ciliumEndpointSlice:
# -- Enable Cilium EndpointSlice feature.
enabled: false
@@ -833,13 +883,13 @@ ciliumEndpointSlice:
- nodes: 100
limit: 50
burst: 100
# @schema
# enum: ["identity", "fcfs"]
# @schema
# -- The slicing mode to use for CiliumEndpointSlices.
# identity groups together CiliumEndpoints that share the same identity.
# fcfs groups together CiliumEndpoints in a first-come-first-serve basis, filling in the largest non-full slice first.
sliceMode: identity
# @schema
# enum: ["agent", "operator", "both"]
# @schema
# -- Control whether CiliumIdentities are created by the agent ("agent"), the operator ("operator") or both ("both").
# "Both" should be used only to migrate between "agent" and "operator".
# Operator-managed identities is a beta feature.
identityManagementMode: "agent"
envoyConfig:
# -- Enable CiliumEnvoyConfig CRD
# CiliumEnvoyConfig CRD can also be implicitly enabled by other options.
@@ -1049,8 +1099,6 @@ endpointLockdownOnMapOverflow: false
eni:
# -- Enable Elastic Network Interface (ENI) integration.
enabled: false
# -- Update ENI Adapter limits from the EC2 API
updateEC2AdapterLimitViaAPI: true
# -- Release IPs not used from the ENI
awsReleaseExcessIPs: false
# -- Enable ENI prefix delegation
@@ -1099,9 +1147,6 @@ healthCheckICMPFailureThreshold: 3
hostFirewall:
# -- Enables the enforcement of host policies in the eBPF datapath.
enabled: false
hostPort:
# -- Enable hostPort service support.
enabled: false
# -- Configure socket LB
socketLB:
# -- Enable socket LB
@@ -1125,8 +1170,8 @@ certgen:
# @schema
override: ~
repository: "quay.io/cilium/certgen"
tag: "v0.2.1"
digest: "sha256:ab6b1928e9c5f424f6b0f51c68065b9fd85e2f8d3e5f21fbd1a3cb27e6fb9321"
tag: "v0.2.4"
digest: "sha256:de7b97b1d19a34b674d0c4bc1da4db999f04ae355923a9a994ac3a81e1a1b5ff"
useDigest: true
pullPolicy: "IfNotPresent"
# -- Seconds after which the completed job pod will be deleted
@@ -1146,6 +1191,9 @@ certgen:
# -- Node tolerations for pod assignment on nodes with taints
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
tolerations: []
# -- Resource limits for certgen
# ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers
resources: {}
# -- Additional certgen volumes.
extraVolumes: []
# -- Additional certgen volumeMounts.
@@ -1241,11 +1289,17 @@ hubble:
jobLabel: ""
# -- Interval for scrape metrics.
interval: "10s"
# @schema
# type: [null, string]
# @schema
# -- Timeout after which scrape is considered to be failed.
scrapeTimeout: ~
# -- Relabeling configs for the ServiceMonitor hubble
relabelings:
- sourceLabels:
- __meta_kubernetes_pod_node_name
targetLabel: node
action: replace
replacement: ${1}
# @schema
# type: [null, array]
@@ -1285,6 +1339,10 @@ hubble:
# excludeFilters: []
# -- Unix domain socket path to listen to when Hubble is enabled.
socketPath: /var/run/cilium/hubble.sock
# -- Enables network policy correlation of Hubble flows, i.e. populating `egress_allowed_by`, `ingress_denied_by` fields with policy information.
networkPolicyCorrelation:
# @default -- `true`
enabled: true
# -- Enables redacting sensitive information present in Layer 7 flows.
redact:
enabled: false
@@ -1450,9 +1508,9 @@ hubble:
# @schema
override: ~
repository: "quay.io/cilium/hubble-relay"
tag: "v1.17.8"
tag: "v1.18.2"
# hubble-relay-digest
digest: "sha256:2e576bf7a02291c07bffbc1ca0a66a6c70f4c3eb155480e5b3ac027bedd2858b"
digest: "sha256:6079308ee15e44dff476fb522612732f7c5c4407a1017bc3470916242b0405ac"
useDigest: true
pullPolicy: "IfNotPresent"
# -- Specifies the resources for the hubble-relay pods
@@ -1504,6 +1562,11 @@ hubble:
# @schema
# -- Maximum number/percentage of pods that may be made unavailable
maxUnavailable: 1
# @schema
# type: [null, string]
# @schema
# -- How are unhealthy, but running, pods counted for eviction
unhealthyPodEvictionPolicy: null
# -- The priority class to use for hubble-relay
priorityClassName: ""
# -- Configure termination grace period for hubble relay Deployment.
@@ -1523,12 +1586,17 @@ hubble:
# -- hubble-relay pod security context
podSecurityContext:
fsGroup: 65532
seccompProfile:
type: RuntimeDefault
# -- hubble-relay container security context
securityContext:
# readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
runAsNonRoot: true
runAsUser: 65532
runAsGroup: 65532
seccompProfile:
type: RuntimeDefault
capabilities:
drop:
- ALL
@@ -1589,13 +1657,6 @@ hubble:
# @schema
# type: [null, string]
# @schema
# -- Dial timeout to connect to the local hubble instance to receive peer information (e.g. "30s").
#
# This option has been deprecated and is a no-op.
dialTimeout: ~
# @schema
# type: [null, string]
# @schema
# -- Backoff duration to retry connecting to the local hubble instance in case of failure (e.g. "30s").
retryTimeout: ~
# @schema
@@ -1630,6 +1691,11 @@ hubble:
annotations: {}
# -- Interval for scrape metrics.
interval: "10s"
# @schema
# type: [null, string]
# @schema
# -- Timeout after which scrape is considered to be failed.
scrapeTimeout: ~
# -- Specify the Kubernetes namespace where Prometheus expects to find
# service monitors configured.
# namespace: ""
@@ -1706,7 +1772,8 @@ hubble:
useDigest: true
pullPolicy: "IfNotPresent"
# -- Hubble-ui backend security context.
securityContext: {}
securityContext:
allowPrivilegeEscalation: false
# -- Additional hubble-ui backend environment variables.
extraEnv: []
# -- Additional hubble-ui backend volumes.
@@ -1740,7 +1807,8 @@ hubble:
useDigest: true
pullPolicy: "IfNotPresent"
# -- Hubble-ui frontend security context.
securityContext: {}
securityContext:
allowPrivilegeEscalation: false
# -- Additional hubble-ui frontend environment variables.
extraEnv: []
# -- Additional hubble-ui frontend volumes.
@@ -1785,6 +1853,11 @@ hubble:
# @schema
# -- Maximum number/percentage of pods that may be made unavailable
maxUnavailable: 1
# @schema
# type: [null, string]
# @schema
# -- How are unhealthy, but running, pods counted for eviction
unhealthyPodEvictionPolicy: null
# -- Affinity for hubble-ui
affinity: {}
# -- Pod topology spread constraints for hubble-ui
@@ -1819,6 +1892,8 @@ hubble:
service:
# -- Annotations to be added for the Hubble UI service
annotations: {}
# -- Labels to be added for the Hubble UI service
labels: {}
# --- The type of service used for Hubble UI access, either ClusterIP or NodePort.
type: ClusterIP
# --- The port to use when the service type is set to NodePort.
@@ -1843,10 +1918,6 @@ hubble:
# - chart-example.local
# -- Hubble flows export.
export:
# --- Defines max file size of output file before it gets rotated.
fileMaxSizeMb: 10
# --- Defines max number of backup/rotated files.
fileMaxBackups: 5
# --- Static exporter configuration.
# Static exporter is bound to agent lifecycle.
static:
@@ -1862,6 +1933,12 @@ hubble:
denyList: []
# - '{"source_pod":["kube-system/"]}'
# - '{"destination_pod":["kube-system/"]}'
# --- Defines max file size of output file before it gets rotated.
fileMaxSizeMb: 10
# --- Defines max number of backup/rotated files.
fileMaxBackups: 5
# --- Enable compression of rotated files.
fileCompress: false
# --- Dynamic exporters configuration.
# Dynamic exporters may be reconfigured without a need of agent restarts.
dynamic:
@@ -1879,6 +1956,9 @@ hubble:
includeFilters: []
excludeFilters: []
filePath: "/var/run/cilium/hubble/events.log"
fileMaxSizeMb: 10
fileMaxBackups: 5
fileCompress: false
# - name: "test002"
# filePath: "/var/log/network/flow-log/pa/test002.log"
# fieldMask: ["source.namespace", "source.pod_name", "destination.namespace", "destination.pod_name", "verdict"]
@@ -1888,6 +1968,9 @@ hubble:
# - type: 1
# - destination_pod: ["frontend/nginx-975996d4c-7hhgt"]
# excludeFilters: []
# fileMaxSizeMb: 1
# fileMaxBackups: 10
# fileCompress: true
# end: "2023-10-09T23:59:59-07:00"
# -- Emit v1.Events related to pods on detection of packet drops.
# This feature is alpha, please provide feedback at https://github.com/cilium/cilium/issues/33975.
@@ -2002,14 +2085,17 @@ k8s:
# -- requireIPv6PodCIDR enables waiting for Kubernetes to provide the PodCIDR
# range via the Kubernetes node resource
requireIPv6PodCIDR: false
# -- A space separated list of Kubernetes API server URLs to use with the client.
# For example "https://192.168.0.1:6443 https://192.168.0.2:6443"
# apiServerURLs: ""
# -- Keep the deprecated selector labels when deploying Cilium DaemonSet.
keepDeprecatedLabels: false
# -- Keep the deprecated probes when deploying Cilium DaemonSet
keepDeprecatedProbes: false
startupProbe:
# -- failure threshold of startup probe.
# 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s)
failureThreshold: 105
# Allow Cilium to take up to 600s to start up (300 attempts with 2s between attempts).
failureThreshold: 300
# -- interval between checks of the startup probe
periodSeconds: 2
livenessProbe:
@@ -2037,12 +2123,19 @@ kubeProxyReplacementHealthzBindAddr: ""
l2NeighDiscovery:
# -- Enable L2 neighbor discovery in the agent
enabled: true
# -- Override the agent's default neighbor resolution refresh period.
refreshPeriod: "30s"
# -- Enable Layer 7 network policy.
l7Proxy: true
# -- Enable Local Redirect Policy.
# -- Enable Local Redirect Policy (deprecated, please use 'localRedirectPolicies.enabled' instead)
localRedirectPolicy: false
localRedirectPolicies:
# -- Enable local redirect policies.
enabled: false
# -- Limit the allowed addresses in Address Matcher rule of
# Local Redirect Policies to the given CIDRs.
# @schema@
# type: [null, array]
# @schema@
addressMatcherCIDRs: ~
# To include or exclude matched resources from cilium identity evaluation
# labels: ""
@@ -2061,7 +2154,11 @@ maglev: {}
# -- hashSeed is the cluster-wide base64 encoded seed for the hashing
# hashSeed:
# -- Enables masquerading of IPv4 traffic leaving the node from endpoints.
# @schema
# type: [null, boolean]
# @schema
# -- (bool) Enables masquerading of IPv4 traffic leaving the node from endpoints.
# @default -- `true` unless ipam eni mode is active
enableIPv4Masquerade: true
# -- Enables masquerading of IPv6 traffic leaving the node from endpoints.
enableIPv6Masquerade: true
@@ -2142,17 +2239,14 @@ loadBalancer:
# path), or best-effort (use native mode XDP acceleration on devices
# that support it).
acceleration: disabled
# -- dsrDispatch configures whether IP option or IPIP encapsulation is
# used to pass a service IP and port to remote backend
# -- dsrDispatch configures whether IP option (opt), IPIP encapsulation (ipip),
# or Geneve Class Option (geneve) is used to pass a service IP and port to remote backend
# dsrDispatch: opt
# -- serviceTopology enables K8s Topology Aware Hints -based service
# endpoints filtering
# serviceTopology: false
# -- experimental enables support for the experimental load-balancing
# control-plane.
experimental: false
# -- L7 LoadBalancer
l7:
# -- Enable L7 service load balancing via envoy proxy.
@@ -2237,6 +2331,11 @@ prometheus:
jobLabel: ""
# -- Interval for scrape metrics.
interval: "10s"
# @schema
# type: [null, string]
# @schema
# -- Timeout after which scrape is considered to be failed.
scrapeTimeout: ~
# -- Specify the Kubernetes namespace where Prometheus expects to find
# service monitors configured.
# namespace: ""
@@ -2245,6 +2344,7 @@ prometheus:
- sourceLabels:
- __meta_kubernetes_pod_node_name
targetLabel: node
action: replace
replacement: ${1}
# @schema
# type: [null, array]
@@ -2347,6 +2447,9 @@ envoy:
# -- Set Envoy upstream HTTP idle connection timeout seconds.
# Does not apply to connections with pending requests. Default 60s
idleTimeoutDurationSeconds: 60
# -- Set Envoy the amount of time that the connection manager will allow a stream to exist with no upstream or downstream activity.
# default 5 minutes
streamIdleTimeoutDurationSeconds: 300
# -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the ingress L7 policy enforcement Envoy listeners.
xffNumTrustedHopsL7PolicyIngress: 0
# -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the egress L7 policy enforcement Envoy listeners.
@@ -2356,6 +2459,8 @@ envoy:
# @schema
# -- Max duration to wait for endpoint policies to be restored on restart. Default "3m".
policyRestoreTimeoutDuration: null
# -- Time in seconds to block Envoy worker thread while an upstream HTTP connection is closing. If set to 0, the connection is closed immediately (with TCP RST). If set to -1, the connection is closed asynchronously in the background.
httpUpstreamLingerTimeout: null
# -- Envoy container image.
image:
# @schema
@@ -2363,9 +2468,9 @@ envoy:
# @schema
override: ~
repository: "quay.io/cilium/cilium-envoy"
tag: "v1.33.9-1757932127-3c04e8f2f1027d106b96f8ef4a0215e81dbaaece"
tag: "v1.34.7-1757592137-1a52bb680a956879722f48c591a2ca90f7791324"
pullPolicy: "IfNotPresent"
digest: "sha256:06fbc4e55d926dd82ff2a0049919248dcc6be5354609b09012b01bc9c5b0ee28"
digest: "sha256:7932d656b63f6f866b6732099d33355184322123cfe1182e6f05175a3bc2e0e0"
useDigest: true
# -- Additional containers added to the cilium Envoy DaemonSet.
extraContainers: []
@@ -2432,12 +2537,16 @@ envoy:
# memory: 512Mi
startupProbe:
# -- Enable startup probe for cilium-envoy
enabled: true
# -- failure threshold of startup probe.
# 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s)
failureThreshold: 105
# -- interval between checks of the startup probe
periodSeconds: 2
livenessProbe:
# -- Enable liveness probe for cilium-envoy
enabled: true
# -- failure threshold of liveness probe
failureThreshold: 10
# -- interval between checks of the liveness probe
@@ -2550,6 +2659,11 @@ envoy:
annotations: {}
# -- Interval for scrape metrics.
interval: "10s"
# @schema
# type: [null, string]
# @schema
# -- Timeout after which scrape is considered to be failed.
scrapeTimeout: ~
# -- Specify the Kubernetes namespace where Prometheus expects to find
# service monitors configured.
# namespace: ""
@@ -2559,6 +2673,7 @@ envoy:
- sourceLabels:
- __meta_kubernetes_pod_node_name
targetLabel: node
action: replace
replacement: ${1}
# @schema
# type: [null, array]
@@ -2570,6 +2685,10 @@ envoy:
port: "9964"
# -- Enable/Disable use of node label based identity
nodeSelectorLabels: false
# To include or exclude matched resources from cilium node identity evaluation
# List of labels just like --labels flag (.Values.labels)
# nodeLabels: ""
# -- Enable resource quotas for priority classes used in the cluster.
resourceQuotas:
enabled: false
@@ -2585,6 +2704,8 @@ resourceQuotas:
##################
#sessionAffinity: false
# -- Annotations to be added to all cilium-secret namespaces (resources under templates/cilium-secrets-namespace)
secretsNamespaceAnnotations: {}
# -- Do not run Cilium agent when running with clean mode. Useful to completely
# uninstall Cilium as it will stop Cilium from starting and create artifacts
# in the node.
@@ -2672,6 +2793,9 @@ tls:
# - geneve
# @default -- `"vxlan"`
tunnelProtocol: ""
# -- IP family for the underlay.
# @default -- `"ipv4"`
underlayProtocol: ""
# -- Enable native-routing mode or tunneling mode.
# Possible values:
# - ""
@@ -2720,15 +2844,15 @@ operator:
# @schema
override: ~
repository: "quay.io/cilium/operator"
tag: "v1.17.8"
tag: "v1.18.2"
# operator-generic-digest
genericDigest: "sha256:5468807b9c31997f3a1a14558ec7c20c5b962a2df6db633b7afbe2f45a15da1c"
genericDigest: "sha256:cb4e4ffc5789fd5ff6a534e3b1460623df61cba00f5ea1c7b40153b5efb81805"
# operator-azure-digest
azureDigest: "sha256:619f9febf3efef2724a26522b253e4595cd33c274f5f49925e29a795fdc2d2d7"
azureDigest: "sha256:9696e9b8219b9a5c16987e072eda2da378d42a32f9305375e56d7380a0c2ba8e"
# operator-aws-digest
awsDigest: "sha256:28012f7d0f4f23e9f6c7d6a5dd931afa326bbac3e8103f3f6f22b9670847dffa"
awsDigest: "sha256:1cb856fbe265dfbcfe816bd6aa4acaf006ecbb22dcc989116a1a81bb269ea328"
# operator-alibabacloud-digest
alibabacloudDigest: "sha256:72c25a405ad8e58d2cf03f7ea2b6696ed1edcfb51716b5f85e45c6c4fcaa6056"
alibabacloudDigest: "sha256:612b1d94c179cd8ae239e571e96ebd95662bb5cccb62aacfdf79355aa9cdddc8"
useDigest: true
pullPolicy: "IfNotPresent"
suffix: ""
@@ -2771,12 +2895,19 @@ operator:
kubernetes.io/os: linux
# -- Node tolerations for cilium-operator scheduling to nodes with taints
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
# Toleration for agentNotReadyTaintKey taint is always added to cilium-operator pods.
# @schema
# type: [null, array]
# @schema
tolerations:
- operator: Exists
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
- key: "node-role.kubernetes.io/control-plane"
operator: Exists
- key: "node-role.kubernetes.io/master" #deprecated
operator: Exists
- key: "node.kubernetes.io/not-ready"
operator: Exists
- key: "node.cloudprovider.kubernetes.io/uninitialized"
operator: Exists
# -- Additional cilium-operator container arguments.
extraArgs: []
# -- Additional cilium-operator environment variables.
@@ -2799,7 +2930,9 @@ operator:
# -- HostNetwork setting
hostNetwork: true
# -- Security context to be added to cilium-operator pods
podSecurityContext: {}
podSecurityContext:
seccompProfile:
type: RuntimeDefault
# -- Annotations to be added to cilium-operator pods
podAnnotations: {}
# -- Labels to be added to cilium-operator pods
@@ -2820,6 +2953,11 @@ operator:
# @schema
# -- Maximum number/percentage of pods that may be made unavailable
maxUnavailable: 1
# @schema
# type: [null, string]
# @schema
# -- How are unhealthy, but running, pods counted for eviction
unhealthyPodEvictionPolicy: null
# -- cilium-operator resource limits & requests
# ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources: {}
@@ -2831,7 +2969,11 @@ operator:
# memory: 128Mi
# -- Security context to be added to cilium-operator pods
securityContext: {}
securityContext:
capabilities:
drop:
- ALL
allowPrivilegeEscalation: false
# runAsUser: 0
# -- Interval for endpoint garbage collection.
@@ -2868,6 +3010,11 @@ operator:
# -- Interval for scrape metrics.
interval: "10s"
# @schema
# type: [null, string]
# @schema
# -- Timeout after which scrape is considered to be failed.
scrapeTimeout: ~
# @schema
# type: [null, array]
# @schema
# -- Relabeling configs for the ServiceMonitor cilium-operator
@@ -2921,7 +3068,7 @@ nodeinit:
override: ~
repository: "quay.io/cilium/startup-script"
tag: "1755531540-60ee83e"
digest: "sha256:5bdca3c2dec2c79f58d45a7a560bf1098c2126350c901379fe850b7f78d3d757"
digest: "sha256:0c91245afb3a4ff78b5cc8c09226806e94a9a10eb0adb74a85e0eeed2a5cae8c"
useDigest: true
pullPolicy: "IfNotPresent"
# -- The priority class to use for the nodeinit pod.
@@ -2968,6 +3115,7 @@ nodeinit:
memory: 100Mi
# -- Security context to be added to nodeinit pods.
securityContext:
allowPrivilegeEscalation: false
privileged: false
seLinuxOptions:
level: 's0'
@@ -3005,11 +3153,23 @@ preflight:
# @schema
override: ~
repository: "quay.io/cilium/cilium"
tag: "v1.17.8"
tag: "v1.18.2"
# cilium-digest
digest: "sha256:6d7ea72ed311eeca4c75a1f17617a3d596fb6038d30d00799090679f82a01636"
digest: "sha256:858f807ea4e20e85e3ea3240a762e1f4b29f1cb5bbd0463b8aa77e7b097c0667"
useDigest: true
pullPolicy: "IfNotPresent"
envoy:
# -- Envoy pre-flight image.
image:
# @schema
# type: [null, string]
# @schema
override: ~
repository: "quay.io/cilium/cilium-envoy"
tag: "v1.34.7-1757592137-1a52bb680a956879722f48c591a2ca90f7791324"
pullPolicy: "IfNotPresent"
digest: "sha256:7932d656b63f6f866b6732099d33355184322123cfe1182e6f05175a3bc2e0e0"
useDigest: true
# -- The priority class to use for the preflight pod.
priorityClassName: ""
# -- preflight update strategy
@@ -3065,6 +3225,11 @@ preflight:
# @schema
# -- Maximum number/percentage of pods that may be made unavailable
maxUnavailable: 1
# @schema
# type: [null, string]
# @schema
# -- How are unhealthy, but running, pods counted for eviction
unhealthyPodEvictionPolicy: null
# -- preflight resource limits & requests
# ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources: {}
@@ -3081,7 +3246,8 @@ preflight:
# -- interval between checks of the readiness probe
periodSeconds: 5
# -- Security context to be added to preflight pods
securityContext: {}
securityContext:
allowPrivilegeEscalation: false
# runAsUser: 0
# -- Path to write the `--tofqdns-pre-cache` file to.
@@ -3115,6 +3281,8 @@ clustermesh:
enableEndpointSliceSynchronization: false
# -- Enable Multi-Cluster Services API support
enableMCSAPISupport: false
# -- Control whether policy rules assume by default the local cluster if not explicitly selected
policyDefaultLocalCluster: false
# -- Annotations to be added to all top-level clustermesh objects (resources under templates/clustermesh-apiserver and templates/clustermesh-config)
annotations: {}
# -- Clustermesh explicit configuration.
@@ -3154,9 +3322,9 @@ clustermesh:
# @schema
override: ~
repository: "quay.io/cilium/clustermesh-apiserver"
tag: "v1.17.8"
tag: "v1.18.2"
# clustermesh-apiserver-digest
digest: "sha256:3ac210d94d37a77ec010f9ac4c705edc8f15f22afa2b9a6f0e2a7d64d2360586"
digest: "sha256:cd689a07bfc7622e812fef023cb277fdc695b60a960d36f32f93614177a7a0f6"
useDigest: true
pullPolicy: "IfNotPresent"
# -- TCP port for the clustermesh-apiserver health API.
@@ -3210,7 +3378,7 @@ clustermesh:
storageMedium: Disk
kvstoremesh:
# -- Enable KVStoreMesh. KVStoreMesh caches the information retrieved
# from the remote clusters in the local etcd instance.
# from the remote clusters in the local etcd instance (deprecated - KVStoreMesh will always be enabled once the option is removed).
enabled: true
# -- TCP port for the KVStoreMesh health API.
healthPort: 9881
@@ -3239,6 +3407,11 @@ clustermesh:
- ALL
# -- lifecycle setting for the KVStoreMesh container
lifecycle: {}
# -- Specify the KVStore mode when running KVStoreMesh
# Supported values:
# - "internal": remote cluster identities are cached in etcd that runs as a sidecar within ``clustermesh-apiserver`` pod.
# - "external": ``clustermesh-apiserver`` will sync remote cluster information to the etcd used as kvstore. This can't be enabled with crd identity allocation mode.
kvstoreMode: "internal"
service:
# -- The type of service used for apiserver access.
type: NodePort
@@ -3352,6 +3525,11 @@ clustermesh:
# @schema
# -- Maximum number/percentage of pods that may be made unavailable
maxUnavailable: 1
# @schema
# type: [null, string]
# @schema
# -- How are unhealthy, but running, pods counted for eviction
unhealthyPodEvictionPolicy: null
# -- Resource requests and limits for the clustermesh-apiserver
resources: {}
# requests:
@@ -3518,6 +3696,11 @@ clustermesh:
# -- Interval for scrape metrics (apiserver metrics)
interval: "10s"
# @schema
# type: [null, string]
# @schema
# -- Timeout after which scrape is considered to be failed.
scrapeTimeout: ~
# @schema
# type: [null, array]
# @schema
# -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics)
@@ -3531,6 +3714,11 @@ clustermesh:
# -- Interval for scrape metrics (KVStoreMesh metrics)
interval: "10s"
# @schema
# type: [null, string]
# @schema
# -- Timeout after which scrape is considered to be failed.
scrapeTimeout: ~
# @schema
# type: [null, array]
# @schema
# -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics)
@@ -3544,6 +3732,11 @@ clustermesh:
# -- Interval for scrape metrics (etcd metrics)
interval: "10s"
# @schema
# type: [null, string]
# @schema
# -- Timeout after which scrape is considered to be failed.
scrapeTimeout: ~
# @schema
# type: [null, array]
# @schema
# -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics)
@@ -3553,10 +3746,6 @@ clustermesh:
# @schema
# -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics)
metricRelabelings: ~
# -- Configure external workloads support
externalWorkloads:
# -- Enable support for external workloads, such as VMs (false by default).
enabled: false
# -- Configure cgroup related configuration
cgroup:
autoMount:
@@ -3581,9 +3770,6 @@ cgroup:
sysctlfix:
# -- Enable the sysctl override. When enabled, the init container will mount the /proc of the host so that the `sysctlfix` utility can execute.
enabled: true
# -- Configure whether to enable auto detect of terminating state for endpoints
# in order to support graceful termination.
enableK8sTerminatingEndpoint: true
# -- Configure whether to unload DNS policy rules on graceful shutdown
# dnsPolicyUnloadOnShutdown: false
@@ -3616,6 +3802,9 @@ dnsProxy:
proxyResponseMaxDelay: 100ms
# -- DNS proxy operation mode (true/false, or unset to use version dependent defaults)
# enableTransparentMode: true
# -- Pre-allocate ToFQDN identities. This reduces DNS proxy tail latency, at the potential cost of some
# unnecessary policymap entries. Disable this if you have a large (200+) number of unique ToFQDN selectors.
preAllocateIdentities: true
# -- SCTP Configuration Values
sctp:
# -- Enable SCTP support. NOTE: Currently, SCTP support does not support rewriting ports or multihoming.
@@ -3665,7 +3854,7 @@ authentication:
override: ~
repository: "docker.io/library/busybox"
tag: "1.37.0"
digest: "sha256:d82f458899c9696cb26a7c02d5568f81c8c8223f8661bb2a7988b269c8b9051e"
digest: "sha256:ab33eacc8251e3807b85bb6dba570e4698c3998eca6f0fc2ccb60575a563ea74"
useDigest: true
pullPolicy: "IfNotPresent"
# SPIRE agent configuration
@@ -3679,8 +3868,8 @@ authentication:
# @schema
override: ~
repository: "ghcr.io/spiffe/spire-agent"
tag: "1.9.6"
digest: "sha256:5106ac601272a88684db14daf7f54b9a45f31f77bb16a906bd5e87756ee7b97c"
tag: "1.12.4"
digest: "sha256:163970884fba18860cac93655dc32b6af85a5dcf2ebb7e3e119a10888eff8fcd"
useDigest: true
pullPolicy: "IfNotPresent"
# -- SPIRE agent service account
@@ -3734,8 +3923,8 @@ authentication:
# @schema
override: ~
repository: "ghcr.io/spiffe/spire-server"
tag: "1.9.6"
digest: "sha256:59a0b92b39773515e25e68a46c40d3b931b9c1860bc445a79ceb45a805cab8b4"
tag: "1.12.4"
digest: "sha256:34147f27066ab2be5cc10ca1d4bfd361144196467155d46c45f3519f41596e49"
useDigest: true
pullPolicy: "IfNotPresent"
# -- SPIRE server service account