From 1a764dd456a7ffea063c411d82283334e4916a1a Mon Sep 17 00:00:00 2001
From: Philip Haupt <der.mad.mob@gmail.com>
Date: Wed, 5 Nov 2025 00:48:43 +0100
Subject: [PATCH] valkey update

---
 valkey/main.yaml              |  34 ++++---
 valkey/src/kustomization.yaml |   2 +-
 valkey/src/values.yaml        | 182 +++++++++++++++++++++++++++++++---
 3 files changed, 188 insertions(+), 30 deletions(-)

diff --git a/valkey/main.yaml b/valkey/main.yaml
index 746e748..35784db 100644
--- a/valkey/main.yaml
+++ b/valkey/main.yaml
@@ -6,8 +6,8 @@ metadata:
     app.kubernetes.io/instance: valkey
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: valkey
-    app.kubernetes.io/version: 8.1.3
-    helm.sh/chart: valkey-0.7.4
+    app.kubernetes.io/version: 8.1.4
+    helm.sh/chart: valkey-0.7.7
   name: valkey
 ---
 apiVersion: v1
@@ -20,13 +20,13 @@ metadata:
     app.kubernetes.io/instance: valkey
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: valkey
-    app.kubernetes.io/version: 8.1.3
-    helm.sh/chart: valkey-0.7.4
+    app.kubernetes.io/version: 8.1.4
+    helm.sh/chart: valkey-0.7.7
   name: valkey-config
 ---
 apiVersion: v1
 data:
-  init.sh: "#!/bin/bash\nset -euo pipefail\n\n# Default config paths\nVALKEY_CONFIG=${VALKEY_CONFIG_PATH:-/data/conf/valkey.conf}\n\nLOGFILE=\"/data/init.log\"\nDATA_DIR=\"/data/conf\"\n\n#
+  init.sh: "#!/bin/sh\nset -euo pipefail\n\n# Default config paths\nVALKEY_CONFIG=${VALKEY_CONFIG_PATH:-/data/conf/valkey.conf}\n\nLOGFILE=\"/data/init.log\"\nDATA_DIR=\"/data/conf\"\n\n#
     Logging function\nlog() {\n  echo \"$(date) $1\" | tee -a \"$LOGFILE\"\n}\n\n# Clean
     old log if requested\nif [ \"${KEEP_OLD_LOGS:-false}\" != \"true\" ]; then\n
     \ rm -f \"$LOGFILE\"\nfi\n\nif [ -f \"$LOGFILE\" ]; then\n  log \"Detected restart
@@ -44,8 +44,8 @@ metadata:
     app.kubernetes.io/instance: valkey
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: valkey
-    app.kubernetes.io/version: 8.1.3
-    helm.sh/chart: valkey-0.7.4
+    app.kubernetes.io/version: 8.1.4
+    helm.sh/chart: valkey-0.7.7
   name: valkey-init-scripts
 ---
 apiVersion: v1
@@ -55,8 +55,8 @@ metadata:
     app.kubernetes.io/instance: valkey
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: valkey
-    app.kubernetes.io/version: 8.1.3
-    helm.sh/chart: valkey-0.7.4
+    app.kubernetes.io/version: 8.1.4
+    helm.sh/chart: valkey-0.7.7
   name: valkey
 spec:
   ports:
@@ -76,8 +76,8 @@ metadata:
     app.kubernetes.io/instance: valkey
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: valkey
-    app.kubernetes.io/version: 8.1.3
-    helm.sh/chart: valkey-0.7.4
+    app.kubernetes.io/version: 8.1.4
+    helm.sh/chart: valkey-0.7.7
   name: valkey
 spec:
   accessModes:
@@ -95,8 +95,8 @@ metadata:
     app.kubernetes.io/instance: valkey
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: valkey
-    app.kubernetes.io/version: 8.1.3
-    helm.sh/chart: valkey-0.7.4
+    app.kubernetes.io/version: 8.1.4
+    helm.sh/chart: valkey-0.7.7
   name: valkey
 spec:
   replicas: 1
@@ -104,12 +104,14 @@ spec:
     matchLabels:
       app.kubernetes.io/instance: valkey
       app.kubernetes.io/name: valkey
+  strategy:
+    type: RollingUpdate
   template:
     metadata:
       labels:
         app.kubernetes.io/instance: valkey
         app.kubernetes.io/name: valkey
-        checksum/initconfig: 4ca0bfa021f2da0c4554a57544b03cfc
+        checksum/initconfig: 8c2eef07ad2fb92d71b03c260963d0b0
     spec:
       automountServiceAccountToken: false
       containers:
@@ -120,7 +122,7 @@ spec:
         env:
         - name: VALKEY_LOGLEVEL
           value: notice
-        image: docker.io/valkey/valkey:8.1.3
+        image: docker.io/valkey/valkey:8.1.4
         livenessProbe:
           exec:
             command:
@@ -154,7 +156,7 @@ spec:
       initContainers:
       - command:
        - /scripts/init.sh
-        image: docker.io/valkey/valkey:8.1.3
+        image: docker.io/valkey/valkey:8.1.4
         imagePullPolicy: IfNotPresent
         name: valkey-init
         securityContext:
diff --git a/valkey/src/kustomization.yaml b/valkey/src/kustomization.yaml
index b897d58..65a3978 100644
--- a/valkey/src/kustomization.yaml
+++ b/valkey/src/kustomization.yaml
@@ -5,7 +5,7 @@ kind: Kustomization
 helmCharts:
 - name: valkey
   repo: https://valkey.io/valkey-helm/
-  version: 0.7.4
+  version: 0.7.7
   releaseName: valkey
   includeCRDs: true
   namespace: valkey
diff --git a/valkey/src/values.yaml b/valkey/src/values.yaml
index 8fc2079..51359d8 100644
--- a/valkey/src/values.yaml
+++ b/valkey/src/values.yaml
@@ -50,6 +50,9 @@ service:
   type: ClusterIP
   # Port on which Valkey will be exposed
   port: 6379
+  annotations: {}
+  # NodePort value (if service.type is NodePort)
+  nodePort: 0
 
 # Network policy to control traffic to the pods
 # More info: https://kubernetes.io/docs/concepts/services-networking/network-policies/
@@ -83,6 +86,9 @@ dataStorage:
   # Use existing PVC by name (skip dynamic provisioning if set)
   persistentVolumeClaimName: ""
 
+  # Subpath inside PVC to mount
+  subPath: ""
+
   # Name of the volume (referenced in deployment)
   volumeName: "valkey-data"
 
@@ -111,9 +117,7 @@ extraValkeySecrets:
     mountPath: /run/secrets/valkey
 
 # Mount additional configMaps into the Valkey container
-#extraValkeyConfigs:
-#  - name: valkey-config
-#    mountPath: /extravalkeyconfigs/
+extraValkeyConfigs: []
 
 # Mount extra secrets as volume to init container (deprecated, use extraValkeySecrets)
 extraSecretValkeyConfigs: false
@@ -144,21 +148,173 @@ tolerations: []
 # Affinity rules for pod scheduling
 affinity: {}
 
+# Set Deployment strategy. See https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
+deploymentStrategy: RollingUpdate
+
 # See https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints
-topologySpreadConstraints: {}
+topologySpreadConstraints: []
 
 # Valkey logging level: debug, verbose, notice, warning
 valkeyLogLevel: "notice"
-
-metrics:
-  # Enable Prometheus exporter sidecar
-  enabled: false
-
-  exporter:
-    # Additional secrets to mount for metrics exporter
-    extraExporterSecrets: []
-
 # Environment variables to inject into Valkey container
 env: {}
 # Example:
 # LOG_LEVEL: info
+
+metrics:
+  # Enable Prometheus exporter sidecar
+  enabled: false
+  # Exporter configuration
+  exporter:
+    # Command to run in the metrics exporter container (overrides args)
+    command: []
+    # Arguments to pass to the metrics exporter container
+    args: []
+    # Example:
+    # - --redis.addr=redis:6379
+    # Port on which the metrics exporter will listen
+    port: 9121
+    # Image configuration
+    image:
+      # Prometheus exporter Docker image
+      repository: ghcr.io/oliver006/redis_exporter
+      # Image pull policy (Always, IfNotPresent, Never)
+      pullPolicy: IfNotPresent
+      # Image tag (leave empty to use latest)
+      tag: "v1.79.0"
+    resources: {}
+    # Example:
+    # limits:
+    #   cpu: 100m
+    #   memory: 128Mi
+    # requests:
+    #   cpu: 100m
+    #   memory: 128Mi
+    # Extra volume mounts for metrics exporter container
+    extraVolumeMounts: []
+    # Environment variables to inject into the metrics exporter container
+    extraEnvs: {}
+    # Example:
+    # LOG_LEVEL: info
+    securityContext: {}
+    # Example:
+    # runAsNonRoot: true
+    # runAsUser: 1000
+    # runAsGroup: 1000
+    # capabilities:
+    #   drop:
+    #     - ALL
+    # readOnlyRootFilesystem: true
+
+  # Service configuration for the metrics exporter
+  service:
+    # Enable a separate service for the metrics exporter
+    enabled: true
+    # Service type (ClusterIP, NodePort, LoadBalancer)
+    type: ClusterIP
+    # Port on which the metrics exporter service will be exposed
+    ports:
+      http: 9121
+    # Optional annotations for the metrics exporter service
+    annotations: {}
+    # Optional labels for the metrics exporter service
+    extraLabels: {}
+  # ServiceMonitor configuration for Prometheus Operator
+  serviceMonitor:
+    # Enable ServiceMonitor resource for scraping service metrics
+    enabled: false
+    # Port name or number to scrape metrics from
+    port: metrics
+    # Extra labels for the ServiceMonitor resource
+    extraLabels: {}
+    # Extra annotations for the ServiceMonitor resource
+    annotations: {}
+    # How often Prometheus should scrape metrics
+    interval: 30s
+    # Maximum duration allowed for a scrape request
+    scrapeTimeout: ""
+    # Relabeling rules applied before scraping metrics
+    relabelings: []
+    # Relabeling rules applied before ingesting metrics
+    metricRelabelings: []
+    # Set honorLabels to true to preserve original metric labels
+    honorLabels: false
+    # Extra labels to help Prometheus discover ServiceMonitor resources
+    additionalLabels: {}
+    # Pod labels to copy onto the generated metrics
+    podTargetLabels: []
+    # Maximum number of samples to collect per Pod scrape
+    sampleLimit: false
+    # Maximum number of scrape targets allowed
+    targetLimit: false
+  podMonitor:
+    # Enable PodMonitor resource for scraping pod metrics
+    enabled: false
+    # Port name or number to scrape metrics from
+    port: metrics
+    # Extra labels for the ServiceMonitor resource
+    extraLabels: {}
+    # Extra annotations for the ServiceMonitor resource
+    annotations: {}
+    # Frequency for Prometheus to scrape pod metrics
+    interval: 30s
+    # Time limit for each scrape operation
+    scrapeTimeout: ""
+    # Relabeling rules to apply before scraping pod metrics
+    relabelings: []
+    # Relabeling rules to apply before ingesting pod metrics
+    metricRelabelings: []
+    # If true, keeps original labels from the pod metrics
+    honorLabels: false
+    # Additional labels for Prometheus to find PodMonitor resources
+    additionalLabels: {}
+    # Pod labels to attach to the metrics
+    podTargetLabels: []
+    # Maximum samples to scrape from each Pod
+    sampleLimit: false
+    # Maximum number of pods to scrape
+    targetLimit: false
+
+  # PrometheusRule configuration for alerting rules (used by kube-prometheus-stack)
+  prometheusRule:
+    # Enable creation of PrometheusRule resource
+    enabled: false
+    # Extra labels to add to the PrometheusRule resource
+    extraLabels: {}
+    # Extra annotations to add to the PrometheusRule resource
+    extraAnnotations: {}
+    # List of Prometheus alerting rules
+    rules: []
+    # Example alerting rules:
+    # - alert: ValkeyDown
+    #   annotations:
+    #     summary: Valkey instance {{ "{{ $labels.instance }}" }} down
+    #     description: Valkey instance {{ "{{ $labels.instance }}" }} is down.
+    #   expr: |
+    #     redis_up{service="{{ include "valkey.fullname" . }}-metrics"} == 0
+    #   for: 2m
+    #   labels:
+    #     severity: error
+    # - alert: ValkeyMemoryHigh
+    #   annotations:
+    #     summary: Valkey instance {{ "{{ $labels.instance }}" }} is using too much memory
+    #     description: |
+    #       Valkey instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory.
+    #   expr: |
+    #     redis_memory_used_bytes{service="{{ include "valkey.fullname" . }}-metrics"} * 100
+    #     /
+    #     redis_memory_max_bytes{service="{{ include "valkey.fullname" . }}-metrics"}
+    #     > 90 <= 100
+    #   for: 2m
+    #   labels:
+    #     severity: error
+    # - alert: ValkeyKeyEviction
+    #   annotations:
+    #     summary: Valkey instance {{ "{{ $labels.instance }}" }} has evicted keys
+    #     description: |
+    #       Valkey instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes.
+    #   expr: |
+    #     increase(redis_evicted_keys_total{service="{{ include "valkey.fullname" . }}-metrics"}[5m]) > 0
+    #   for: 1s
+    #   labels:
+    #     severity: error