uptime-kuma update

Philip Haupt
2025-09-03 13:24:10 +02:00
parent 121d092fec
commit 29aeb8def1
3 changed files with 254 additions and 12 deletions

View File

@@ -5,8 +5,8 @@ metadata:
     app.kubernetes.io/instance: uptime-kuma
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: uptime-kuma
-    app.kubernetes.io/version: 1.23.13
-    helm.sh/chart: uptime-kuma-2.21.2
+    app.kubernetes.io/version: 1.23.16
+    helm.sh/chart: uptime-kuma-2.22.0
   name: uptime-kuma
   namespace: uptime-kuma
 spec:
@@ -27,17 +27,17 @@ metadata:
     app.kubernetes.io/instance: uptime-kuma
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: uptime-kuma
-    app.kubernetes.io/version: 1.23.13
-    helm.sh/chart: uptime-kuma-2.21.2
+    app.kubernetes.io/version: 1.23.16
+    helm.sh/chart: uptime-kuma-2.22.0
   name: uptime-kuma-pvc
   namespace: uptime-kuma
 spec:
-  storageClassName: openebs-3-replicas
   accessModes:
     - ReadWriteOnce
   resources:
     requests:
       storage: 1Gi
+  storageClassName: openebs-3-replicas
 ---
 apiVersion: apps/v1
 kind: Deployment
@@ -46,8 +46,8 @@ metadata:
     app.kubernetes.io/instance: uptime-kuma
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: uptime-kuma
-    app.kubernetes.io/version: 1.23.13
-    helm.sh/chart: uptime-kuma-2.21.2
+    app.kubernetes.io/version: 1.23.16
+    helm.sh/chart: uptime-kuma-2.22.0
   name: uptime-kuma
   namespace: uptime-kuma
 spec:
@@ -66,7 +66,8 @@ spec:
     spec:
       automountServiceAccountToken: false
       containers:
-        - image: louislam/uptime-kuma:1.23.13-debian
+        - env: null
+          image: louislam/uptime-kuma:1.23.16-debian
           imagePullPolicy: IfNotPresent
           livenessProbe:
             exec:
@@ -113,8 +114,8 @@ metadata:
     app.kubernetes.io/instance: uptime-kuma
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: uptime-kuma
-    app.kubernetes.io/version: 1.23.13
-    helm.sh/chart: uptime-kuma-2.21.2
+    app.kubernetes.io/version: 1.23.16
+    helm.sh/chart: uptime-kuma-2.22.0
   name: uptime-kuma-test-connection
   namespace: uptime-kuma
 spec:
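
Note: the new "env: null" entry in the rendered Deployment presumably comes from chart 2.22.0 now templating the podEnv value even when it is empty. A minimal sketch of populating it through values.yaml; the variable name and value below are hypothetical, not part of this commit:

podEnv:
  - name: "TZ"            # hypothetical example variable
    value: "Europe/Berlin"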

View File

@@ -5,8 +5,8 @@ kind: Kustomization
 helmCharts:
   - name: uptime-kuma
     repo: https://helm.irsigler.cloud
-    version: 2.21.2
+    version: 2.22.0
     releaseName: uptime-kuma
     includeCRDs: true
     namespace: uptime-kuma
+    valuesFile: values.yaml
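
Note: kustomize resolves valuesFile relative to the kustomization file, so this presumably points at the new values.yaml added below. Small deltas can alternatively be kept inline via the helmCharts field valuesInline instead of carrying a full copy of the chart defaults; a sketch under that assumption, pinning only the image tag and the storage class used here:

helmCharts:
  - name: uptime-kuma
    repo: https://helm.irsigler.cloud
    version: 2.22.0
    releaseName: uptime-kuma
    includeCRDs: true
    namespace: uptime-kuma
    valuesInline:
      image:
        tag: "1.23.16-debian"
      volume:
        storageClassName: openebs-3-replicas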

uptime-kuma/src/values.yaml (new file, 241 lines)
View File

@@ -0,0 +1,241 @@
# Default values for uptime-kuma.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

image:
  repository: louislam/uptime-kuma
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: "1.23.16-debian"

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

# -- A custom namespace to override the default namespace for the deployed resources.
namespaceOverride: ""

# If this option is set to false, a StatefulSet is used instead of a Deployment.
useDeploy: true

serviceAccount:
  # Specifies whether a service account should be created
  create: false
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

podAnnotations: {}
podLabels:
  {}
  # app: uptime-kuma

podEnv: []
# optional additional environment variables
# - name: "A_VARIABLE"
#   value: "a-value"

podSecurityContext:
  {}
  # fsGroup: 2000

securityContext:
  {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

service:
  type: ClusterIP
  port: 3001
  nodePort:
  annotations: {}

ingress:
  enabled: false
  # className: ""
  extraLabels:
    {}
    # vhost: uptime-kuma.company.corp
  annotations:
    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
    nginx.ingress.kubernetes.io/server-snippets: |
      location / {
        proxy_set_header Upgrade $http_upgrade;
        proxy_http_version 1.1;
        proxy_set_header X-Forwarded-Host $http_host;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header Host $host;
        proxy_set_header Connection "upgrade";
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Upgrade $http_upgrade;
        proxy_cache_bypass $http_upgrade;
      }
  hosts:
    - host: chart-example.local
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls:
    []
    # - secretName: chart-example-tls
    #   hosts:
    #     - chart-example.local

resources:
  {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}
tolerations: []
affinity: {}

livenessProbe:
  enabled: true
  failureThreshold: 3
  # Uptime-Kuma recommends a delay of 180 seconds so that the server has fully started.
  # https://github.com/louislam/uptime-kuma/blob/ae224f9e188b1fc32ed8729818710975589cdce7/extra/healthcheck.go#L3
  initialDelaySeconds: 180
  periodSeconds: 10
  successThreshold: 1
  timeoutSeconds: 2
  # The Node.js version of this healthcheck is no longer supported, therefore we don't specify a node command.
  # https://github.com/louislam/uptime-kuma/blob/ae224f9e188b1fc32ed8729818710975589cdce7/extra/healthcheck.js#L6
  exec:
    command:
      - "extra/healthcheck"

readinessProbe:
  enabled: true
  initialDelaySeconds: 10
  periodSeconds: 10
  timeoutSeconds: 1
  failureThreshold: 3
  successThreshold: 1
  exec:
    command: []
  httpGet:
    path: /
    port: 3001
    scheme: HTTP
    httpHeaders: []

volume:
  enabled: true
  accessMode: ReadWriteOnce
  size: 1Gi
  # If you want to use a storage class other than the default, uncomment this
  # line and define the storage class name
  storageClassName: openebs-3-replicas
  # Reuse your own pre-existing PVC.
  existingClaim: ""

# -- A list of additional volumes to be added to the pod
additionalVolumes:
  []
  # - name: "additional-certificates"
  #   configMap:
  #     name: "additional-certificates"
  #     optional: true
  #     defaultMode: 420

# -- A list of additional volumeMounts to be added to the pod
additionalVolumeMounts:
  []
  # - name: "additional-certificates"
  #   mountPath: "/etc/ssl/certs/additional/additional-ca.pem"
  #   readOnly: true
  #   subPath: "additional-ca.pem"

strategy:
  type: Recreate

# MariaDB configuration
mariadb:
  enabled: false
  architecture: standalone
  auth:
    database: uptime_kuma
    username: uptime_kuma
    password: ""
    rootPassword: ""

# Prometheus ServiceMonitor configuration
serviceMonitor:
  enabled: false
  # -- Scrape interval. If not set, the Prometheus default scrape interval is used.
  interval: 60s
  # -- Timeout if metrics can't be retrieved in the given time interval
  scrapeTimeout: 10s
  # -- Scheme to use when scraping, e.g. http (default) or https.
  scheme: ~
  # -- TLS configuration to use when scraping, only applicable for scheme https.
  tlsConfig: {}
  # -- Prometheus [RelabelConfigs] to apply to samples before scraping
  relabelings: []
  # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
  metricRelabelings: []
  # -- Prometheus ServiceMonitor selector; only select Prometheus instances with these
  # labels (if not set, select any Prometheus)
  selector: {}
  # -- Namespace where the ServiceMonitor resource should be created; defaults to
  # the release namespace
  namespace: ~
  # -- Additional labels to add to the ServiceMonitor
  additionalLabels: {}
  # -- Additional annotations to add to the ServiceMonitor
  annotations: {}
  # -- BasicAuth credentials for scraping metrics; use an API token and any string for the username
  # basicAuth:
  #   username: "metrics"
  #   password: ""

# -- Use this option to set a custom DNS policy on the created deployment
dnsPolicy: ""

# -- Use this option to set custom DNS configuration on the created deployment
dnsConfig: {}

# -- Use this option to set a custom PriorityClass on the created deployment
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass
priorityClassName: ""

# -- Create a NetworkPolicy
networkPolicy:
  # -- Enable/disable Network Policy
  enabled: false
  # -- Enable/disable Ingress policy type
  ingress: true
  # -- Enable/disable Egress policy type
  egress: true
  # -- Allow incoming connections only from specific Pods
  # When set to true, uptime-kuma will accept connections from any source.
  # When false, only Pods with the label {{ include "uptime-kuma.fullname" . }}-client=true will have network access
  allowExternal: true
  # -- Selects particular namespaces for which all Pods are allowed as ingress sources
  namespaceSelector: {}
  # matchLabels:
  #   role: frontend
  # matchExpressions:
  #   - {key: role, operator: In, values: [frontend]}
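
Closing note: this values.yaml largely mirrors the chart defaults; the pinned image tag and volume.storageClassName: openebs-3-replicas are the notable local settings. Further overrides compose on top of it. A hedged sketch of enabling the ingress with TLS, for example; the hostname and secret name are placeholders, not part of this commit:

ingress:
  enabled: true
  hosts:
    - host: uptime.example.com        # placeholder hostname
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls:
    - secretName: uptime-kuma-tls     # placeholder TLS secret
      hosts:
        - uptime.example.com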