Directory | Security Advisories | Pricing
Sign in
Directory
kube-prometheus-stack logo — HELM

kube-prometheus-stack

Helm chart
Last changed
Request a free trial

Contact our team to test out this Helm chart and related images for free. Please also indicate any other images you would like to evaluate.

Overview
Chart versions
Default values
Chart metadata
Images

Tag:

# Default values for kube-prometheus-stack.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

## Provide a name in place of kube-prometheus-stack for `app:` labels
##
nameOverride: ""
## Override the deployment namespace
##
namespaceOverride: ""
## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.26.6
##
kubeTargetVersionOverride: ""
## Allow kubeVersion to be overridden while creating the ingress
##
kubeVersionOverride: ""
## Provide a name to substitute for the full names of resources
##
fullnameOverride: ""
## Labels to apply to all resources
##
commonLabels: {}
# scmhash: abc123
# myLabel: aakkmd
## Install Prometheus Operator CRDs
##
crds:
  enabled: true

  ## The CRD upgrade job mitigates the limitation of helm not being able to upgrade CRDs.
  ## The job will apply the CRDs to the cluster before the operator is deployed, using helm hooks.
  ## It deploys a corresponding clusterrole, clusterrolebinding and serviceaccount to apply the CRDs.
  ## This feature is in preview, off by default and may change in the future.
  upgradeJob:
    enabled: false
    forceConflicts: false
    image:
      busybox:
        registry: cgr.dev
        repository: chainguard-private/busybox
        tag: latest
        sha: sha256:8322ce98ac75f980c930665c041aa2db8c145ca8d5f8a9a120a18f33d25ecc77
        pullPolicy: IfNotPresent
      kubectl:
        registry: cgr.dev
        repository: chainguard-private/kubectl
        tag: latest
        sha: sha256:b37591383f2cb5642d2477662cf330f9515dd1fea24565d3e5c56eec8de6d3c7
        pullPolicy: IfNotPresent
    env: {}
    ## Define resources requests and limits for single Pods.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    resources: {}
    ## Additional volumes
    ##
    extraVolumes: []
    ## Additional volume mounts
    ##
    extraVolumeMounts: []
    ## Define which Nodes the Pods are scheduled on.
    ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
    ##
    nodeSelector: {}
    ## Assign custom affinity rules to the upgrade-crd job
    ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
    ##
    affinity: {}
    # nodeAffinity:
    #   requiredDuringSchedulingIgnoredDuringExecution:
    #     nodeSelectorTerms:
    #     - matchExpressions:
    #       - key: kubernetes.io/e2e-az-name
    #         operator: In
    #         values:
    #         - e2e-az1
    #         - e2e-az2

    ## If specified, the pod's tolerations.
    ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
    ##
    tolerations: []
    # - key: "key"
    #   operator: "Equal"
    #   value: "value"
    #   effect: "NoSchedule"

    ## If specified, the pod's topology spread constraints.
    ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
    ##
    topologySpreadConstraints: []
    # - maxSkew: 1
    #   topologyKey: topology.kubernetes.io/zone
    #   whenUnsatisfiable: DoNotSchedule
    #   labelSelector:
    #     matchLabels:
    #       app: alertmanager

    ## Labels to add to the upgrade-crd job
    ##
    labels: {}
    ## Annotations to add to the upgrade-crd job
    ##
    annotations: {}
    ## Labels to add to the upgrade-crd pod
    ##
    podLabels: {}
    ## Annotations to add to the upgrade-crd pod
    ##
    podAnnotations: {}
    ## Service account for upgrade crd job to use.
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
    ##
    serviceAccount:
      create: true
      name: ""
      annotations: {}
      labels: {}
      automountServiceAccountToken: true
    ## Automounting API credentials for upgrade crd job pod.
    ##
    automountServiceAccountToken: true
    ## Container-specific security context configuration
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
    ##
    containerSecurityContext:
      allowPrivilegeEscalation: false
      readOnlyRootFilesystem: true
      capabilities:
        drop:
        - ALL
    ## SecurityContext holds pod-level security attributes and common container settings.
    ## This defaults to non root user with uid 1000 and gid 2000. *v1.PodSecurityContext false
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
    ##
    podSecurityContext:
      fsGroup: 65534
      runAsGroup: 65534
      runAsNonRoot: true
      runAsUser: 65534
      seccompProfile:
        type: RuntimeDefault
## Custom rules to override "for" and "severity" in defaultRules
##
customRules: {}
#  AlertmanagerFailedReload:
#    for: 3m
#  AlertmanagerMembersInconsistent:
#    for: 5m
#    severity: "warning"
## Create default rules for monitoring the cluster
##
defaultRules:
  create: true
  rules:
    alertmanager: true
    etcd: true
    configReloaders: true
    general: true
    k8sContainerCpuUsageSecondsTotal: true
    k8sContainerMemoryCache: true
    k8sContainerMemoryRss: true
    k8sContainerMemorySwap: true
    k8sContainerResource: true
    k8sContainerMemoryWorkingSetBytes: true
    k8sPodOwner: true
    kubeApiserverAvailability: true
    kubeApiserverBurnrate: true
    kubeApiserverHistogram: true
    kubeApiserverSlos: true
    kubeControllerManager: true
    kubelet: true
    kubeProxy: true
    kubePrometheusGeneral: true
    kubePrometheusNodeRecording: true
    kubernetesApps: true
    kubernetesResources: true
    kubernetesStorage: true
    kubernetesSystem: true
    kubeSchedulerAlerting: true
    kubeSchedulerRecording: true
    kubeStateMetrics: true
    network: true
    node: true
    nodeExporterAlerting: true
    nodeExporterRecording: true
    prometheus: true
    prometheusOperator: true
    windows: true

  # Defines the operator for namespace selection in rules
  # Use "=~" to include namespaces matching the pattern (default)
  # Use "!~" to exclude namespaces matching the pattern
  appNamespacesOperator: "=~"
  ## Reduce app namespace alert scope
  appNamespacesTarget: ".*"
  ## Set keep_firing_for for all alerts
  keepFiringFor: ""
  ## Labels for default rules
  labels: {}
  ## Annotations for default rules
  annotations: {}
  ## Additional labels for PrometheusRule alerts
  additionalRuleLabels: {}
  ## Additional annotations for PrometheusRule alerts
  additionalRuleAnnotations: {}
  ## Additional labels for specific PrometheusRule alert groups
  additionalRuleGroupLabels:
    alertmanager: {}
    etcd: {}
    configReloaders: {}
    general: {}
    k8sContainerCpuUsageSecondsTotal: {}
    k8sContainerMemoryCache: {}
    k8sContainerMemoryRss: {}
    k8sContainerMemorySwap: {}
    k8sContainerResource: {}
    k8sPodOwner: {}
    kubeApiserverAvailability: {}
    kubeApiserverBurnrate: {}
    kubeApiserverHistogram: {}
    kubeApiserverSlos: {}
    kubeControllerManager: {}
    kubelet: {}
    kubeProxy: {}
    kubePrometheusGeneral: {}
    kubePrometheusNodeRecording: {}
    kubernetesApps: {}
    kubernetesResources: {}
    kubernetesStorage: {}
    kubernetesSystem: {}
    kubeSchedulerAlerting: {}
    kubeSchedulerRecording: {}
    kubeStateMetrics: {}
    network: {}
    node: {}
    nodeExporterAlerting: {}
    nodeExporterRecording: {}
    prometheus: {}
    prometheusOperator: {}
  ## Additional annotations for specific PrometheusRule alert groups
  additionalRuleGroupAnnotations:
    alertmanager: {}
    etcd: {}
    configReloaders: {}
    general: {}
    k8sContainerCpuUsageSecondsTotal: {}
    k8sContainerMemoryCache: {}
    k8sContainerMemoryRss: {}
    k8sContainerMemorySwap: {}
    k8sContainerResource: {}
    k8sPodOwner: {}
    kubeApiserverAvailability: {}
    kubeApiserverBurnrate: {}
    kubeApiserverHistogram: {}
    kubeApiserverSlos: {}
    kubeControllerManager: {}
    kubelet: {}
    kubeProxy: {}
    kubePrometheusGeneral: {}
    kubePrometheusNodeRecording: {}
    kubernetesApps: {}
    kubernetesResources: {}
    kubernetesStorage: {}
    kubernetesSystem: {}
    kubeSchedulerAlerting: {}
    kubeSchedulerRecording: {}
    kubeStateMetrics: {}
    network: {}
    node: {}
    nodeExporterAlerting: {}
    nodeExporterRecording: {}
    prometheus: {}
    prometheusOperator: {}
  additionalAggregationLabels: []
  ## Prefix for runbook URLs. Use this to override the first part of the runbookURLs that is common to all rules.
  runbookUrl: "https://runbooks.prometheus-operator.dev/runbooks"
  node:
    fsSelector: 'fstype!=""'
    # fsSelector: 'fstype=~"ext[234]|btrfs|xfs|zfs"'
  ## Disabled PrometheusRule alerts
  disabled: {}
  # KubeAPIDown: true
  # NodeRAIDDegraded: true
## Deprecated way to provide custom recording or alerting rules to be deployed into the cluster.
##
# additionalPrometheusRules: []
#  - name: my-rule-file
#    groups:
#      - name: my_group
#        rules:
#        - record: my_record
#          expr: 100 * my_record

## Provide custom recording or alerting rules to be deployed into the cluster.
##
additionalPrometheusRulesMap: {}
#  rule-name:
#    groups:
#    - name: my_group
#      rules:
#      - record: my_record
#        expr: 100 * my_record
##
global:
  rbac:
    create: true

    ## Create ClusterRoles that extend the existing view, edit and admin ClusterRoles to interact with prometheus-operator CRDs
    ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles
    createAggregateClusterRoles: false

  ## Global image registry to use if it needs to be overridden for some specific use cases (e.g. local registries, custom images, ...)
  ##
  imageRegistry: ""

  ## Reference to one or more secrets to be used when pulling images
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ##
  imagePullSecrets: []
  # - name: "image-pull-secret"
  # or
  # - "image-pull-secret"
windowsMonitoring:
  ## Deploys the windows-exporter and Windows-specific dashboards and rules (job name must be 'windows-exporter')
  enabled: false
## Configuration for prometheus-windows-exporter
## ref: https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-windows-exporter
##
prometheus-windows-exporter:
  ## Enable ServiceMonitor and set Kubernetes label to use as a job label
  ##
  prometheus:
    monitor:
      enabled: true
      jobLabel: jobLabel
      releaseLabel: true

  ## Set job label to 'windows-exporter' as required by the default Prometheus rules and Grafana dashboards
  ##
  podLabels:
    jobLabel: windows-exporter

  ## Enable memory and container metrics as required by the default Prometheus rules and Grafana dashboards
  ##
  config: |-
    collectors:
      enabled: '[defaults],memory,container'
## Configuration for alertmanager
346
## ref: https://prometheus.io/docs/alerting/alertmanager/
347
##
348
alertmanager:
349
## Deploy alertmanager
350
##
351
enabled: true
352
# Optional: Override the namespace where Alertmanager will be deployed.
353
namespaceOverride: ""
354
## Annotations for Alertmanager
355
##
356
annotations: {}
357
## Additional labels for Alertmanager
358
##
359
additionalLabels: {}
360
## API that Prometheus will use to communicate with alertmanager. Possible values are v1, v2
361
##
362
apiVersion: v2
363
## @param alertmanager.enableFeatures Enable access to Alertmanager disabled features.
364
##
365
enableFeatures: []
366
## Create dashboard configmap even if alertmanager deployment has been disabled
367
##
368
forceDeployDashboards: false
369
## Network Policy configuration
370
##
371
networkPolicy:
372
# -- Enable network policy for Alertmanager
373
enabled: false
374
# -- Define policy types. If egress is enabled, both Ingress and Egress will be used
375
# Valid values are ["Ingress"] or ["Ingress", "Egress"]
376
##
377
policyTypes:
378
- Ingress
379
# -- Gateway (formerly ingress controller) configuration
380
##
381
gateway:
382
# -- Gateway namespace
383
##
384
namespace: ""
385
# -- Gateway pod labels
386
##
387
podLabels: {}
388
# app.kubernetes.io/name: ingress-nginx
389
# -- Additional custom ingress rules
390
##
391
additionalIngress: []
392
# - from:
393
# - namespaceSelector:
394
# matchLabels:
395
# name: another-namespace
396
# podSelector:
397
# matchLabels:
398
# app: another-app
399
# - from:
400
# - podSelector:
401
# matchLabels:
402
# app.kubernetes.io/name: loki
403
# ports:
404
# - port: 9093
405
# protocol: TCP
406
407
# -- Configure egress rules
408
##
409
egress:
410
# -- Enable egress rules. When enabled, policyTypes will include Egress
411
##
412
enabled: false
413
# -- Custom egress rules
414
##
415
rules: []
416
# - to:
417
# - namespaceSelector: {}
418
# podSelector:
419
# matchLabels:
420
# name: smtp-relay
421
# ports:
422
# - port: 25
423
# protocol: TCP
424
# -- Enable rules for alertmanager cluster traffic
425
##
426
enableClusterRules: true
427
# -- Configure monitoring component rules
428
##
429
monitoringRules:
430
# -- Enable ingress from Prometheus
431
##
432
prometheus: true
433
# -- Enable ingress for config reloader metrics
434
##
435
configReloader: true
436
## Service account for Alertmanager to use.
437
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
438
##
439
serviceAccount:
440
create: true
441
name: ""
442
annotations: {}
443
automountServiceAccountToken: true
444
## Configure pod disruption budgets for Alertmanager
445
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
446
##
447
podDisruptionBudget:
448
enabled: false
449
minAvailable: 1
450
# maxUnavailable: ""
451
unhealthyPodEvictionPolicy: AlwaysAllow
452
## Enable vertical pod autoscaler support for Alertmanager
453
## ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler
454
##
455
verticalPodAutoscaler:
456
enabled: false
457
# Recommender responsible for generating recommendation for the object.
458
# List should be empty (then the default recommender will generate the recommendation)
459
# or contain exactly one recommender.
460
# recommenders:
461
# - name: custom-recommender-performance
462
463
# List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
464
controlledResources: []
465
# Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits.
466
# controlledValues: RequestsAndLimits
467
468
# Define the max allowed resources for the pod
469
maxAllowed: {}
470
# cpu: 200m
471
# memory: 100Mi
472
# Define the min allowed resources for the pod
473
minAllowed: {}
474
# cpu: 200m
475
# memory: 100Mi
476
477
updatePolicy:
478
# Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
479
# are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "InPlaceOrRecreate".
480
updateMode: Recreate
481
## Alertmanager configuration directives
482
## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
483
## https://prometheus.io/webtools/alerting/routing-tree-editor/
484
##
485
config:
486
global:
487
resolve_timeout: 5m
488
inhibit_rules:
489
- source_matchers:
490
- 'severity = critical'
491
target_matchers:
492
- 'severity =~ warning|info'
493
equal:
494
- 'namespace'
495
- 'alertname'
496
- source_matchers:
497
- 'severity = warning'
498
target_matchers:
499
- 'severity = info'
500
equal:
501
- 'namespace'
502
- 'alertname'
503
- source_matchers:
504
- 'alertname = InfoInhibitor'
505
target_matchers:
506
- 'severity = info'
507
equal:
508
- 'namespace'
509
- target_matchers:
510
- 'alertname = InfoInhibitor'
511
route:
512
group_by: ['namespace']
513
group_wait: 30s
514
group_interval: 5m
515
repeat_interval: 12h
516
receiver: 'null'
517
routes:
518
- receiver: 'null'
519
matchers:
520
- alertname = "Watchdog"
521
receivers:
522
- name: 'null'
523
templates:
524
- '/etc/alertmanager/config/*.tmpl'
525
## Alertmanager configuration directives (as string type, preferred over the config hash map)
526
## stringConfig will be used only if tplConfig is true
527
## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
528
## https://prometheus.io/webtools/alerting/routing-tree-editor/
529
##
530
stringConfig: ""
531
## Pass the Alertmanager configuration directives through Helm's templating
532
## engine. If the Alertmanager configuration contains Alertmanager templates,
533
## they'll need to be properly escaped so that they are not interpreted by
534
## Helm
535
## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function
536
## https://prometheus.io/docs/alerting/configuration/#tmpl_string
537
## https://prometheus.io/docs/alerting/notifications/
538
## https://prometheus.io/docs/alerting/notification_examples/
539
tplConfig: false
540
## Alertmanager template files to format alerts
541
## By default, templateFiles are placed in /etc/alertmanager/config/ and if
542
## they have a .tmpl file suffix will be loaded. See config.templates above
543
## to change, add other suffixes. If adding other suffixes, be sure to update
544
## config.templates above to include those suffixes.
545
## ref: https://prometheus.io/docs/alerting/notifications/
546
## https://prometheus.io/docs/alerting/notification_examples/
547
##
548
templateFiles: {}
549
#
550
## An example template:
551
# template_1.tmpl: |-
552
# {{ define "cluster" }}{{ .ExternalURL | reReplaceAll ".*alertmanager\\.(.*)" "$1" }}{{ end }}
553
#
554
# {{ define "slack.myorg.text" }}
555
# {{- $root := . -}}
556
# {{ range .Alerts }}
557
# *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}`
558
# *Cluster:* {{ template "cluster" $root }}
559
# *Description:* {{ .Annotations.description }}
560
# *Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:>
561
# *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:>
562
# *Details:*
563
# {{ range .Labels.SortedPairs }} - *{{ .Name }}:* `{{ .Value }}`
564
# {{ end }}
565
# {{ end }}
566
# {{ end }}
567
568
ingress:
569
enabled: false
570
ingressClassName: ""
571
annotations: {}
572
labels: {}
573
## Override ingress to a different defined port on the service
574
# servicePort: 8081
575
## Override ingress to a different service then the default, this is useful if you need to
576
## point to a specific instance of the alertmanager (eg kube-prometheus-stack-alertmanager-0)
577
# serviceName: kube-prometheus-stack-alertmanager-0
578
579
## Hosts must be provided if Ingress is enabled.
580
##
581
hosts: []
582
# - alertmanager.domain.com
583
584
## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix
585
##
586
paths: []
587
# - /
588
589
## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
590
## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
591
# pathType: ImplementationSpecific
592
593
## TLS configuration for Alertmanager Ingress
594
## Secret must be manually created in the namespace
595
##
596
tls: []
597
# - secretName: alertmanager-general-tls
598
# hosts:
599
# - alertmanager.example.com
600
# -- BETA: Configure the gateway routes for the chart here.
601
# More routes can be added by adding a dictionary key like the 'main' route.
602
# Be aware that this is an early beta of this feature,
603
# kube-prometheus-stack does not guarantee this works and is subject to change.
604
# Being BETA this can/will change in the future without notice, do not use unless you want to take that risk
605
# [[ref]](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1alpha2)
606
route:
607
main:
608
# -- Enables or disables the route
609
enabled: false
610
# -- Set the route apiVersion, e.g. gateway.networking.k8s.io/v1 or gateway.networking.k8s.io/v1alpha2
611
apiVersion: gateway.networking.k8s.io/v1
612
# -- Set the route kind
613
# Valid options are GRPCRoute, HTTPRoute, TCPRoute, TLSRoute, UDPRoute
614
kind: HTTPRoute
615
annotations: {}
616
labels: {}
617
hostnames: []
618
# - my-filter.example.com
619
parentRefs: []
620
# - name: acme-gw
621
622
# -- create http route for redirect (https://gateway-api.sigs.k8s.io/guides/http-redirect-rewrite/#http-to-https-redirects)
623
## Take care that you only enable this on the http listener of the gateway to avoid an infinite redirect.
624
## matches, filters and additionalRules will be ignored if this is set to true. Be are
625
httpsRedirect: false
626
matches:
627
- path:
628
type: PathPrefix
629
value: /
630
## Filters define the filters that are applied to requests that match this rule.
631
filters: []
632
## Session persistence configuration for the route rule.
633
sessionPersistence: {}
634
# sessionName: route
635
# type: Cookie
636
# absoluteTimeout: 12h
637
# cookieConfig:
638
# lifetimeType: Permanent
639
640
## Additional custom rules that can be added to the route
641
additionalRules: []
642
## Configuration for Alertmanager secret
643
##
644
secret:
645
annotations: {}
646
## Configuration for creating an Ingress that will map to each Alertmanager replica service
647
## alertmanager.servicePerReplica must be enabled
648
##
649
ingressPerReplica:
650
enabled: false
651
ingressClassName: ""
652
annotations: {}
653
labels: {}
654
## Final form of the hostname for each per replica ingress is
655
## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }}
656
##
657
## Prefix for the per replica ingress that will have `-$replicaNumber`
658
## appended to the end
659
hostPrefix: ""
660
## Domain that will be used for the per replica ingress
661
hostDomain: ""
662
## Paths to use for ingress rules
663
##
664
paths: []
665
# - /
666
667
## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
668
## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
669
# pathType: ImplementationSpecific
670
671
## Secret name containing the TLS certificate for alertmanager per replica ingress
672
## Secret must be manually created in the namespace
673
tlsSecretName: ""
674
## Separated secret for each per replica Ingress. Can be used together with cert-manager
675
##
676
tlsSecretPerReplica:
677
enabled: false
678
## Final form of the secret for each per replica ingress is
679
## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }}
680
##
681
prefix: "alertmanager"
682
## Configuration for Alertmanager service
683
##
684
service:
685
enabled: true
686
annotations: {}
687
labels: {}
688
clusterIP: ""
689
ipDualStack:
690
enabled: false
691
ipFamilies: ["IPv6", "IPv4"]
692
ipFamilyPolicy: "PreferDualStack"
693
## Port for Alertmanager Service to listen on
694
##
695
port: 9093
696
## Port for Alertmanager cluster communication
697
##
698
# clusterPort: 9094
699
## To be used with a proxy extraContainer port
700
##
701
targetPort: 9093
702
## Port to expose on each node
703
## Only used if service.type is 'NodePort'
704
##
705
nodePort: 30903
706
## List of IP addresses at which the Prometheus server service is available
707
## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
708
##
709
710
## Additional ports to open for Alertmanager service
711
##
712
additionalPorts: []
713
# - name: oauth-proxy
714
# port: 8081
715
# targetPort: 8081
716
# - name: oauth-metrics
717
# port: 8082
718
# targetPort: 8082
719
720
externalIPs: []
721
loadBalancerIP: ""
722
loadBalancerSourceRanges: []
723
## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
724
##
725
externalTrafficPolicy: Cluster
726
## If you want to make sure that connections from a particular client are passed to the same Pod each time
727
## Accepts 'ClientIP' or 'None'
728
##
729
sessionAffinity: None
730
## If you want to modify the ClientIP sessionAffinity timeout
731
## The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP"
732
##
733
sessionAffinityConfig:
734
clientIP:
735
timeoutSeconds: 10800
736
## Service type
737
##
738
type: ClusterIP
739
## Configuration for creating a separate Service for each statefulset Alertmanager replica
740
##
741
servicePerReplica:
742
enabled: false
743
annotations: {}
744
## Port for Alertmanager Service per replica to listen on
745
##
746
port: 9093
747
## To be used with a proxy extraContainer port
748
targetPort: 9093
749
## Port to expose on each node
750
## Only used if servicePerReplica.type is 'NodePort'
751
##
752
nodePort: 30904
753
## Loadbalancer source IP ranges
754
## Only used if servicePerReplica.type is "LoadBalancer"
755
loadBalancerSourceRanges: []
756
## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
757
##
758
externalTrafficPolicy: Cluster
759
## Service type
760
##
761
type: ClusterIP
762
## Configuration for creating a ServiceMonitor for AlertManager
763
##
764
serviceMonitor:
765
## If true, a ServiceMonitor will be created for the AlertManager service.
766
##
767
selfMonitor: true
768
## Scrape interval. If not set, the Prometheus default scrape interval is used.
769
##
770
interval: ""
771
## Additional labels
772
##
773
additionalLabels: {}
774
## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
775
##
776
sampleLimit: 0
777
## TargetLimit defines a limit on the number of scraped targets that will be accepted.
778
##
779
targetLimit: 0
780
## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
781
##
782
labelLimit: 0
783
## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
784
##
785
labelNameLengthLimit: 0
786
## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
787
##
788
labelValueLengthLimit: 0
789
## proxyUrl: URL of a proxy that should be used for scraping.
790
##
791
proxyUrl: ""
792
## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
793
scheme: ""
794
## enableHttp2: Whether to enable HTTP2.
795
## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#endpoint
796
enableHttp2: true
797
## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
798
## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#tlsconfig
799
tlsConfig: {}
800
bearerTokenFile:
801
## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
802
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
803
##
804
metricRelabelings: []
805
# - action: keep
806
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
807
# sourceLabels: [__name__]
808
809
## RelabelConfigs to apply to samples before scraping
810
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
811
##
812
relabelings: []
813
# - sourceLabels: [__meta_kubernetes_pod_node_name]
814
# separator: ;
815
# regex: ^(.*)$
816
# targetLabel: nodename
817
# replacement: $1
818
# action: replace
819
820
## Additional Endpoints
821
##
822
additionalEndpoints: []
823
# - port: oauth-metrics
824
# path: /metrics
825
## Settings affecting alertmanagerSpec
826
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#alertmanagerspec
827
##
828
alertmanagerSpec:
829
## Statefulset's persistent volume claim retention policy
830
## whenDeleted and whenScaled determine whether
831
## statefulset's PVCs are deleted (true) or retained (false)
832
## on scaling down and deleting statefulset, respectively.
833
## Requires Kubernetes version 1.27.0+.
834
## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
835
persistentVolumeClaimRetentionPolicy: {}
836
# whenDeleted: Retain
837
# whenScaled: Retain
838
839
## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
840
## Metadata Labels and Annotations gets propagated to the Alertmanager pods.
841
##
842
podMetadata: {}
843
##
844
serviceName:
845
## Image of Alertmanager
846
##
847
image:
848
registry: cgr.dev
849
repository: chainguard-private/prometheus-alertmanager
850
tag: latest
851
sha: sha256:b8361113442ad39e2f8e3cf70e0621e8a0e0975d53d79c3646ad875ae9301da8
852
pullPolicy: IfNotPresent
853
## If true then the user will be responsible to provide a secret with alertmanager configuration
854
## So when true the config part will be ignored (including templateFiles) and the one in the secret will be used
855
##
856
useExistingSecret: false
857
## Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the
858
## Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/.
859
##
860
secrets: []
861
## If false then the user will opt out of automounting API credentials.
862
##
863
automountServiceAccountToken: true
## ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods.
## The ConfigMaps are mounted into /etc/alertmanager/configmaps/.
##
configMaps: []
## ConfigSecret is the name of a Kubernetes Secret in the same namespace as the Alertmanager object, which contains configuration for
## this Alertmanager instance. Defaults to 'alertmanager-' The secret is mounted into /etc/alertmanager/config.
##
# configSecret:

## WebTLSConfig defines the TLS parameters for HTTPS
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#alertmanagerwebspec
web: {}
## AlertmanagerConfigs to be selected to merge and configure Alertmanager with.
##
alertmanagerConfigSelector: {}
## Example which selects all alertmanagerConfig resources
## with label "alertconfig" with values any of "example-config" or "example-config-2"
# alertmanagerConfigSelector:
#   matchExpressions:
#     - key: alertconfig
#       operator: In
#       values:
#         - example-config
#         - example-config-2
#
## Example which selects all alertmanagerConfig resources with label "role" set to "example-config"
# alertmanagerConfigSelector:
#   matchLabels:
#     role: example-config

## Namespaces to be selected for AlertmanagerConfig discovery. If nil, only check own namespace.
##
alertmanagerConfigNamespaceSelector: {}
## Example which selects all namespaces
## with label "alertmanagerconfig" with values any of "example-namespace" or "example-namespace-2"
# alertmanagerConfigNamespaceSelector:
#   matchExpressions:
#     - key: alertmanagerconfig
#       operator: In
#       values:
#         - example-namespace
#         - example-namespace-2

## Example which selects all namespaces with label "alertmanagerconfig" set to "enabled"
# alertmanagerConfigNamespaceSelector:
#   matchLabels:
#     alertmanagerconfig: enabled

## AlertmanagerConfig to be used as top level configuration
##
alertmanagerConfiguration: {}
## Example with select a global alertmanagerconfig
# alertmanagerConfiguration:
#   name: global-alertmanager-Configuration

## Defines the strategy used by AlertmanagerConfig objects to match alerts. eg:
##
alertmanagerConfigMatcherStrategy: {}
## Example with use OnNamespace strategy
# alertmanagerConfigMatcherStrategy:
#   type: OnNamespace

## Additional command line arguments to pass to Alertmanager (in addition to those generated by the chart)
additionalArgs: []
## Define Log Format
# Use logfmt (default) or json logging
logFormat: logfmt
## Log level for Alertmanager to be configured with.
##
logLevel: info
## Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the
## running cluster equal to the expected size.
replicas: 1
## Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression
## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours).
##
retention: 120h
## Storage is the definition of how storage will be used by the Alertmanager instances.
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/platform/storage.md
##
storage: {}
# volumeClaimTemplate:
#   spec:
#     storageClassName: gluster
#     accessModes: ["ReadWriteOnce"]
#     resources:
#       requests:
#         storage: 50Gi
#   selector: {}

## The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name. string false
##
externalUrl:
## The route prefix Alertmanager registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true,
## but the server serves requests under a different route prefix. For example for use with kubectl proxy.
##
routePrefix: /
## scheme: HTTP scheme to use. Can be used with `tlsConfig` for example if using istio mTLS.
scheme: ""
## tlsConfig: TLS configuration to use when connect to the endpoint. For example if using istio mTLS.
## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#tlsconfig
tlsConfig: {}
## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions.
##
paused: false
## Define which Nodes the Pods are scheduled on.
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
##
nodeSelector: {}
## Define resources requests and limits for single Pods.
## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
##
resources: {}
# requests:
#   memory: 400Mi

## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
##
podAntiAffinity: "soft"
## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
##
podAntiAffinityTopologyKey: kubernetes.io/hostname
## Assign custom affinity rules to the alertmanager instance
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
affinity: {}
# nodeAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     nodeSelectorTerms:
#       - matchExpressions:
#           - key: kubernetes.io/e2e-az-name
#             operator: In
#             values:
#               - e2e-az1
#               - e2e-az2

## If specified, the pod's tolerations.
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
##
tolerations: []
# - key: "key"
#   operator: "Equal"
#   value: "value"
#   effect: "NoSchedule"

## If specified, the pod's topology spread constraints.
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
##
topologySpreadConstraints: []
# - maxSkew: 1
#   topologyKey: topology.kubernetes.io/zone
#   whenUnsatisfiable: DoNotSchedule
#   labelSelector:
#     matchLabels:
#       app: alertmanager

## SecurityContext holds pod-level security attributes and common container settings.
## This defaults to non root user with uid 1000 and gid 2000. *v1.PodSecurityContext false
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
  runAsGroup: 2000
  runAsNonRoot: true
  runAsUser: 1000
  fsGroup: 2000
  seccompProfile:
    type: RuntimeDefault
## Use the host's user namespace for Alertmanager pods.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/
hostUsers: ~
## DNS configuration for Alertmanager.
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.PodDNSConfig
dnsConfig: {}
## DNS policy for Alertmanager.
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#dnspolicystring-alias
dnsPolicy: ""
## Enable hostNetwork for Alertmanager.
hostNetwork: false
## ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP.
## Note this is only for the Alertmanager UI, not the gossip communication.
##
listenLocal: false
## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod.
##
containers: []
# containers:
#   - name: oauth-proxy
#     image: quay.io/oauth2-proxy/oauth2-proxy:v7.15.2
#     args:
#       - --upstream=http://127.0.0.1:9093
#       - --http-address=0.0.0.0:8081
#       - --metrics-address=0.0.0.0:8082
#       - ...
#     ports:
#       - containerPort: 8081
#         name: oauth-proxy
#         protocol: TCP
#       - containerPort: 8082
#         name: oauth-metrics
#         protocol: TCP
#     resources: {}

# Additional volumes on the output StatefulSet definition.
volumes: []
# Additional VolumeMounts on the output StatefulSet definition.
volumeMounts: []
## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
## (permissions, dir tree) on mounted volumes before starting prometheus
initContainers: []
## Priority class assigned to the Pods
##
priorityClassName: ""
## AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster.
##
additionalPeers: []
## PortName to use for Alert Manager.
##
portName: "http-web"
## ClusterAdvertiseAddress is the explicit address to advertise in cluster. Needs to be provided for non RFC1918 [1] (public) addresses. [1] RFC1918: https://tools.ietf.org/html/rfc1918
##
clusterAdvertiseAddress: false
## clusterGossipInterval determines interval between gossip attempts.
## Needs to be specified as GoDuration, a time duration that can be parsed by Go's time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s)
clusterGossipInterval: ""
## clusterPeerTimeout determines timeout for cluster peering.
## Needs to be specified as GoDuration, a time duration that can be parsed by Go's time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s)
clusterPeerTimeout: ""
## clusterPushpullInterval determines interval between pushpull attempts.
## Needs to be specified as GoDuration, a time duration that can be parsed by Go's time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s)
clusterPushpullInterval: ""
## clusterLabel defines the identifier that uniquely identifies the Alertmanager cluster.
clusterLabel: ""
## ForceEnableClusterMode ensures Alertmanager does not deactivate the cluster mode when running with a single replica.
## Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each.
forceEnableClusterMode: false
## Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to
## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).
minReadySeconds: 0
## Pod management policy. Kubernetes default is OrderedReady but prometheus-operator default is Parallel.
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
podManagementPolicy: ""
## Update strategy for the StatefulSet.
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
updateStrategy: {}
# type: RollingUpdate
# rollingUpdate:
#   maxUnavailable: 1

## Duration in seconds the pod needs to terminate gracefully.
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination
terminationGracePeriodSeconds: ~
## Additional configuration which is not covered by the properties above. (passed through tpl)
additionalConfig: {}
## Additional configuration which is not covered by the properties above.
## Useful, if you need advanced templating inside alertmanagerSpec.
## Otherwise, use alertmanager.alertmanagerSpec.additionalConfig (passed through tpl)
additionalConfigString: ""
## ExtraSecret can be used to store various data in an extra secret
## (use it for example to store hashed basic auth credentials)
extraSecret:
  ## if not set, name will be auto generated
  # name: ""
  annotations: {}
  data: {}
  # auth: |
  #   foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
  #   someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.
## Using default values from https://github.com/grafana-community/helm-charts/blob/main/charts/grafana/values.yaml
##
grafana:
  enabled: true
  namespaceOverride: ""
  ## ForceDeployDatasources Create datasource configmap even if grafana deployment has been disabled
  ##
  forceDeployDatasources: false
  ## ForceDeployDashboard Create dashboard configmap even if grafana deployment has been disabled
  ##
  forceDeployDashboards: false
  ## Deploy default dashboards
  ##
  defaultDashboardsEnabled: true
  ## Deploy GrafanaDashboard CRDs that reference dashboards from ConfigMaps when grafana-operator is used
  ## These settings control how dashboards are integrated with the Grafana Operator
  ## Note: End user still needs to create its own kind: GrafanaDataSource for Prometheus
  ## eg:
  ## apiVersion: grafana.integreatly.org/v1beta1
  ## kind: GrafanaDatasource
  ## metadata:
  ##   name: prometheus
  ##   annotations: {}
  ## spec:
  ##   allowCrossNamespaceImport: true
  ##   instanceSelector:
  ##     matchLabels:
  ##       app: grafana
  ##   datasource:
  ##     name: prometheus
  ##     type: prometheus
  ##     access: proxy
  ##     url: http://prometheus-operated.prometheus-stack.svc.cluster.local:9090
  ##     isDefault: true
  ##     jsonData:
  ##       "tlsSkipVerify": true
  ##       "timeInterval": "5s"
  ##
  operator:
    ## Enable references to ConfigMaps containing dashboards in GrafanaDashboard CRs
    ## Set to true to allow dashboards to be loaded from ConfigMap references
    dashboardsConfigMapRefEnabled: false
    ## Annotations for GrafanaDashboard Cr
    ##
    annotations: {}
    ## Labels that should be matched kind: Grafana instance
    ## Example: { app: grafana, category: dashboard }
    ##
    matchLabels: {}
    ## How frequently the operator should resync resources (in duration format)
    ## Controls how often dashboards are reconciled by the operator
    ##
    resyncPeriod: 10m
    ## Which folder contains all dashboards in Grafana
    ## This folder will be created on the Root level
    ## Only one of 'folder', 'folderUID' or 'folderRef' can be set
    ##
    folder: General
    ## Which UID of the target folder contains all dashboards in Grafana
    ## This allows you to use subfolder hierarchy
    ## Only one of 'folder', 'folderUID' or 'folderRef' can be set
    ##
    folderUID: null
    ## Which GrafanaFolder reference contains all dashboards in Grafana
    ## This allows you to use subfolder hierarchy.
    ## Only one of 'folder', 'folderUID' or 'folderRef' can be set
    ##
    folderRef: null
  ## Timezone for the default dashboards
  ## Other options are: browser or a specific timezone, i.e. Europe/Luxembourg
  ##
  defaultDashboardsTimezone: utc
  ## Editable flag for the default dashboards
  ##
  defaultDashboardsEditable: true
  ## Default interval for Grafana dashboards
  ##
  defaultDashboardsInterval: 1m
  # Administrator credentials when not using an existing secret (see below)
  adminUser: admin
  # adminPassword: strongpassword

  # Use an existing secret for the admin user.
  admin:
    ## Name of the secret. Can be templated.
    existingSecret: ""
    userKey: admin-user
    passwordKey: admin-password
  rbac:
    ## If true, Grafana PSPs will be created
    ##
    pspEnabled: false
  ingress:
    ## If true, Grafana Ingress will be created
    ##
    enabled: false
    ## IngressClassName for Grafana Ingress.
    ## Should be provided if Ingress is enabled.
    ##
    # ingressClassName: nginx

    ## Annotations for Grafana Ingress
    ##
    annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"

    ## Labels to be added to the Ingress
    ##
    labels: {}
    ## Hostnames.
    ## Must be provided if Ingress is enabled.
    ##
    # hosts:
    #   - grafana.domain.com
    hosts: []
    ## Path for grafana ingress
    path: /
    ## TLS configuration for grafana Ingress
    ## Secret must be manually created in the namespace
    ##
    tls: []
    # - secretName: grafana-general-tls
    #   hosts:
    #     - grafana.example.com
  # # To make Grafana persistent (Using Statefulset)
  # #
  # persistence:
  #   enabled: true
  #   type: sts
  #   storageClassName: "storageClassName"
  #   accessModes:
  #     - ReadWriteOnce
  #   size: 20Gi
  #   finalizers:
  #     - kubernetes.io/pvc-protection
  serviceAccount:
    create: true
    autoMount: true
  sidecar:
    dashboards:
      enabled: true
      label: grafana_dashboard
      labelValue: "1"
      # Allow discovery in all namespaces for dashboards
      searchNamespace: ALL
      # Support for new table panels, when enabled grafana auto migrates the old table panels to newer table panels
      enableNewTablePanelSyntax: false
      ## Annotations for Grafana dashboard configmaps
      ##
      annotations: {}
      multicluster:
        global:
          enabled: false
        etcd:
          enabled: false
      provider:
        allowUiUpdates: false
    datasources:
      enabled: true
      defaultDatasourceEnabled: true
      isDefaultDatasource: true
      name: Prometheus
      uid: prometheus
      ## Extra jsonData properties to add to the datasource
      # extraJsonData:
      #   prometheusType: Prometheus

      ## URL of prometheus datasource
      ##
      # url: http://prometheus-stack-prometheus:9090/

      ## Prometheus request timeout in seconds
      # timeout: 30

      ## Query parameters to add, as a URL-encoded string,
      ## to query Prometheus
      # customQueryParameters: ""

      # If not defined, will use prometheus.prometheusSpec.scrapeInterval or its default
      # defaultDatasourceScrapeInterval: 15s

      ## Annotations for Grafana datasource configmaps
      ##
      annotations: {}
      ## Set method for HTTP to send query to datasource
      httpMethod: POST
      ## Create datasource for each Pod of Prometheus StatefulSet;
      ## this uses by default the headless service `prometheus-operated` which is
      ## created by Prometheus Operator. In case you deployed your own Service for your
      ## Prometheus instance, you can specify it with the field `prometheusServiceName`
      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/0fee93e12dc7c2ea1218f19ae25ec6b893460590/pkg/prometheus/statefulset.go#L255-L286
      createPrometheusReplicasDatasources: false
      prometheusServiceName: prometheus-operated
      label: grafana_datasource
      labelValue: "1"
      ## Field with internal link pointing to existing data source in Grafana.
      ## Can be provisioned via additionalDataSources
      exemplarTraceIdDestinations: {}
      # datasourceUid: Jaeger
      # traceIdLabelName: trace_id
      # urlDisplayLabel: View traces
      alertmanager:
        enabled: true
        name: Alertmanager
        uid: alertmanager
        handleGrafanaManagedAlerts: false
        implementation: prometheus
  extraConfigmapMounts: []
  # - name: certs-configmap
  #   mountPath: /etc/grafana/ssl/
  #   configMap: certs-configmap
  #   readOnly: true

  deleteDatasources: []
  # - name: example-datasource
  #   orgId: 1

  ## Configure additional grafana datasources (passed through tpl)
  ## ref: https://grafana.com/docs/grafana/latest/administration/provisioning/#datasources
  additionalDataSources: []
  # - name: prometheus-sample
  #   access: proxy
  #   basicAuth: true
  #   secureJsonData:
  #     basicAuthPassword: pass
  #     basicAuthUser: daco
  #   editable: false
  #   jsonData:
  #     tlsSkipVerify: true
  #   orgId: 1
  #   type: prometheus
  #   url: https://{{ printf "%s-prometheus.svc" .Release.Name }}:9090
  #   version: 1

  ## Configure additional grafana datasources as a templated string (passed through tpl)
  ## Useful when you need Helm flow control or templating inside the datasource definition
  additionalDataSourcesString: ""
  # Flag to mark provisioned data sources for deletion if they are no longer configured.
  # It takes no effect if data sources are already listed in the deleteDatasources section.
  # ref: https://grafana.com/docs/grafana/latest/administration/provisioning/#example-data-source-configuration-file
  prune: false
  ## Passed to grafana subchart and used by servicemonitor below
  ##
  service:
    portName: http-web
    ipFamilies: []
    ipFamilyPolicy: ""
  serviceMonitor:
    # If true, a ServiceMonitor CRD is created for a prometheus operator
    # https://github.com/prometheus-operator/prometheus-operator
    #
    enabled: true
    # Path to use for scraping metrics. Might be different if server.root_url is set
    # in grafana.ini
    path: "/metrics"
    # namespace: monitoring (defaults to use the namespace this chart is deployed to)

    # labels for the ServiceMonitor
    labels: {}
    # Scrape interval. If not set, the Prometheus default scrape interval is used.
    #
    interval: ""
    scheme: http
    tlsConfig: {}
    scrapeTimeout: 30s
    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace
## Flag to disable all the kubernetes component scrapers
##
kubernetesServiceMonitors:
  enabled: true
## Component scraping the kube api server
##
kubeApiServer:
  enabled: true
  tlsConfig:
    serverName: kubernetes
    insecureSkipVerify: false
  serviceMonitor:
    enabled: true
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: 0
    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
    ##
    targetLimit: 0
    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelLimit: 0
    ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelNameLengthLimit: 0
    ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelValueLengthLimit: 0
    ## proxyUrl: URL of a proxy that should be used for scraping.
    ##
    proxyUrl: ""
    jobLabel: component
    selector:
      matchLabels:
        component: apiserver
        provider: kubernetes
    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    metricRelabelings:
      # Drop excessively noisy apiserver buckets.
      - action: drop
        regex: (etcd_request|apiserver_request_slo|apiserver_request_sli|apiserver_request)_duration_seconds_bucket;(0\.15|0\.2|0\.3|0\.35|0\.4|0\.45|0\.6|0\.7|0\.8|0\.9|1\.25|1\.5|1\.75|2|3|3\.5|4|4\.5|6|7|8|9|15|20|40|45|50)(\.0)?
        sourceLabels:
          - __name__
          - le
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    relabelings: []
    # - sourceLabels:
    #     - __meta_kubernetes_namespace
    #     - __meta_kubernetes_service_name
    #     - __meta_kubernetes_endpoint_port_name
    #   action: keep
    #   regex: default;kubernetes;https
    # - targetLabel: __address__
    #   replacement: kubernetes.default.svc:443

    ## Additional labels
    ##
    additionalLabels: {}
    # foo: bar

    ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
    targetLabels: []
  ## Override the job label used for the apiserver.
  ## This allows users who scrape apiserver metrics under a different job name (e.g. k3s-server via PushProx)
  ## to align the recording rules and alerts with their actual job label.
  jobNameOverride: ""
## Component scraping the kubelet and kubelet-hosted cAdvisor
##
kubelet:
  enabled: true
  namespace: kube-system
  # Overrides the job selector in Grafana dashboards and Prometheus rules
  # For k3s clusters, change to k3s-server
  jobNameOverride: ""
  serviceMonitor:
    enabled: true
    ## Enable scraping /metrics from kubelet's service
    kubelet: true
    ## Attach metadata to discovered targets. Requires Prometheus v2.45 for endpoints created by the operator.
    ##
    attachMetadata:
      node: false
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    ## If true, Prometheus use (respect) labels provided by exporter.
    ##
    honorLabels: true
    ## If true, Prometheus ingests metrics with timestamp provided by exporter. If false, Prometheus ingests metrics with timestamp of scrape.
    ##
    honorTimestamps: true
    ## If true, defines whether Prometheus tracks staleness of the metrics that have an explicit timestamp present in scraped data. Has no effect if `honorTimestamps` is false.
    ## We recommend enabling this if you want the best possible accuracy for container_ metrics scraped from cadvisor.
    ## For more details see: https://github.com/prometheus-community/helm-charts/pull/5063#issuecomment-2545374849
    trackTimestampsStaleness: true
    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: 0
    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
    ##
    targetLimit: 0
    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelLimit: 0
    ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelNameLengthLimit: 0
    ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelValueLengthLimit: 0
    ## proxyUrl: URL of a proxy that should be used for scraping.
    ##
    proxyUrl: ""
    ## Enable scraping the kubelet over https. For requirements to enable this see
    ## https://github.com/prometheus-operator/prometheus-operator/issues/926
    ##
    https: true
    ## Skip TLS certificate validation when scraping.
    ## This is enabled by default because kubelet serving certificate deployed by kubeadm is by default self-signed
    ## ref: https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/#kubelet-serving-certs
    ##
    insecureSkipVerify: true
    ## Enable scraping /metrics/probes from kubelet's service
    ##
    probes: true
    ## Enable scraping /metrics/resource from kubelet's service
    ## This is disabled by default because container metrics are already exposed by cAdvisor
    ##
    resource: false
    # From kubernetes 1.18, /metrics/resource/v1alpha1 renamed to /metrics/resource
    resourcePath: "/metrics/resource/v1alpha1"
    ## Configure the scrape interval for resource metrics. This is configured to the default Kubelet cAdvisor
    ## minimum housekeeping interval in order to avoid missing samples. Note, this value is ignored
    ## if kubelet.serviceMonitor.interval is not empty.
    resourceInterval: 10s
    ## Enable scraping /metrics/cadvisor from kubelet's service
    ##
    cAdvisor: true
    ## Configure the scrape interval for cAdvisor. This is configured to the default Kubelet cAdvisor
    ## minimum housekeeping interval in order to avoid missing samples. Note, this value is ignored
    ## if kubelet.serviceMonitor.interval is not empty.
    cAdvisorInterval: 10s
    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    cAdvisorMetricRelabelings:
      # Drop less useful container CPU metrics.
      - sourceLabels: [__name__]
        action: drop
        regex: 'container_cpu_(cfs_throttled_seconds_total|load_average_10s|system_seconds_total|user_seconds_total)'
      # Drop less useful container / always zero filesystem metrics.
      - sourceLabels: [__name__]
        action: drop
        regex: 'container_fs_(io_current|io_time_seconds_total|io_time_weighted_seconds_total|reads_merged_total|sector_reads_total|sector_writes_total|writes_merged_total)'
      # Drop less useful / always zero container memory metrics.
      - sourceLabels: [__name__]
        action: drop
        regex: 'container_memory_(mapped_file|swap)'
      # Drop less useful container process metrics.
      - sourceLabels: [__name__]
        action: drop
        regex: 'container_(file_descriptors|tasks_state|threads_max)'
      # Drop container_memory_failures_total{scope="hierarchy"} metrics,
      # we only need the container scope.
      - sourceLabels: [__name__, scope]
        action: drop
        regex: 'container_memory_failures_total;hierarchy'
      # Drop container_network_... metrics that match various interfaces that
      # correspond to CNI and similar interfaces. This avoids capturing network
      # metrics for host network containers.
      - sourceLabels: [__name__, interface]
        action: drop
        regex: 'container_network_.*;(cali|cilium|cni|lxc|nodelocaldns|tunl).*'
      # Drop container spec metrics that overlap with kube-state-metrics.
      - sourceLabels: [__name__]
        action: drop
        regex: 'container_spec.*'
      # Drop cgroup metrics with no pod.
      - sourceLabels: [id, pod]
        action: drop
        regex: '.+;'
    # - sourceLabels: [__name__, image]
    #   separator: ;
    #   regex: container_([a-z_]+);
    #   replacement: $1
    #   action: drop
    # - sourceLabels: [__name__]
    #   separator: ;
    #   regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
    #   replacement: $1
    #   action: drop

    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    probesMetricRelabelings: []
    # - sourceLabels: [__name__, image]
    #   separator: ;
    #   regex: container_([a-z_]+);
    #   replacement: $1
    #   action: drop
    # - sourceLabels: [__name__]
    #   separator: ;
    #   regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
    #   replacement: $1
    #   action: drop

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    ## metrics_path is required to match upstream rules and charts
    cAdvisorRelabelings:
      - action: replace
        sourceLabels: [__metrics_path__]
        targetLabel: metrics_path
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    probesRelabelings:
      - action: replace
        sourceLabels: [__metrics_path__]
        targetLabel: metrics_path
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    resourceRelabelings:
      - action: replace
        sourceLabels: [__metrics_path__]
        targetLabel: metrics_path
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    metricRelabelings:
      # Reduce bucket cardinality of kubelet storage operations.
      - action: drop
        sourceLabels: [__name__, le]
        regex: (csi_operations|storage_operation_duration)_seconds_bucket;(0.25|2.5|15|25|120|600)(\.0)?
    # - sourceLabels: [__name__, image]
    #   separator: ;
    #   regex: container_([a-z_]+);
    #   replacement: $1
    #   action: drop
    # - sourceLabels: [__name__]
    #   separator: ;
    #   regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
    #   replacement: $1
    #   action: drop

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    ## metrics_path is required to match upstream rules and charts
    relabelings:
      - action: replace
        sourceLabels: [__metrics_path__]
        targetLabel: metrics_path
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

    ## Additional labels
    ##
    additionalLabels: {}
    # foo: bar

    ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
    targetLabels: []
## Component scraping the kube controller manager
##
kubeControllerManager:
  enabled: true
  # Overrides the job selector in Grafana dashboards and Prometheus rules
  # For k3s clusters, change to k3s-server
  jobNameOverride: ""
  ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on
  ##
  endpoints: []
  # - 10.141.4.22
  # - 10.141.4.23
  # - 10.141.4.24

  ## If using kubeControllerManager.endpoints only the port and targetPort are used
  ##
  service:
    enabled: true
    ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change
    ## of default port in Kubernetes 1.22.
    ##
    port: null
    targetPort: null
    ipDualStack:
      enabled: false
      ipFamilies: ["IPv6", "IPv4"]
      ipFamilyPolicy: "PreferDualStack"
    # selector:
    #   component: kube-controller-manager
  serviceMonitor:
    enabled: true
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: 0
    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
    ##
    targetLimit: 0
    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelLimit: 0
    ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelNameLengthLimit: 0
    ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelValueLengthLimit: 0
    ## proxyUrl: URL of a proxy that should be used for scraping.
    ##
    proxyUrl: ""
    ## port: Name of the port the metrics will be scraped from
    ##
    port: http-metrics
    jobLabel: jobLabel
    selector: {}
    # matchLabels:
    #   component: kube-controller-manager

    ## Enable scraping kube-controller-manager over https.
    ## Requires proper certs (not self-signed) and delegated authentication/authorization checks.
    ## If null or unset, the value is determined dynamically based on target Kubernetes version.
    ##
    https: null
    # Skip TLS certificate validation when scraping
    insecureSkipVerify: null
    # Name of the server to use when validating TLS certificate
    serverName: null
    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

    ## Additional labels
    ##
    additionalLabels: {}
    # foo: bar

    ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
    targetLabels: []
1811
## Component scraping coreDns. Use either this or kubeDns
##
coreDns:
  enabled: true
  service:
    enabled: true
    port: 9153
    targetPort: 9153
    ipDualStack:
      enabled: false
      ipFamilies: ["IPv6", "IPv4"]
      ipFamilyPolicy: "PreferDualStack"
    # selector:
    #   k8s-app: kube-dns
  serviceMonitor:
    enabled: true
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: 0
    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
    ##
    targetLimit: 0
    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelLimit: 0
    ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelNameLengthLimit: 0
    ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelValueLengthLimit: 0
    ## proxyUrl: URL of a proxy that should be used for scraping.
    ##
    proxyUrl: ""
    ## port: Name of the port the metrics will be scraped from
    ##
    port: http-metrics
    jobLabel: jobLabel
    selector: {}
    # matchLabels:
    #   k8s-app: kube-dns

    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

    ## Additional labels
    ##
    additionalLabels: {}
    # foo: bar

    ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
    targetLabels: []
    ## File containing bearer token to be used when scraping targets
    ## Empty value do not send any bearer token.
    ##
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
1887
## Component scraping kubeDns. Use either this or coreDns
##
kubeDns:
  enabled: false
  service:
    dnsmasq:
      port: 10054
      targetPort: 10054
    skydns:
      port: 10055
      targetPort: 10055
    ipDualStack:
      enabled: false
      ipFamilies: ["IPv6", "IPv4"]
      ipFamilyPolicy: "PreferDualStack"
    # selector:
    #   k8s-app: kube-dns
  serviceMonitor:
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: 0
    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
    ##
    targetLimit: 0
    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelLimit: 0
    ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelNameLengthLimit: 0
    ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelValueLengthLimit: 0
    ## proxyUrl: URL of a proxy that should be used for scraping.
    ##
    proxyUrl: ""
    jobLabel: jobLabel
    selector: {}
    # matchLabels:
    #   k8s-app: kube-dns

    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    dnsmasqMetricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    dnsmasqRelabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

    ## Additional labels
    ##
    additionalLabels: {}
    # foo: bar

    ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
    targetLabels: []
    ## File containing bearer token to be used when scraping targets
    ## Empty value do not send any bearer token.
    ##
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
1981
## Component scraping etcd
##
kubeEtcd:
  enabled: true
  ## If your etcd is not deployed as a pod, specify IPs it can be found on
  ##
  endpoints: []
  # - 10.141.4.22
  # - 10.141.4.23
  # - 10.141.4.24

  ## Etcd service. If using kubeEtcd.endpoints only the port and targetPort are used
  ##
  service:
    enabled: true
    port: 2381
    targetPort: 2381
    ipDualStack:
      enabled: false
      ipFamilies: ["IPv6", "IPv4"]
      ipFamilyPolicy: "PreferDualStack"
    # selector:
    #   component: etcd
  ## Configure secure access to the etcd cluster by loading a secret into prometheus and
  ## specifying security configuration below. For example, with a secret named etcd-client-cert
  ##
  ## serviceMonitor:
  ##   scheme: https
  ##   insecureSkipVerify: false
  ##   serverName: localhost
  ##   caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
  ##   certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client
  ##   keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
  ##
  serviceMonitor:
    enabled: true
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: 0
    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
    ##
    targetLimit: 0
    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelLimit: 0
    ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelNameLengthLimit: 0
    ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelValueLengthLimit: 0
    ## proxyUrl: URL of a proxy that should be used for scraping.
    ##
    proxyUrl: ""
    scheme: http
    insecureSkipVerify: false
    serverName: ""
    caFile: ""
    certFile: ""
    keyFile: ""
    ## port: Name of the port the metrics will be scraped from
    ##
    port: http-metrics
    jobLabel: jobLabel
    selector: {}
    # matchLabels:
    #   component: etcd

    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

    ## Additional labels
    ##
    additionalLabels: {}
    # foo: bar

    ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
    targetLabels: []
    ## File containing bearer token to be used when scraping targets
    ## Empty value do not send any bearer token.
    ##
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
2083
## Component scraping kube scheduler
##
kubeScheduler:
  enabled: true
  # Overrides the job selector in Grafana dashboards and Prometheus rules
  # For k3s clusters, change to k3s-server
  jobNameOverride: ""
  ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on
  ##
  endpoints: []
  # - 10.141.4.22
  # - 10.141.4.23
  # - 10.141.4.24

  ## If using kubeScheduler.endpoints only the port and targetPort are used
  ##
  service:
    enabled: true
    ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change
    ## of default port in Kubernetes 1.23.
    ##
    port: null
    targetPort: null
    ipDualStack:
      enabled: false
      ipFamilies: ["IPv6", "IPv4"]
      ipFamilyPolicy: "PreferDualStack"
    # selector:
    #   component: kube-scheduler
  serviceMonitor:
    enabled: true
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: 0
    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
    ##
    targetLimit: 0
    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelLimit: 0
    ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelNameLengthLimit: 0
    ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelValueLengthLimit: 0
    ## proxyUrl: URL of a proxy that should be used for scraping.
    ##
    proxyUrl: ""
    ## Enable scraping kube-scheduler over https.
    ## Requires proper certs (not self-signed) and delegated authentication/authorization checks.
    ## If null or unset, the value is determined dynamically based on target Kubernetes version.
    ##
    https: null
    ## port: Name of the port the metrics will be scraped from
    ##
    port: http-metrics
    jobLabel: jobLabel
    selector: {}
    # matchLabels:
    #   component: kube-scheduler

    ## Skip TLS certificate validation when scraping
    insecureSkipVerify: null
    ## Name of the server to use when validating TLS certificate
    serverName: null
    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

    ## Additional labels
    ##
    additionalLabels: {}
    # foo: bar

    ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
    targetLabels: []
2179
## Component scraping kube proxy
##
kubeProxy:
  enabled: true
  # Overrides the job selector in Grafana dashboards and Prometheus rules
  # For k3s clusters, change to k3s-server
  jobNameOverride: ""
  ## If your kube proxy is not deployed as a pod, specify IPs it can be found on
  ##
  endpoints: []
  # - 10.141.4.22
  # - 10.141.4.23
  # - 10.141.4.24

  service:
    enabled: true
    port: 10249
    targetPort: 10249
    ipDualStack:
      enabled: false
      ipFamilies: ["IPv6", "IPv4"]
      ipFamilyPolicy: "PreferDualStack"
    # selector:
    #   k8s-app: kube-proxy
  serviceMonitor:
    enabled: true
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: 0
    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
    ##
    targetLimit: 0
    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelLimit: 0
    ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelNameLengthLimit: 0
    ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelValueLengthLimit: 0
    ## proxyUrl: URL of a proxy that should be used for scraping.
    ##
    proxyUrl: ""
    ## port: Name of the port the metrics will be scraped from
    ##
    port: http-metrics
    jobLabel: jobLabel
    selector: {}
    # matchLabels:
    #   k8s-app: kube-proxy

    ## Enable scraping kube-proxy over https.
    ## Requires proper certs (not self-signed) and delegated authentication/authorization checks
    ##
    https: false
    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    relabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    ## Additional labels
    ##
    additionalLabels: {}
    # foo: bar

    ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
    targetLabels: []
    ## File containing bearer token to be used when scraping targets
    ## Empty value do not send any bearer token.
    ##
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
2266
## Component scraping kube state metrics
##
kubeStateMetrics:
  enabled: true
## Configuration for kube-state-metrics subchart
##
kube-state-metrics:
  ## set to true to add the release label so scraping of the servicemonitor with kube-prometheus-stack works out of the box
  releaseLabel: true
  ## Enable scraping via kubernetes-service-endpoints
  ## Disabled by default as we service monitor is enabled below
  ##
  prometheusScrape: false
  prometheus:
    monitor:
      ## Enable scraping via service monitor
      ## Disable to prevent duplication if you enable prometheusScrape above
      enabled: true
      ## kube-state-metrics endpoint
      http:
        ## Keep labels from scraped data, overriding server-side labels
        honorLabels: true
      ## selfMonitor endpoint
      metrics:
        ## Keep labels from scraped data, overriding server-side labels
        honorLabels: true
2292
## Deploy node exporter as a daemonset to all nodes
##
nodeExporter:
  enabled: true
  operatingSystems:
    linux:
      enabled: true
    aix:
      enabled: true
    darwin:
      enabled: true
  ## ForceDeployDashboard Create dashboard configmap even if nodeExporter deployment has been disabled
  ##
  forceDeployDashboards: false
2306
## Configuration for prometheus-node-exporter subchart
##
prometheus-node-exporter:
  namespaceOverride: ""
  podLabels:
    ## Add the 'node-exporter' label to be used by serviceMonitor and podMonitor to match standard common usage in rules and grafana dashboards
    ##
    jobLabel: node-exporter
  releaseLabel: true
  extraArgs:
    - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|run/containerd/.+|var/lib/docker/.+|var/lib/kubelet/.+)($|/)
    - --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs|erofs)$
  service:
    portName: http-metrics
    ipDualStack:
      enabled: false
      ipFamilies: ["IPv6", "IPv4"]
      ipFamilyPolicy: "PreferDualStack"
    labels:
      jobLabel: node-exporter
  image:
    distroless: true
  prometheus:
    monitor:
      enabled: true
      jobLabel: jobLabel
      ## Scrape interval. If not set, the Prometheus default scrape interval is used.
      ##
      interval: ""
      ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
      ##
      sampleLimit: 0
      ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
      ##
      targetLimit: 0
      ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
      ##
      labelLimit: 0
      ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
      ##
      labelNameLengthLimit: 0
      ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
      ##
      labelValueLengthLimit: 0
      ## How long until a scrape request times out. If not set, the Prometheus default scrape timeout is used.
      ##
      scrapeTimeout: ""
      ## proxyUrl: URL of a proxy that should be used for scraping.
      ##
      proxyUrl: ""
      ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
      ##
      metricRelabelings: []
      # - sourceLabels: [__name__]
      #   separator: ;
      #   regex: ^node_mountstats_nfs_(event|operations|transport)_.+
      #   replacement: $1
      #   action: drop

      ## RelabelConfigs to apply to samples before scraping
      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
      ##
      relabelings: []
      # - sourceLabels: [__meta_kubernetes_pod_node_name]
      #   separator: ;
      #   regex: ^(.*)$
      #   targetLabel: nodename
      #   replacement: $1
      #   action: replace
      ## Attach node metadata to discovered targets. Requires Prometheus v2.35.0 and above.
      ##
      # attachMetadata:
      #   node: false

    podMonitor:
      enabled: false
      jobLabel: jobLabel
  rbac:
    ## If true, create PSPs for node-exporter
    ##
    pspEnabled: false
2388
## Manages Prometheus and Alertmanager components
2389
##
2390
prometheusOperator:
2391
enabled: true
2392
## Use '{{ template "kube-prometheus-stack.fullname" . }}-operator' by default
2393
fullnameOverride: ""
2394
## Number of old replicasets to retain ##
2395
## The default value is 10, 0 will garbage-collect old replicasets ##
2396
revisionHistoryLimit: 10
2397
## Strategy of the deployment
2398
##
2399
strategy: {}
2400
## Prometheus-Operator v0.39.0 and later support TLS natively.
2401
##
2402
tls:
2403
enabled: true
2404
# Value must match version names from https://pkg.go.dev/crypto/tls#pkg-constants
2405
tlsMinVersion: VersionTLS13
2406
# The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules.
2407
internalPort: 10250
2408
## Liveness probe for the prometheusOperator deployment
2409
##
2410
livenessProbe:
2411
enabled: true
2412
failureThreshold: 3
2413
initialDelaySeconds: 0
2414
periodSeconds: 10
2415
successThreshold: 1
2416
timeoutSeconds: 1
2417
## Readiness probe for the prometheusOperator deployment
2418
##
2419
readinessProbe:
2420
enabled: true
2421
failureThreshold: 3
2422
initialDelaySeconds: 0
2423
periodSeconds: 10
2424
successThreshold: 1
2425
timeoutSeconds: 1
2426
## Admission webhook support for PrometheusRules resources added in Prometheus Operator 0.30 can be enabled to prevent incorrectly formatted
2427
## rules from making their way into prometheus and potentially preventing the container from starting
2428
admissionWebhooks:
2429
## Valid values: Fail, Ignore, IgnoreOnInstallOnly
2430
## IgnoreOnInstallOnly - If Release.IsInstall returns "true", set "Ignore" otherwise "Fail"
2431
failurePolicy: ""
2432
## The default timeoutSeconds is 10 and the maximum value is 30.
2433
timeoutSeconds: 10
2434
enabled: true
2435
## A PEM encoded CA bundle which will be used to validate the webhook's server certificate.
2436
## If unspecified, system trust roots on the apiserver are used.
2437
caBundle: ""
2438
## If enabled, generate a self-signed certificate, then patch the webhook configurations with the generated data.
2439
## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own
2440
## certs ahead of time if you wish.
2441
##
2442
annotations: {}
2443
# argocd.argoproj.io/hook: PreSync
2444
# argocd.argoproj.io/hook-delete-policy: HookSucceeded
2445
2446
namespaceSelector: {}
2447
objectSelector: {}
2448
matchConditions: {}
2449
mutatingWebhookConfiguration:
2450
annotations: {}
2451
# argocd.argoproj.io/hook: PreSync
2452
validatingWebhookConfiguration:
2453
annotations: {}
2454
# argocd.argoproj.io/hook: PreSync
2455
deployment:
2456
enabled: false
2457
## Number of replicas
2458
##
2459
replicas: 1
2460
## Strategy of the deployment
2461
##
2462
strategy: {}
2463
# Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
2464
podDisruptionBudget:
2465
enabled: false
2466
minAvailable: 1
2467
# maxUnavailable: ""
2468
unhealthyPodEvictionPolicy: AlwaysAllow
# NOTE(review): keys below appear to belong under prometheusOperator.admissionWebhooks.deployment —
# original nesting was flattened by extraction; confirm and re-indent when merging back into the file.
## Number of old replicasets to retain ##
## The default value is 10, 0 will garbage-collect old replicasets ##
revisionHistoryLimit: 10
## Prometheus-Operator v0.39.0 and later support TLS natively.
##
tls:
  enabled: true
  # Value must match version names from https://pkg.go.dev/crypto/tls#pkg-constants
  tlsMinVersion: VersionTLS13
  # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules.
  internalPort: 10250
## Service account for Prometheus Operator Webhook to use.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
  annotations: {}
  automountServiceAccountToken: false
  create: true
  name: ""
## Configuration for Prometheus operator Webhook service
##
service:
  annotations: {}
  labels: {}
  clusterIP: ""
  ipDualStack:
    enabled: false
    ipFamilies: ["IPv6", "IPv4"]
    ipFamilyPolicy: "PreferDualStack"
  ## Port to expose on each node
  ## Only used if service.type is 'NodePort'
  ##
  nodePort: 31080
  nodePortTls: 31443
  ## Additional ports to open for Prometheus operator Webhook service
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services
  ##
  additionalPorts: []
  ## Loadbalancer IP
  ## Only use if service.type is "LoadBalancer"
  ##
  loadBalancerIP: ""
  loadBalancerSourceRanges: []
  ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
  ##
  externalTrafficPolicy: Cluster
  ## Service type
  ## NodePort, ClusterIP, LoadBalancer
  ##
  type: ClusterIP
  ## List of IP addresses at which the Prometheus server service is available
  ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
  ##
  externalIPs: []
# ## Labels to add to the operator webhook deployment
# ##
labels: {}
## Annotations to add to the operator webhook deployment
##
annotations: {}
## Labels to add to the operator webhook pod
##
podLabels: {}
## Annotations to add to the operator webhook pod
##
podAnnotations: {}
## Assign a PriorityClassName to pods if set
# priorityClassName: ""

## Define Log Format
# Use logfmt (default) or json logging
# logFormat: logfmt

## Decrease log verbosity to errors only
# logLevel: error

## Prometheus-operator webhook image
##
image:
  registry: cgr.dev
  repository: chainguard-private/prometheus-admission-webhook
  # if not set appVersion field from Chart.yaml is used
  tag: latest
  sha: sha256:82fe3e3be35b1e38eb4b7203389e4e938e1b1b756f3dba9d25866f853f6f98e9
  pullPolicy: IfNotPresent
## Define Log Format
# Use logfmt (default) or json logging
# logFormat: logfmt

## Decrease log verbosity to errors only
# logLevel: error

## Liveness probe
##
livenessProbe:
  enabled: true
  failureThreshold: 3
  initialDelaySeconds: 30
  periodSeconds: 10
  successThreshold: 1
  timeoutSeconds: 1
## Readiness probe
##
readinessProbe:
  enabled: true
  failureThreshold: 3
  initialDelaySeconds: 5
  periodSeconds: 10
  successThreshold: 1
  timeoutSeconds: 1
## Resource limits & requests
##
resources: {}
# limits:
#   cpu: 200m
#   memory: 200Mi
# requests:
#   cpu: 100m
#   memory: 100Mi

# Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
# because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working
##
hostNetwork: false
## Define which Nodes the Pods are scheduled on.
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
##
nodeSelector: {}
## Tolerations for use with node taints
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
##
tolerations: []
# - key: "key"
#   operator: "Equal"
#   value: "value"
#   effect: "NoSchedule"

## Assign custom affinity rules to the prometheus operator
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
affinity: {}
# nodeAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     nodeSelectorTerms:
#     - matchExpressions:
#       - key: kubernetes.io/e2e-az-name
#         operator: In
#         values:
#         - e2e-az1
#         - e2e-az2
dnsConfig: {}
# nameservers:
#   - 1.2.3.4
# searches:
#   - ns1.svc.cluster-domain.example
#   - my.dns.search.suffix
# options:
#   - name: ndots
#     value: "2"
#   - name: edns0
securityContext:
  fsGroup: 65534
  runAsGroup: 65534
  runAsNonRoot: true
  runAsUser: 65534
  seccompProfile:
    type: RuntimeDefault
## Container-specific security context configuration
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
containerSecurityContext:
  allowPrivilegeEscalation: false
  readOnlyRootFilesystem: true
  capabilities:
    drop:
    - ALL
## If false then the user will opt out of automounting API credentials.
##
automountServiceAccountToken: true
# NOTE(review): `patch` appears to belong under prometheusOperator.admissionWebhooks —
# original nesting was flattened by extraction; confirm and re-indent when merging.
patch:
  enabled: true
  image:
    registry: cgr.dev
    repository: chainguard-private/kube-webhook-certgen
    tag: latest
    sha: sha256:127961d6034e96d92cf9ae2fd460e39fa790eb5d1ebcb80acf0e833bc9b22546
    pullPolicy: IfNotPresent
  resources: {}
  ## Provide a priority class name to the webhook patching job
  ##
  priorityClassName: ""
  ttlSecondsAfterFinished: 60
  annotations: {}
  # argocd.argoproj.io/hook: PreSync
  # argocd.argoproj.io/hook-delete-policy: HookSucceeded
  podAnnotations: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  ## SecurityContext holds pod-level security attributes and common container settings.
  ## This defaults to non root user with uid 2000 and gid 2000. *v1.PodSecurityContext false
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  ##
  securityContext:
    runAsGroup: 2000
    runAsNonRoot: true
    runAsUser: 2000
    seccompProfile:
      type: RuntimeDefault
  ## Service account for Prometheus Operator Webhook Job Patch to use.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
  ##
  serviceAccount:
    create: true
    annotations: {}
    automountServiceAccountToken: true
  # Security context for create job container
  createSecretJob:
    securityContext:
      allowPrivilegeEscalation: false
      readOnlyRootFilesystem: true
      capabilities:
        drop:
        - ALL
  # Security context for patch job container
  patchWebhookJob:
    securityContext:
      allowPrivilegeEscalation: false
      readOnlyRootFilesystem: true
      capabilities:
        drop:
        - ALL
# NOTE(review): `certManager` appears to belong under prometheusOperator.admissionWebhooks —
# original nesting was flattened by extraction; confirm and re-indent when merging.
# Use certmanager to generate webhook certs
certManager:
  enabled: false
  # self-signed root certificate
  rootCert:
    duration: ""  # default to be 5y
    # -- Set the revisionHistoryLimit on the Certificate. See
    # https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec
    # Defaults to nil.
    revisionHistoryLimit:
  admissionCert:
    duration: ""  # default to be 1y
    # -- Set the revisionHistoryLimit on the Certificate. See
    # https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec
    # Defaults to nil.
    revisionHistoryLimit:
  # issuerRef:
  #   name: "issuer"
  #   kind: "ClusterIssuer"
# NOTE(review): keys below appear to be direct children of prometheusOperator —
# original nesting was flattened by extraction; confirm and re-indent when merging.
## Namespaces to scope the interaction of the Prometheus Operator and the apiserver (allow list).
## This is mutually exclusive with denyNamespaces. Setting this to an empty object will disable the configuration
##
namespaces: {}
  # releaseNamespace: true
  # additional:
  # - kube-system

## Namespaces not to scope the interaction of the Prometheus Operator (deny list).
##
denyNamespaces: []
## Filter namespaces to look for prometheus-operator custom resources
##
alertmanagerInstanceNamespaces: []
alertmanagerConfigNamespaces: []
prometheusInstanceNamespaces: []
thanosRulerInstanceNamespaces: []
## The clusterDomain value will be added to the cluster.peer option of the alertmanager.
## Without this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated:9094 (default value)
## With this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated.namespace.svc.cluster-domain:9094
##
# clusterDomain: "cluster.local"
networkPolicy:
  ## Enable creation of NetworkPolicy resources.
  ##
  enabled: false
  ## Flavor of the network policy to use.
  # Can be:
  # * kubernetes for networking.k8s.io/v1/NetworkPolicy
  # * cilium for cilium.io/v2/CiliumNetworkPolicy
  flavor: kubernetes
  # cilium:
  #   egress:

  ## match labels used in selector
  # matchLabels: {}
## Service account for Prometheus Operator to use.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
  create: true
  name: ""
  automountServiceAccountToken: true
  annotations: {}
# -- terminationGracePeriodSeconds for container lifecycle hook
terminationGracePeriodSeconds: 30
# -- Specify lifecycle hooks for the controller
lifecycle: {}
## Configuration for Prometheus operator service
##
service:
  annotations: {}
  labels: {}
  clusterIP: ""
  ipDualStack:
    enabled: false
    ipFamilies: ["IPv6", "IPv4"]
    ipFamilyPolicy: "PreferDualStack"
  ## Port to expose on each node
  ## Only used if service.type is 'NodePort'
  ##
  nodePort: 30080
  nodePortTls: 30443
  ## Additional ports to open for Prometheus operator service
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services
  ##
  additionalPorts: []
  ## Loadbalancer IP
  ## Only use if service.type is "LoadBalancer"
  ##
  loadBalancerIP: ""
  loadBalancerSourceRanges: []
  ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
  ##
  externalTrafficPolicy: Cluster
  ## Service type
  ## NodePort, ClusterIP, LoadBalancer
  ##
  type: ClusterIP
  ## List of IP addresses at which the Prometheus server service is available
  ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
  ##
  externalIPs: []
# ## Labels to add to the operator deployment
# ##
labels: {}
## Annotations to add to the operator deployment
##
annotations: {}
## Labels to add to the operator pod
##
podLabels: {}
## Annotations to add to the operator pod
##
podAnnotations: {}
## Assign a podDisruptionBudget to the operator
##
podDisruptionBudget:
  enabled: false
  minAvailable: 1
  # maxUnavailable: ""
  unhealthyPodEvictionPolicy: AlwaysAllow
## Assign a PriorityClassName to pods if set
# priorityClassName: ""

## Define Log Format
# Use logfmt (default) or json logging
# logFormat: logfmt

## Decrease log verbosity to errors only
# logLevel: error
kubeletService:
  ## If true, the operator will create and maintain a service for scraping kubelets
  ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/helm/prometheus-operator/README.md
  ##
  enabled: true
  namespace: kube-system
  selector: ""
  ## Use '{{ template "kube-prometheus-stack.fullname" . }}-kubelet' by default
  name: ""
## Create Endpoints objects for kubelet targets.
kubeletEndpointsEnabled: true
## Create EndpointSlice objects for kubelet targets.
kubeletEndpointSliceEnabled: false
## Extra arguments to pass to prometheusOperator
# https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/platform/operator.md
extraArgs: []
# - --labels="cluster=talos-cluster"
# NOTE(review): keys below appear to be direct children of prometheusOperator —
# original nesting was flattened by extraction; confirm and re-indent when merging.
## Create a servicemonitor for the operator
##
serviceMonitor:
  ## If true, create a serviceMonitor for prometheus operator
  ##
  selfMonitor: true
  ## Labels for ServiceMonitor
  additionalLabels: {}
  ## Scrape interval. If not set, the Prometheus default scrape interval is used.
  ##
  interval: ""
  ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
  ##
  sampleLimit: 0
  ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
  ##
  targetLimit: 0
  ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  ##
  labelLimit: 0
  ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  ##
  labelNameLengthLimit: 0
  ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  ##
  labelValueLengthLimit: 0
  ## Scrape timeout. If not set, the Prometheus default scrape timeout is used.
  scrapeTimeout: ""
  ## Metric relabel configs to apply to samples before ingestion.
  ##
  metricRelabelings: []
  # - action: keep
  #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
  #   sourceLabels: [__name__]

  # relabel configs to apply to samples before ingestion.
  ##
  relabelings: []
  # - sourceLabels: [__meta_kubernetes_pod_node_name]
  #   separator: ;
  #   regex: ^(.*)$
  #   targetLabel: nodename
  #   replacement: $1
  #   action: replace
## Resource limits & requests
##
resources: {}
# limits:
#   cpu: 200m
#   memory: 200Mi
# requests:
#   cpu: 100m
#   memory: 100Mi

## Operator Environment
##   env:
##     VARIABLE: value
env:
  GOGC: "30"
# Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
# because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working
##
hostNetwork: false
## Define which Nodes the Pods are scheduled on.
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
##
nodeSelector: {}
## Tolerations for use with node taints
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
##
tolerations: []
# - key: "key"
#   operator: "Equal"
#   value: "value"
#   effect: "NoSchedule"

## Assign custom affinity rules to the prometheus operator
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
affinity: {}
# nodeAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     nodeSelectorTerms:
#     - matchExpressions:
#       - key: kubernetes.io/e2e-az-name
#         operator: In
#         values:
#         - e2e-az1
#         - e2e-az2
dnsConfig: {}
# nameservers:
#   - 1.2.3.4
# searches:
#   - ns1.svc.cluster-domain.example
#   - my.dns.search.suffix
# options:
#   - name: ndots
#     value: "2"
#   - name: edns0
securityContext:
  fsGroup: 65534
  runAsGroup: 65534
  runAsNonRoot: true
  runAsUser: 65534
  seccompProfile:
    type: RuntimeDefault
## Setup hostUsers for prometheus-operator
## ref: https://kubernetes.io/docs/concepts/workloads/pods/user-namespaces/
hostUsers: ~
## Container-specific security context configuration
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
containerSecurityContext:
  allowPrivilegeEscalation: false
  readOnlyRootFilesystem: true
  capabilities:
    drop:
    - ALL
# NOTE(review): keys below appear to be direct children of prometheusOperator —
# original nesting was flattened by extraction; confirm and re-indent when merging.
# Enable vertical pod autoscaler support for prometheus-operator
verticalPodAutoscaler:
  enabled: false
  # Recommender responsible for generating recommendation for the object.
  # List should be empty (then the default recommender will generate the recommendation)
  # or contain exactly one recommender.
  # recommenders:
  # - name: custom-recommender-performance

  # List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
  controlledResources: []
  # Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits.
  # controlledValues: RequestsAndLimits

  # Define the max allowed resources for the pod
  maxAllowed: {}
  # cpu: 200m
  # memory: 100Mi
  # Define the min allowed resources for the pod
  minAllowed: {}
  # cpu: 200m
  # memory: 100Mi

  updatePolicy:
    # Specifies minimal number of replicas which need to be alive for VPA Updater to attempt pod eviction
    # minReplicas: 1
    # Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
    # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "InPlaceOrRecreate".
    updateMode: Recreate
## Prometheus-operator image
##
image:
  registry: cgr.dev
  repository: chainguard-private/prometheus-operator
  # if not set appVersion field from Chart.yaml is used
  tag: latest
  sha: sha256:e84bb830eacf095e21ba9e334fa9addf068ef6d9b6dd7e5e1144bba1bd30bae7
  pullPolicy: IfNotPresent
## Prometheus image to use for prometheuses managed by the operator
##
# prometheusDefaultBaseImage: prometheus/prometheus

## Prometheus image registry to use for prometheuses managed by the operator
##
# prometheusDefaultBaseImageRegistry: quay.io

## Alertmanager image to use for alertmanagers managed by the operator
##
# alertmanagerDefaultBaseImage: prometheus/alertmanager

## Alertmanager image registry to use for alertmanagers managed by the operator
##
# alertmanagerDefaultBaseImageRegistry: quay.io

## Prometheus-config-reloader
##
prometheusConfigReloader:
  image:
    registry: cgr.dev
    repository: chainguard-private/prometheus-config-reloader
    # if not set appVersion field from Chart.yaml is used
    tag: latest
    sha: sha256:31e1e672a0f85742826d390870b4f8e522f174306a29a5053a1980a0244afc2a
  # add prometheus config reloader liveness and readiness probe. Default: false
  enableProbe: false
  # resource config for prometheusConfigReloader
  resources: {}
  # requests:
  #   cpu: 200m
  #   memory: 50Mi
  # limits:
  #   cpu: 200m
  #   memory: 50Mi
## Thanos side-car image when configured
##
thanosImage:
  registry: cgr.dev
  repository: chainguard-private/thanos
  tag: latest
  sha: sha256:d4c37033c9f29424057f65ebe77a557e43ee0c1269c2f3b87368c0abbc1f74bd
# NOTE(review): keys below appear to be direct children of prometheusOperator —
# original nesting was flattened by extraction; confirm and re-indent when merging.
## Set a Label Selector to filter watched prometheus and prometheusAgent
##
prometheusInstanceSelector: ""
## Set a Label Selector to filter watched alertmanager
##
alertmanagerInstanceSelector: ""
## Set a Label Selector to filter watched thanosRuler
thanosRulerInstanceSelector: ""
## Set a Field Selector to filter watched secrets
##
secretFieldSelector: "type!=kubernetes.io/dockercfg,type!=kubernetes.io/service-account-token,type!=helm.sh/release.v1"
## If false then the user will opt out of automounting API credentials.
##
automountServiceAccountToken: true
## Additional volumes
##
extraVolumes: []
## Additional volume mounts
##
extraVolumeMounts: []
## Deploy a Prometheus instance
##
prometheus:
  enabled: true
  ## Toggle prometheus into agent mode
  ## Note many of features described below (e.g. rules, query, alerting, remote read, thanos) will not work in agent mode.
  ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/platform/prometheus-agent.md
  ##
  agentMode: false
  ## Annotations for Prometheus
  ##
  annotations: {}
  ## Additional labels for Prometheus
  ##
  additionalLabels: {}
  # NOTE(review): the remaining prometheus.* keys follow below; their nesting was
  # flattened by extraction and must be re-indented under this map when merging.
# NOTE(review): keys below appear to be direct children of prometheus —
# original nesting was flattened by extraction; confirm and re-indent when merging.
## Configure network policy for the prometheus
networkPolicy:
  enabled: false
  ## Flavor of the network policy to use.
  # Can be:
  # * kubernetes for networking.k8s.io/v1/NetworkPolicy
  # * cilium for cilium.io/v2/CiliumNetworkPolicy
  flavor: kubernetes
  namespace:
  # cilium:
  #   endpointSelector:
  #   egress:
  #   ingress:

  # egress:
  # - {}
  # ingress:
  # - {}
  # podSelector:
  #   matchLabels:
  #     app: prometheus
## Service account for Prometheuses to use.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
  create: true
  name: ""
  annotations: {}
  automountServiceAccountToken: true
# Service for thanos service discovery on sidecar
# Enable this can make Thanos Query can use
# `--store=dnssrv+_grpc._tcp.${kube-prometheus-stack.fullname}-thanos-discovery.${namespace}.svc.cluster.local` to discovery
# Thanos sidecar on prometheus nodes
# (Please remember to change ${kube-prometheus-stack.fullname} and ${namespace}. Not just copy and paste!)
thanosService:
  enabled: false
  annotations: {}
  labels: {}
  ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
  ##
  externalTrafficPolicy: Cluster
  ## Service type
  ##
  type: ClusterIP
  ## Service dual stack
  ##
  ipDualStack:
    enabled: false
    ipFamilies: ["IPv6", "IPv4"]
    ipFamilyPolicy: "PreferDualStack"
  ## gRPC port config
  portName: grpc
  port: 10901
  targetPort: "grpc"
  ## HTTP port config (for metrics)
  httpPortName: http
  httpPort: 10902
  targetHttpPort: "http"
  ## ClusterIP to assign
  # Default is to make this a headless service ("None")
  clusterIP: "None"
  ## Port to expose on each node, if service type is NodePort
  ##
  nodePort: 30901
  httpNodePort: 30902
# ServiceMonitor to scrape Sidecar metrics
# Needs thanosService to be enabled as well
thanosServiceMonitor:
  enabled: false
  interval: ""
  ## Additional labels
  ##
  additionalLabels: {}
  ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
  scheme: ""
  ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
  ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#tlsconfig
  tlsConfig: {}
  bearerTokenFile:
  ## Metric relabel configs to apply to samples before ingestion.
  metricRelabelings: []
  ## relabel configs to apply to samples before ingestion.
  relabelings: []
# Service for external access to sidecar
# Enabling this creates a service to expose thanos-sidecar outside the cluster.
thanosServiceExternal:
  enabled: false
  annotations: {}
  labels: {}
  loadBalancerIP: ""
  loadBalancerSourceRanges: []
  ## gRPC port config
  portName: grpc
  port: 10901
  targetPort: "grpc"
  ## HTTP port config (for metrics)
  httpPortName: http
  httpPort: 10902
  targetHttpPort: "http"
  ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
  ##
  externalTrafficPolicy: Cluster
  ## Service type
  ##
  type: LoadBalancer
  ## Port to expose on each node
  ##
  nodePort: 30901
  httpNodePort: 30902
# NOTE(review): keys below appear to be direct children of prometheus —
# original nesting was flattened by extraction; confirm and re-indent when merging.
## Configuration for Prometheus service
##
service:
  enabled: true
  annotations: {}
  labels: {}
  clusterIP: ""
  ipDualStack:
    enabled: false
    ipFamilies: ["IPv6", "IPv4"]
    ipFamilyPolicy: "PreferDualStack"
  ## Port for Prometheus Service to listen on
  ##
  port: 9090
  ## To be used with a proxy extraContainer port
  targetPort: 9090
  ## Port for Prometheus Reloader to listen on
  ##
  reloaderWebPort: 8080
  ## Port to expose for Prometheus Reloader
  ## Only used if service.type is 'NodePort'
  ##
  reloaderWebNodePort: null
  ## List of IP addresses at which the Prometheus server service is available
  ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
  ##
  externalIPs: []
  ## Port to expose on each node
  ## Only used if service.type is 'NodePort'
  ##
  nodePort: 30090
  ## Loadbalancer IP
  ## Only use if service.type is "LoadBalancer"
  loadBalancerIP: ""
  loadBalancerSourceRanges: []
  ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
  ##
  externalTrafficPolicy: Cluster
  ## Service type
  ##
  type: ClusterIP
  ## Additional ports to open for Prometheus service
  ##
  additionalPorts: []
  # additionalPorts:
  # - name: oauth-proxy
  #   port: 8081
  #   targetPort: 8081
  # - name: oauth-metrics
  #   port: 8082
  #   targetPort: 8082

  ## Consider that all endpoints are considered "ready" even if the Pods themselves are not
  ## Ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec
  publishNotReadyAddresses: false
  ## If you want to make sure that connections from a particular client are passed to the same Pod each time
  ## Accepts 'ClientIP' or 'None'
  ##
  sessionAffinity: None
  ## If you want to modify the ClientIP sessionAffinity timeout
  ## The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP"
  ##
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 10800
## Configuration for creating a separate Service for each statefulset Prometheus replica
##
servicePerReplica:
  enabled: false
  annotations: {}
  ## Port for Prometheus Service per replica to listen on
  ##
  port: 9090
  ## To be used with a proxy extraContainer port
  targetPort: 9090
  ## Port to expose on each node
  ## Only used if servicePerReplica.type is 'NodePort'
  ##
  nodePort: 30091
  ## Loadbalancer source IP ranges
  ## Only used if servicePerReplica.type is "LoadBalancer"
  loadBalancerSourceRanges: []
  ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
  ##
  externalTrafficPolicy: Cluster
  ## Service type
  ##
  type: ClusterIP
  ## Service dual stack
  ##
  ipDualStack:
    enabled: false
    ipFamilies: ["IPv6", "IPv4"]
    ipFamilyPolicy: "PreferDualStack"
## Configure pod disruption budgets for Prometheus
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
##
podDisruptionBudget:
  enabled: false
  minAvailable: 1
  # maxUnavailable: ""
  unhealthyPodEvictionPolicy: AlwaysAllow
## Enable vertical pod autoscaler support for Prometheus
## ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler
##
verticalPodAutoscaler:
  enabled: false
  # Recommender responsible for generating recommendation for the object.
  # List should be empty (then the default recommender will generate the recommendation)
  # or contain exactly one recommender.
  # recommenders:
  # - name: custom-recommender-performance

  # List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
  controlledResources: []
  # Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits.
  # controlledValues: RequestsAndLimits

  # Define the max allowed resources for the pod
  maxAllowed: {}
  # cpu: 200m
  # memory: 100Mi
  # Define the min allowed resources for the pod
  minAllowed: {}
  # cpu: 200m
  # memory: 100Mi

  updatePolicy:
    # Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
    # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "InPlaceOrRecreate".
    updateMode: Recreate
# Ingress exposes thanos sidecar outside the cluster
thanosIngress:
  enabled: false
  ingressClassName: ""
  annotations: {}
  labels: {}
  servicePort: 10901
  ## Port to expose on each node
  ## Only used if service.type is 'NodePort'
  ##
  nodePort: 30901
  ## Hosts must be provided if Ingress is enabled.
  ##
  hosts: []
  # - thanos-gateway.domain.com

  ## Paths to use for ingress rules
  ##
  paths: []
  # - /

  ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
  ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
  # pathType: ImplementationSpecific

  ## TLS configuration for Thanos Ingress
  ## Secret must be manually created in the namespace
  ##
  tls: []
  # - secretName: thanos-gateway-tls
  #   hosts:
  #   - thanos-gateway.domain.com
  #
# NOTE(review): keys below appear to be direct children of prometheus —
# original nesting was flattened by extraction; confirm and re-indent when merging.
## ExtraSecret can be used to store various data in an extra secret
## (use it for example to store hashed basic auth credentials)
extraSecret:
  ## if not set, name will be auto generated
  # name: ""
  annotations: {}
  data: {}
  # auth: |
  #   foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
  #   someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.

ingress:
  enabled: false
  ingressClassName: ""
  annotations: {}
  labels: {}
  ## Redirect ingress to an additional defined port on the service
  # servicePort: 8081

  ## Hostnames.
  ## Must be provided if Ingress is enabled.
  ##
  # hosts:
  #   - prometheus.domain.com
  hosts: []
  ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix
  ##
  paths: []
  # - /

  ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
  ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
  # pathType: ImplementationSpecific

  ## TLS configuration for Prometheus Ingress
  ## Secret must be manually created in the namespace
  ##
  tls: []
  # - secretName: prometheus-general-tls
  #   hosts:
  #   - prometheus.example.com
# -- BETA: Configure the gateway routes for the chart here.
# More routes can be added by adding a dictionary key like the 'main' route.
# Be aware that this is an early beta of this feature,
# kube-prometheus-stack does not guarantee this works and is subject to change.
# Being BETA this can/will change in the future without notice, do not use unless you want to take that risk
# [[ref]](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1alpha2)
route:
  main:
    # -- Enables or disables the route
    enabled: false
    # -- Set the route apiVersion, e.g. gateway.networking.k8s.io/v1 or gateway.networking.k8s.io/v1alpha2
    apiVersion: gateway.networking.k8s.io/v1
    # -- Set the route kind
    # Valid options are GRPCRoute, HTTPRoute, TCPRoute, TLSRoute, UDPRoute
    kind: HTTPRoute
    annotations: {}
    labels: {}
    hostnames: []
    # - my-filter.example.com
    parentRefs: []
    # - name: acme-gw

    # -- create http route for redirect (https://gateway-api.sigs.k8s.io/guides/http-redirect-rewrite/#http-to-https-redirects)
    ## Take care that you only enable this on the http listener of the gateway to avoid an infinite redirect.
    ## matches, filters and additionalRules will be ignored if this is set to true. Be aware.
    httpsRedirect: false
    matches:
    - path:
        type: PathPrefix
        value: /
    ## Filters define the filters that are applied to requests that match this rule.
    filters: []
    ## Session persistence configuration for the route rule.
    sessionPersistence: {}
    # sessionName: route
    # type: Cookie
    # absoluteTimeout: 12h
    # cookieConfig:
    #   lifetimeType: Permanent

    ## Additional custom rules that can be added to the route
    additionalRules: []
## Configuration for creating an Ingress that will map to each Prometheus replica service
## prometheus.servicePerReplica must be enabled
##
ingressPerReplica:
  enabled: false
  ingressClassName: ""
  annotations: {}
  labels: {}
  ## Final form of the hostname for each per replica ingress is
  ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }}
  ##
  ## Prefix for the per replica ingress that will have `-$replicaNumber`
  ## appended to the end
  hostPrefix: ""
  ## Domain that will be used for the per replica ingress
  hostDomain: ""
  ## Paths to use for ingress rules
  ##
  paths: []
  # - /

  ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
  ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
  # pathType: ImplementationSpecific

  ## Secret name containing the TLS certificate for Prometheus per replica ingress
  ## Secret must be manually created in the namespace
  tlsSecretName: ""
  ## Separated secret for each per replica Ingress. Can be used together with cert-manager
  ##
  tlsSecretPerReplica:
    enabled: false
    ## Final form of the secret for each per replica ingress is
    ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }}
    ##
    prefix: "prometheus"
serviceMonitor:
  ## If true, create a serviceMonitor for prometheus
  ##
  selfMonitor: true
  ## Scrape interval. If not set, the Prometheus default scrape interval is used.
  ##
  interval: ""
  ## Additional labels
  ##
  additionalLabels: {}
  ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
  ##
  sampleLimit: 0
  ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
  ##
  targetLimit: 0
  ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  ##
  labelLimit: 0
  ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  ##
  labelNameLengthLimit: 0
  ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  ##
  labelValueLengthLimit: 0
  ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
  scheme: ""
  ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
  ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#tlsconfig
  tlsConfig: {}
  # NOTE(review): intentionally left empty/null (not "") to match upstream chart defaults;
  # Helm templates treat both as falsy, but keep as-is to avoid changing rendered output.
  bearerTokenFile:
  ## Metric relabel configs to apply to samples before ingestion.
  ##
  metricRelabelings: []
  # - action: keep
  #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
  #   sourceLabels: [__name__]

  ## relabel configs to apply to samples before ingestion.
  ##
  relabelings: []
  # - sourceLabels: [__meta_kubernetes_pod_node_name]
  #   separator: ;
  #   regex: ^(.*)$
  #   targetLabel: nodename
  #   replacement: $1
  #   action: replace

  ## Additional Endpoints
  ##
  additionalEndpoints: []
  # - port: oauth-metrics
  #   path: /metrics
## Settings affecting prometheusSpec
3528
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#prometheusspec
3529
##
3530
prometheusSpec:
3531
## Statefulset's persistent volume claim retention policy
3532
## whenDeleted and whenScaled determine whether
3533
## statefulset's PVCs are deleted (true) or retained (false)
3534
## on scaling down and deleting statefulset, respectively.
3535
## Requires Kubernetes version 1.27.0+.
3536
## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
3537
persistentVolumeClaimRetentionPolicy: {}
3538
# whenDeleted: Retain
3539
# whenScaled: Retain
3540
3541
## If true, pass --storage.tsdb.max-block-duration=2h to prometheus. This is already done if using Thanos
3542
##
3543
disableCompaction: false
3544
## AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in the pod,
3545
## If the field isn't set, the operator mounts the service account token by default.
3546
## Warning: be aware that by default, Prometheus requires the service account token for Kubernetes service discovery,
3547
## It is possible to use strategic merge patch to project the service account token into the 'prometheus' container.
3548
automountServiceAccountToken: true
3549
## APIServerConfig
3550
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#apiserverconfig
3551
##
3552
apiserverConfig: {}
3553
## Allows setting additional arguments for the Prometheus container
3554
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.Prometheus
3555
additionalArgs: []
3556
## Convert all classic histograms to native histograms with custom buckets.
3557
## This corresponds to the 'convert_classic_histograms_to_nhcb' field in Prometheus configuration.
3558
##
3559
convertClassicHistogramsToNHCB: false
3560
## Enable scraping of classic histograms that are also exposed as native histograms.
3561
## This corresponds to the 'always_scrape_classic_histograms' field in Prometheus configuration.
3562
##
3563
scrapeClassicHistograms: false
3564
## Enable scraping of native histograms.
3565
## This corresponds to the 'scrape_native_histograms' field in Prometheus configuration.
3566
##
3567
scrapeNativeHistograms: false
3568
## File to which scrape failures are logged.
3569
## Reloading the configuration will reopen the file.
3570
## Defaults to empty (disabled)
3571
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.Prometheus
3572
##
3573
scrapeFailureLogFile: ""
3574
## Interval between consecutive scrapes.
3575
## Defaults to 30s.
3576
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/release-0.44/pkg/prometheus/promcfg.go#L180-L183
3577
##
3578
scrapeInterval: ""
3579
## Number of seconds to wait for target to respond before erroring
3580
##
3581
scrapeTimeout: ""
3582
## List of scrape classes to expose to scraping objects such as
3583
## PodMonitors, ServiceMonitors, Probes and ScrapeConfigs.
3584
##
3585
scrapeClasses: []
3586
# - name: istio-mtls
3587
# default: false
3588
# tlsConfig:
3589
# caFile: /etc/prometheus/secrets/istio.default/root-cert.pem
3590
# certFile: /etc/prometheus/secrets/istio.default/cert-chain.pem
3591
3592
## PodTargetLabels are appended to the `spec.podTargetLabels` field of all PodMonitor and ServiceMonitor objects.
3593
##
3594
podTargetLabels: []
3595
# - customlabel
3596
3597
## Interval between consecutive evaluations.
3598
##
3599
evaluationInterval: ""
3600
## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP.
3601
##
3602
listenLocal: false
3603
## enableOTLPReceiver enables the OTLP receiver for Prometheus.
3604
enableOTLPReceiver: false
3605
## EnableAdminAPI enables Prometheus the administrative HTTP API which includes functionality such as deleting time series.
3606
## This is disabled by default.
3607
## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis
3608
##
3609
enableAdminAPI: false
3610
## Sets version of Prometheus overriding the Prometheus version as derived
3611
## from the image tag. Useful in cases where the tag does not follow semver v2.
3612
version: ""
3613
## WebTLSConfig defines the TLS parameters for HTTPS
3614
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#webtlsconfig
3615
web: {}
3616
## Exemplars related settings that are runtime reloadable.
3617
## It requires to enable the exemplar storage feature to be effective.
3618
exemplars: {}
3619
## Maximum number of exemplars stored in memory for all series.
3620
## If not set, Prometheus uses its default value.
3621
## A value of zero or less than zero disables the storage.
3622
# maxSize: 100000
3623
3624
# EnableFeatures API enables access to Prometheus disabled features.
3625
# ref: https://prometheus.io/docs/prometheus/latest/feature_flags/
3626
enableFeatures: []
3627
# - exemplar-storage
3628
3629
## https://prometheus.io/docs/guides/opentelemetry
3630
##
3631
otlp: {}
3632
# promoteResourceAttributes: []
3633
# keepIdentifyingResourceAttributes: false
3634
# translationStrategy: NoUTF8EscapingWithSuffixes
3635
# convertHistogramsToNHCB: false
3636
3637
##
3638
serviceName:
3639
## Image of Prometheus.
3640
##
3641
image:
3642
registry: cgr.dev
3643
repository: chainguard-private/prometheus
3644
tag: latest
3645
sha: sha256:fb0dcc4117889b88a2a432f2398140287c2de92752251cd3ae61fe6bdb68ebd0
3646
pullPolicy: IfNotPresent
3647
## Tolerations for use with node taints
3648
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
3649
##
3650
tolerations: []
3651
# - key: "key"
3652
# operator: "Equal"
3653
# value: "value"
3654
# effect: "NoSchedule"
3655
3656
## If specified, the pod's topology spread constraints.
3657
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
3658
##
3659
topologySpreadConstraints: []
3660
# - maxSkew: 1
3661
# topologyKey: topology.kubernetes.io/zone
3662
# whenUnsatisfiable: DoNotSchedule
3663
# labelSelector:
3664
# matchLabels:
3665
# app: prometheus
3666
3667
## Disable alerting
3668
##
3669
disableAlerting: false
3670
## Alertmanagers to which alerts will be sent
3671
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#alertmanagerendpoints
3672
##
3673
## Default configuration will connect to the alertmanager deployed as part of this release
3674
##
3675
alertingEndpoints: []
3676
# - name: ""
3677
# namespace: ""
3678
# port: http
3679
# scheme: http
3680
# pathPrefix: ""
3681
# tlsConfig: {}
3682
# bearerTokenFile: ""
3683
# apiVersion: v2
3684
3685
## External labels to add to any time series or alerts when communicating with external systems
3686
##
3687
externalLabels: {}
3688
## enable --web.enable-remote-write-receiver flag on prometheus-server
3689
##
3690
enableRemoteWriteReceiver: false
3691
## Name of the external label used to denote replica name
3692
##
3693
replicaExternalLabelName: ""
3694
## If true, the Operator won't add the external label used to denote replica name
3695
##
3696
replicaExternalLabelNameClear: false
3697
## Name of the external label used to denote Prometheus instance name
3698
##
3699
prometheusExternalLabelName: ""
3700
## If true, the Operator won't add the external label used to denote Prometheus instance name
3701
##
3702
prometheusExternalLabelNameClear: false
3703
## External URL at which Prometheus will be reachable.
3704
##
3705
externalUrl: ""
3706
## Define which Nodes the Pods are scheduled on.
3707
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
3708
##
3709
nodeSelector: {}
3710
## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
3711
## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not
3712
## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated
3713
## with the new list of secrets.
3714
##
3715
secrets: []
3716
## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
3717
## The ConfigMaps are mounted into /etc/prometheus/configmaps/.
3718
##
3719
configMaps: []
3720
## QuerySpec defines the query command line flags when starting Prometheus.
3721
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#queryspec
3722
##
3723
query: {}
3724
## If nil, select own namespace. Namespaces to be selected for PrometheusRules discovery.
3725
ruleNamespaceSelector: {}
3726
## Example which selects PrometheusRules in namespaces with label "prometheus" set to "somelabel"
3727
# ruleNamespaceSelector:
3728
# matchLabels:
3729
# prometheus: somelabel
3730
3731
## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the
3732
## prometheus resource to be created with selectors based on values in the helm deployment,
3733
## which will also match the PrometheusRule resources created
3734
##
3735
ruleSelectorNilUsesHelmValues: true
3736
## PrometheusRules to be selected for target discovery.
3737
## If {}, select all PrometheusRules
3738
##
3739
ruleSelector: {}
3740
## Example which select all PrometheusRules resources
3741
## with label "prometheus" with values any of "example-rules" or "example-rules-2"
3742
# ruleSelector:
3743
# matchExpressions:
3744
# - key: prometheus
3745
# operator: In
3746
# values:
3747
# - example-rules
3748
# - example-rules-2
3749
#
3750
## Example which select all PrometheusRules resources with label "role" set to "example-rules"
3751
# ruleSelector:
3752
# matchLabels:
3753
# role: example-rules
3754
3755
## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the
3756
## prometheus resource to be created with selectors based on values in the helm deployment,
3757
## which will also match the servicemonitors created
3758
##
3759
serviceMonitorSelectorNilUsesHelmValues: true
3760
## ServiceMonitors to be selected for target discovery.
3761
## If {}, select all ServiceMonitors
3762
##
3763
serviceMonitorSelector: {}
3764
## Example which selects ServiceMonitors with label "prometheus" set to "somelabel"
3765
# serviceMonitorSelector:
3766
# matchLabels:
3767
# prometheus: somelabel
3768
3769
## Namespaces to be selected for ServiceMonitor discovery.
3770
##
3771
serviceMonitorNamespaceSelector: {}
3772
## Example which selects ServiceMonitors in namespaces with label "prometheus" set to "somelabel"
3773
# serviceMonitorNamespaceSelector:
3774
# matchLabels:
3775
# prometheus: somelabel
3776
3777
## If true, a nil or {} value for prometheus.prometheusSpec.podMonitorSelector will cause the
3778
## prometheus resource to be created with selectors based on values in the helm deployment,
3779
## which will also match the podmonitors created
3780
##
3781
podMonitorSelectorNilUsesHelmValues: true
3782
## PodMonitors to be selected for target discovery.
3783
## If {}, select all PodMonitors
3784
##
3785
podMonitorSelector: {}
3786
## Example which selects PodMonitors with label "prometheus" set to "somelabel"
3787
# podMonitorSelector:
3788
# matchLabels:
3789
# prometheus: somelabel
3790
3791
## If nil, select own namespace. Namespaces to be selected for PodMonitor discovery.
3792
podMonitorNamespaceSelector: {}
3793
## Example which selects PodMonitor in namespaces with label "prometheus" set to "somelabel"
3794
# podMonitorNamespaceSelector:
3795
# matchLabels:
3796
# prometheus: somelabel
3797
3798
## If true, a nil or {} value for prometheus.prometheusSpec.probeSelector will cause the
3799
## prometheus resource to be created with selectors based on values in the helm deployment,
3800
## which will also match the probes created
3801
##
3802
probeSelectorNilUsesHelmValues: true
3803
## Probes to be selected for target discovery.
3804
## If {}, select all Probes
3805
##
3806
probeSelector: {}
3807
## Example which selects Probes with label "prometheus" set to "somelabel"
3808
# probeSelector:
3809
# matchLabels:
3810
# prometheus: somelabel
3811
3812
## If nil, select own namespace. Namespaces to be selected for Probe discovery.
3813
probeNamespaceSelector: {}
3814
## Example which selects Probe in namespaces with label "prometheus" set to "somelabel"
3815
# probeNamespaceSelector:
3816
# matchLabels:
3817
# prometheus: somelabel
3818
3819
## If true, a nil or {} value for prometheus.prometheusSpec.scrapeConfigSelector will cause the
3820
## prometheus resource to be created with selectors based on values in the helm deployment,
3821
## which will also match the scrapeConfigs created
3822
##
3823
## If null and scrapeConfigSelector is also null, exclude field from the prometheusSpec
3824
## (keeping downward compatibility with older versions of CRD)
3825
##
3826
scrapeConfigSelectorNilUsesHelmValues: true
3827
## scrapeConfigs to be selected for target discovery.
3828
## If {}, select all scrapeConfigs
3829
##
3830
scrapeConfigSelector: {}
3831
## Example which selects scrapeConfigs with label "prometheus" set to "somelabel"
3832
# scrapeConfigSelector:
3833
# matchLabels:
3834
# prometheus: somelabel
3835
3836
## If nil, select own namespace. Namespaces to be selected for scrapeConfig discovery.
3837
## If null, exclude the field from the prometheusSpec (keeping downward compatibility with older versions of CRD)
3838
scrapeConfigNamespaceSelector: {}
3839
## Example which selects scrapeConfig in namespaces with label "prometheus" set to "somelabel"
3840
# scrapeConfigNamespaceSelector:
3841
# matchLabels:
3842
# prometheus: somelabel
3843
3844
## How long to retain metrics
3845
##
3846
retention: 10d
3847
## Maximum size of metrics
3848
## Unit format should be in the form of "50GiB"
3849
retentionSize: ""
3850
## Allow out-of-order/out-of-bounds samples ingested into Prometheus for a specified duration
3851
## See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tsdb
3852
tsdb:
3853
outOfOrderTimeWindow: 0s
3854
## Enable compression of the write-ahead log using Snappy.
3855
##
3856
walCompression: true
3857
## If true, the Operator won't process any Prometheus configuration changes
3858
##
3859
paused: false
3860
## Number of replicas of each shard to deploy for a Prometheus deployment.
3861
## Number of replicas multiplied by shards is the total number of Pods created.
3862
##
3863
replicas: 1
3864
## EXPERIMENTAL: Number of shards to distribute targets onto.
3865
## Number of replicas multiplied by shards is the total number of Pods created.
3866
## Note that scaling down shards will not reshard data onto remaining instances, it must be manually moved.
3867
## Increasing shards will not reshard data either but it will continue to be available from the same instances.
3868
## To query globally use Thanos sidecar and Thanos querier or remote write data to a central location.
3869
## Sharding is done on the content of the `__address__` target meta-label.
3870
##
3871
shards: 1
3872
## Log level for Prometheus be configured in
3873
##
3874
logLevel: info
3875
## Log format for Prometheus be configured in
3876
##
3877
logFormat: logfmt
3878
## Prefix used to register routes, overriding externalUrl route.
3879
## Useful for proxies that rewrite URLs.
3880
##
3881
routePrefix: /
3882
## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
3883
## Metadata Labels and Annotations gets propagated to the prometheus pods.
3884
##
3885
podMetadata: {}
3886
# labels:
3887
# app: prometheus
3888
# k8s-app: prometheus
3889
3890
## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
3891
## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
3892
## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
3893
## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
3894
podAntiAffinity: "soft"
3895
## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
3896
## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
3897
##
3898
podAntiAffinityTopologyKey: kubernetes.io/hostname
3899
## Assign custom affinity rules to the prometheus instance
3900
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
3901
##
3902
affinity: {}
3903
# nodeAffinity:
3904
# requiredDuringSchedulingIgnoredDuringExecution:
3905
# nodeSelectorTerms:
3906
# - matchExpressions:
3907
# - key: kubernetes.io/e2e-az-name
3908
# operator: In
3909
# values:
3910
# - e2e-az1
3911
# - e2e-az2
3912
3913
## The remote_read spec configuration for Prometheus.
3914
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#remotereadspec
3915
remoteRead: []
3916
# - url: http://remote1/read
3917
## additionalRemoteRead is appended to remoteRead
3918
additionalRemoteRead: []
3919
## The remote_write spec configuration for Prometheus.
3920
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#remotewritespec
3921
remoteWrite: []
3922
# - url: http://remote1/push
3923
## additionalRemoteWrite is appended to remoteWrite
3924
additionalRemoteWrite: []
3925
## Enable/Disable Grafana dashboards provisioning for prometheus remote write feature
3926
remoteWriteDashboards: false
3927
## Resource limits & requests
3928
##
3929
resources: {}
3930
# requests:
3931
# memory: 400Mi
3932
3933
## Prometheus StorageSpec for persistent data
3934
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/platform/storage.md
3935
##
3936
storageSpec: {}
3937
## Using PersistentVolumeClaim
3938
##
3939
# volumeClaimTemplate:
3940
# spec:
3941
# storageClassName: gluster
3942
# accessModes: ["ReadWriteOnce"]
3943
# resources:
3944
# requests:
3945
# storage: 50Gi
3946
# selector: {}
3947
3948
## Using tmpfs volume
3949
##
3950
# emptyDir:
3951
# medium: Memory
3952
3953
# Additional volumes on the output StatefulSet definition.
3954
volumes: []
3955
# Additional VolumeMounts on the output StatefulSet definition.
3956
volumeMounts: []
3957
## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations
3958
## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form
3959
## as specified in the official Prometheus documentation:
3960
## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are
3961
## appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility
3962
## to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible
3963
## scrape configs are going to break Prometheus after the upgrade.
3964
## AdditionalScrapeConfigs can be defined as a list or as a templated string.
3965
##
3966
## The scrape configuration example below will find master nodes, provided they have the name .*mst.*, relabel the
3967
## port to 2379 and allow etcd scraping provided it is running on all Kubernetes master nodes
3968
##
3969
additionalScrapeConfigs: []
3970
# - job_name: kube-etcd
3971
# kubernetes_sd_configs:
3972
# - role: node
3973
# scheme: https
3974
# tls_config:
3975
# ca_file: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
3976
# cert_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client
3977
# key_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
3978
# relabel_configs:
3979
# - action: labelmap
3980
# regex: __meta_kubernetes_node_label_(.+)
3981
# - source_labels: [__address__]
3982
# action: replace
3983
# target_label: __address__
3984
# regex: ([^:;]+):(\d+)
3985
# replacement: ${1}:2379
3986
# - source_labels: [__meta_kubernetes_node_name]
3987
# action: keep
3988
# regex: .*mst.*
3989
# - source_labels: [__meta_kubernetes_node_name]
3990
# action: replace
3991
# target_label: node
3992
# regex: (.*)
3993
# replacement: ${1}
3994
# metric_relabel_configs:
3995
# - regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone)
3996
# action: labeldrop
3997
#
3998
## If scrape config contains a repetitive section, you may want to use a template.
3999
## In the following example, you can see how to define `gce_sd_configs` for multiple zones
4000
# additionalScrapeConfigs: |
4001
# - job_name: "node-exporter"
4002
# gce_sd_configs:
4003
# {{range $zone := .Values.gcp_zones}}
4004
# - project: "project1"
4005
# zone: "{{$zone}}"
4006
# port: 9100
4007
# {{end}}
4008
# relabel_configs:
4009
# ...
4010
4011
## If additional scrape configurations are already deployed in a single secret file you can use this section.
4012
## Expected values are the secret name and key
4013
## Cannot be used with additionalScrapeConfigs
4014
additionalScrapeConfigsSecret: {}
4015
# enabled: false
4016
# name:
4017
# key:
4018
4019
## additionalPrometheusSecretsAnnotations allows to add annotations to the kubernetes secret. This can be useful
4020
## when deploying via spinnaker to disable versioning on the secret, strategy.spinnaker.io/versioned: 'false'
4021
additionalPrometheusSecretsAnnotations: {}
4022
## AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified
4023
## in the official Prometheus documentation https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config.
4024
## AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator.
4025
## As AlertManager configs are appended, the user is responsible to make sure it is valid. Note that using this
4026
## feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release
4027
## notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade.
4028
##
4029
additionalAlertManagerConfigs: []
4030
# - consul_sd_configs:
4031
# - server: consul.dev.test:8500
4032
# scheme: http
4033
# datacenter: dev
4034
# tag_separator: ','
4035
# services:
4036
# - metrics-prometheus-alertmanager
4037
4038
## If additional alertmanager configurations are already deployed in a single secret, or you want to manage
4039
## them separately from the helm deployment, you can use this section.
4040
## Expected values are the secret name and key
4041
## Cannot be used with additionalAlertManagerConfigs
4042
additionalAlertManagerConfigsSecret: {}
4043
# name:
4044
# key:
4045
# optional: false
4046
4047
## AdditionalAlertRelabelConfigs allows specifying Prometheus alert relabel configurations. Alert relabel configurations specified are appended
4048
## to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the
4049
## official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs.
4050
## As alert relabel configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the
4051
## possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible alert relabel
4052
## configs are going to break Prometheus after the upgrade.
4053
##
4054
additionalAlertRelabelConfigs: []
4055
# - separator: ;
4056
# regex: prometheus_replica
4057
# replacement: $1
4058
# action: labeldrop
4059
4060
## If additional alert relabel configurations are already deployed in a single secret, or you want to manage
4061
## them separately from the helm deployment, you can use this section.
4062
## Expected values are the secret name and key
4063
## Cannot be used with additionalAlertRelabelConfigs
4064
additionalAlertRelabelConfigsSecret: {}
4065
# name:
4066
# key:
4067
4068
## SecurityContext holds pod-level security attributes and common container settings.
4069
## This defaults to non root user with uid 1000 and gid 2000.
4070
## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md
4071
##
4072
securityContext:
4073
runAsGroup: 2000
4074
runAsNonRoot: true
4075
runAsUser: 1000
4076
fsGroup: 2000
4077
seccompProfile:
4078
type: RuntimeDefault
4079
## DNS configuration for Prometheus.
4080
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.PodDNSConfig
4081
dnsConfig: {}
4082
## DNS policy for Prometheus.
4083
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#dnspolicystring-alias
4084
dnsPolicy: ""
4085
## Priority class assigned to the Pods
4086
##
4087
priorityClassName: ""
4088
## Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment.
4089
## This section is experimental, it may change significantly without deprecation notice in any release.
4090
## This is experimental and may change significantly without backward compatibility in any release.
4091
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#thanosspec
4092
##
4093
thanos: {}
4094
# image: quay.io/thanos/thanos
4095
# secretProviderClass:
4096
# provider: gcp
4097
# parameters:
4098
# secrets: |
4099
# - resourceName: "projects/$PROJECT_ID/secrets/testsecret/versions/latest"
4100
# fileName: "objstore.yaml"
4101
## ObjectStorageConfig configures object storage in Thanos.
4102
# objectStorageConfig:
4103
# # use existing secret, if configured, objectStorageConfig.secret will not be used
4104
# existingSecret: {}
4105
# # name: ""
4106
# # key: ""
4107
# # will render objectStorageConfig secret data and configure it to be used by Thanos custom resource,
4108
# # ignored when prometheusspec.thanos.objectStorageConfig.existingSecret is set
4109
# # https://thanos.io/tip/thanos/storage.md/#s3
4110
# secret: {}
4111
# # type: S3
4112
# # config:
4113
# # bucket: ""
4114
# # endpoint: ""
4115
# # region: ""
4116
# # access_key: ""
4117
# # secret_key: ""
4118
4119
## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod.
4120
## if using proxy extraContainer update targetPort with proxy container port
4121
containers: []
4122
# containers:
4123
# - name: oauth-proxy
4124
# image: quay.io/oauth2-proxy/oauth2-proxy:v7.15.2
4125
# args:
4126
# - --upstream=http://127.0.0.1:9090
4127
# - --http-address=0.0.0.0:8081
4128
# - --metrics-address=0.0.0.0:8082
4129
# - ...
4130
# ports:
4131
# - containerPort: 8081
4132
# name: oauth-proxy
4133
# protocol: TCP
4134
# - containerPort: 8082
4135
# name: oauth-metrics
4136
# protocol: TCP
4137
# resources: {}
4138
4139
## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
4140
## (permissions, dir tree) on mounted volumes before starting prometheus
4141
initContainers: []
4142
## PortName to use for Prometheus.
4143
##
4144
portName: "http-web"
4145
## ArbitraryFSAccessThroughSMs configures whether configuration based on a service monitor can access arbitrary files
4146
## on the file system of the Prometheus container e.g. bearer token files.
4147
arbitraryFSAccessThroughSMs: false
4148
## OverrideHonorLabels if set to true overrides all user configured honor_labels. If HonorLabels is set in ServiceMonitor
4149
## or PodMonitor to true, this overrides honor_labels to false.
4150
overrideHonorLabels: false
4151
## OverrideHonorTimestamps allows to globally enforce honoring timestamps in all scrape configs.
4152
overrideHonorTimestamps: false
4153
## When ignoreNamespaceSelectors is set to true, namespaceSelector from all PodMonitor, ServiceMonitor and Probe objects will be ignored,
4154
## they will only discover targets within the namespace of the PodMonitor, ServiceMonitor and Probe object,
4155
## and servicemonitors will be installed in the default service namespace.
4156
## Defaults to false.
4157
ignoreNamespaceSelectors: false
4158
## EnforcedNamespaceLabel enforces adding a namespace label of origin for each alert and metric that is user created.
4159
## The label value will always be the namespace of the object that is being created.
4160
## Disabled by default
4161
enforcedNamespaceLabel: ""
4162
## PrometheusRulesExcludedFromEnforce - list of prometheus rules to be excluded from enforcing of adding namespace labels.
4163
## Works only if enforcedNamespaceLabel set to true. Make sure both ruleNamespace and ruleName are set for each pair
4164
## Deprecated, use `excludedFromEnforcement` instead
4165
prometheusRulesExcludedFromEnforce: []
4166
## ExcludedFromEnforcement - list of object references to PodMonitor, ServiceMonitor, Probe and PrometheusRule objects
4167
## to be excluded from enforcing a namespace label of origin.
4168
## Works only if enforcedNamespaceLabel set to true.
4169
## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#objectreference
4170
excludedFromEnforcement: []
4171
## QueryLogFile specifies the file to which PromQL queries are logged. Note that this location must be writable,
4172
## and can be persisted using an attached volume. Alternatively, the location can be set to a stdout location such
4173
## as /dev/stdout to log query information to the default Prometheus log stream. This is only available in versions
4174
## of Prometheus >= 2.16.0. For more details, see the Prometheus docs (https://prometheus.io/docs/guides/query-log/)
4175
queryLogFile: false
4176
# Use to set global sample_limit for Prometheus. This act as default SampleLimit for ServiceMonitor or/and PodMonitor.
4177
# Set to 'false' to disable global sample_limit. or set to a number to override the default value.
4178
sampleLimit: false
4179
# EnforcedKeepDroppedTargetsLimit defines on the number of targets dropped by relabeling that will be kept in memory.
4180
# The value overrides any spec.keepDroppedTargets set by ServiceMonitor, PodMonitor, Probe objects unless spec.keepDroppedTargets
4181
# is greater than zero and less than spec.enforcedKeepDroppedTargets. 0 means no limit.
4182
enforcedKeepDroppedTargets: 0
4183
## EnforcedSampleLimit defines global limit on number of scraped samples that will be accepted. This overrides any SampleLimit
4184
## set per ServiceMonitor or/and PodMonitor. It is meant to be used by admins to enforce the SampleLimit to keep overall
4185
## number of samples/series under the desired limit. Note that if SampleLimit is lower that value will be taken instead.
4186
enforcedSampleLimit: false
4187
## EnforcedTargetLimit defines a global limit on the number of scraped targets. This overrides any TargetLimit set
4188
## per ServiceMonitor or/and PodMonitor. It is meant to be used by admins to enforce the TargetLimit to keep the overall
4189
## number of targets under the desired limit. Note that if TargetLimit is lower, that value will be taken instead, except
4190
## if either value is zero, in which case the non-zero value will be used. If both values are zero, no limit is enforced.
4191
enforcedTargetLimit: false
4192
## Per-scrape limit on number of labels that will be accepted for a sample. If more than this number of labels are present
4193
## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
4194
## 2.27.0 and newer.
4195
enforcedLabelLimit: false
4196
## Per-scrape limit on length of labels name that will be accepted for a sample. If a label name is longer than this number
4197
## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
4198
## 2.27.0 and newer.
4199
enforcedLabelNameLengthLimit: false
4200
## Per-scrape limit on length of labels value that will be accepted for a sample. If a label value is longer than this
4201
## number post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus
4202
## versions 2.27.0 and newer.
4203
enforcedLabelValueLengthLimit: false
4204
## AllowOverlappingBlocks enables vertical compaction and vertical query merge in Prometheus. This is still experimental
4205
## in Prometheus so it may change in any upcoming release.
4206
allowOverlappingBlocks: false
4207
## Specifies the validation scheme for metric and label names.
4208
## Supported values are: Legacy, UTF8
4209
nameValidationScheme: ""
4210
## Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to
4211
## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).
4212
minReadySeconds: 0
4213
## Duration in seconds the pod needs to terminate gracefully.
4214
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination
4215
terminationGracePeriodSeconds: ~
4216
# Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
4217
# because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working
4218
# Use the host's network namespace if true. Make sure to understand the security implications if you want to enable it.
4219
# When hostNetwork is enabled, this will set dnsPolicy to ClusterFirstWithHostNet automatically.
4220
hostNetwork: false
4221
## Use the host's user namespace for Prometheus pods.
4222
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/
4223
hostUsers: ~
4224
# HostAlias holds the mapping between IP and hostnames that will be injected
4225
# as an entry in the pod's hosts file.
4226
hostAliases: []
4227
# - ip: 10.10.0.100
4228
# hostnames:
4229
# - a1.app.local
4230
# - b1.app.local
4231
4232
## TracingConfig configures tracing in Prometheus.
4233
## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#prometheustracingconfig
4234
tracingConfig: {}
4235
## Defines the service discovery role used to discover targets from ServiceMonitor objects and Alertmanager endpoints.
4236
## If set, the value should be either "Endpoints" or "EndpointSlice". If unset, the operator assumes the "Endpoints" role.
4237
serviceDiscoveryRole: ""
4238
## Pod management policy. Kubernetes default is OrderedReady but prometheus-operator default is Parallel.
4239
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
4240
podManagementPolicy: ""
4241
## Update strategy for the StatefulSet.
4242
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
4243
updateStrategy: {}
4244
# type: RollingUpdate
4245
# rollingUpdate:
4246
# maxUnavailable: 1
4247
4248
## Additional configuration which is not covered by the properties above. (passed through tpl)
4249
additionalConfig: {}
4250
## Additional configuration which is not covered by the properties above.
4251
## Useful, if you need advanced templating inside alertmanagerSpec.
4252
## Otherwise, use prometheus.prometheusSpec.additionalConfig (passed through tpl)
4253
additionalConfigString: ""
4254
## Defines the maximum time that the `prometheus` container's startup probe
4255
## will wait before being considered failed. The startup probe will return
4256
## success after the WAL replay is complete. If set, the value should be
4257
## greater than 60 (seconds). Otherwise it will be equal to 900 seconds (15
4258
## minutes).
4259
maximumStartupDurationSeconds: 0
4260
## Set default scrapeProtocols for Prometheus instances
4261
## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#scrapeprotocolstring-alias
4262
scrapeProtocols: []
4263
additionalRulesForClusterRole: []
4264
# - apiGroups: [ "" ]
4265
# resources:
4266
# - nodes/proxy
4267
# verbs: [ "get", "list", "watch" ]
4268
4269
additionalServiceMonitors: []
4270
## Name of the ServiceMonitor to create
4271
##
4272
# - name: ""
4273
4274
## Additional labels to set used for the ServiceMonitorSelector. Together with standard labels from
4275
## the chart
4276
##
4277
# additionalLabels: {}
4278
4279
## Service label for use in assembling a job name of the form <label value>-<port>
4280
## If no label is specified, the service name is used.
4281
##
4282
# jobLabel: ""
4283
4284
## labels to transfer from the kubernetes service to the target
4285
##
4286
# targetLabels: []
4287
4288
## labels to transfer from the kubernetes pods to the target
4289
##
4290
# podTargetLabels: []
4291
4292
## Label selector for services to which this ServiceMonitor applies
4293
##
4294
# selector: {}
4295
## Example which selects all services to be monitored
4296
## with label "monitoredby" with values any of "example-service-1" or "example-service-2"
4297
# matchExpressions:
4298
# - key: "monitoredby"
4299
# operator: In
4300
# values:
4301
# - example-service-1
4302
# - example-service-2
4303
4304
## label selector for services
4305
##
4306
# matchLabels: {}
4307
4308
## Namespaces from which services are selected
4309
##
4310
# namespaceSelector:
4311
## Match any namespace
4312
##
4313
# any: false
4314
4315
## Explicit list of namespace names to select
4316
##
4317
# matchNames: []
4318
4319
## Endpoints of the selected service to be monitored
4320
##
4321
# endpoints: []
4322
## Name of the endpoint's service port
4323
## Mutually exclusive with targetPort
4324
# - port: ""
4325
4326
## Name or number of the endpoint's target port
4327
## Mutually exclusive with port
4328
# - targetPort: ""
4329
4330
## File containing bearer token to be used when scraping targets
4331
##
4332
# bearerTokenFile: ""
4333
4334
## Interval at which metrics should be scraped
4335
##
4336
# interval: 30s
4337
4338
## HTTP path to scrape for metrics
4339
##
4340
# path: /metrics
4341
4342
## HTTP scheme to use for scraping
4343
##
4344
# scheme: http
4345
4346
## TLS configuration to use when scraping the endpoint
4347
##
4348
# tlsConfig:
4349
4350
## Path to the CA file
4351
##
4352
# caFile: ""
4353
4354
## Path to client certificate file
4355
##
4356
# certFile: ""
4357
4358
## Skip certificate verification
4359
##
4360
# insecureSkipVerify: false
4361
4362
## Path to client key file
4363
##
4364
# keyFile: ""
4365
4366
## Server name used to verify host name
4367
##
4368
# serverName: ""
4369
4370
## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
4371
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
4372
##
4373
# metricRelabelings: []
4374
# - action: keep
4375
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
4376
# sourceLabels: [__name__]
4377
4378
## RelabelConfigs to apply to samples before scraping
4379
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
4380
##
4381
# relabelings: []
4382
# - sourceLabels: [__meta_kubernetes_pod_node_name]
4383
# separator: ;
4384
# regex: ^(.*)$
4385
# targetLabel: nodename
4386
# replacement: $1
4387
# action: replace
4388
4389
## Fallback scrape protocol used by Prometheus for scraping metrics
4390
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.ScrapeProtocol
4391
##
4392
# fallbackScrapeProtocol: ""
4393
4394
## Attaches node metadata to the discovered targets
4395
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.AttachMetadata
4396
##
4397
# attachMetadata:
4398
# node: true
4399
additionalPodMonitors: []
4400
## Name of the PodMonitor to create
4401
##
4402
# - name: ""
4403
## Additional labels to set used for the PodMonitorSelector. Together with standard labels from
4404
## the chart
4405
##
4406
# additionalLabels: {}
4407
4408
## Pod label for use in assembling a job name of the form <label value>-<port>
4409
## If no label is specified, the pod endpoint name is used.
4410
##
4411
# jobLabel: ""
4412
4413
## Label selector for pods to which this PodMonitor applies
4414
##
4415
# selector: {}
4416
## Example which selects all Pods to be monitored
4417
## with label "monitoredby" with values any of "example-pod-1" or "example-pod-2"
4418
# matchExpressions:
4419
# - key: "monitoredby"
4420
# operator: In
4421
# values:
4422
# - example-pod-1
4423
# - example-pod-2
4424
4425
## label selector for pods
4426
##
4427
# matchLabels: {}
4428
4429
## PodTargetLabels transfers labels on the Kubernetes Pod onto the target.
4430
##
4431
# podTargetLabels: {}
4432
4433
## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
4434
##
4435
# sampleLimit: 0
4436
4437
## Namespaces from which pods are selected
4438
##
4439
# namespaceSelector:
4440
## Match any namespace
4441
##
4442
# any: false
4443
4444
## Explicit list of namespace names to select
4445
##
4446
# matchNames: []
4447
4448
## Endpoints of the selected pods to be monitored
4449
## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#podmetricsendpoint
4450
##
4451
# podMetricsEndpoints: []
4452
4453
## Fallback scrape protocol used by Prometheus for scraping metrics
4454
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.ScrapeProtocol
4455
##
4456
# fallbackScrapeProtocol: ""
4457
4458
## Attaches node metadata to the discovered targets
4459
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.AttachMetadata
4460
##
4461
# attachMetadata:
4462
# node: true
4463
4464
## Configuration for thanosRuler
4465
## ref: https://thanos.io/tip/components/rule.md/
4466
##
4467
thanosRuler:
4468
## Deploy thanosRuler
4469
##
4470
enabled: false
4471
## Annotations for ThanosRuler
4472
##
4473
annotations: {}
4474
## Service account for ThanosRuler to use.
4475
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
4476
##
4477
serviceAccount:
4478
create: true
4479
name: ""
4480
annotations: {}
4481
## Configure pod disruption budgets for ThanosRuler
4482
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
4483
##
4484
podDisruptionBudget:
4485
enabled: false
4486
minAvailable: 1
4487
# maxUnavailable: ""
4488
unhealthyPodEvictionPolicy: AlwaysAllow
4489
ingress:
4490
enabled: false
4491
ingressClassName: ""
4492
annotations: {}
4493
labels: {}
4494
## Hosts must be provided if Ingress is enabled.
4495
##
4496
hosts: []
4497
# - thanosruler.domain.com
4498
4499
## Paths to use for ingress rules - one path should match the thanosruler.routePrefix
4500
##
4501
paths: []
4502
# - /
4503
4504
## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
4505
## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
4506
# pathType: ImplementationSpecific
4507
4508
## TLS configuration for ThanosRuler Ingress
4509
## Secret must be manually created in the namespace
4510
##
4511
tls: []
4512
# - secretName: thanosruler-general-tls
4513
# hosts:
4514
# - thanosruler.example.com
4515
# -- BETA: Configure the gateway routes for the chart here.
4516
# More routes can be added by adding a dictionary key like the 'main' route.
4517
# Be aware that this is an early beta of this feature,
4518
# kube-prometheus-stack does not guarantee this works and is subject to change.
4519
# Being BETA this can/will change in the future without notice, do not use unless you want to take that risk
4520
# [[ref]](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1alpha2)
4521
route:
4522
main:
4523
# -- Enables or disables the route
4524
enabled: false
4525
# -- Set the route apiVersion, e.g. gateway.networking.k8s.io/v1 or gateway.networking.k8s.io/v1alpha2
4526
apiVersion: gateway.networking.k8s.io/v1
4527
# -- Set the route kind
4528
# Valid options are GRPCRoute, HTTPRoute, TCPRoute, TLSRoute, UDPRoute
4529
kind: HTTPRoute
4530
annotations: {}
4531
labels: {}
4532
hostnames: []
4533
# - my-filter.example.com
4534
parentRefs: []
4535
# - name: acme-gw
4536
4537
# -- create http route for redirect (https://gateway-api.sigs.k8s.io/guides/http-redirect-rewrite/#http-to-https-redirects)
4538
## Take care that you only enable this on the http listener of the gateway to avoid an infinite redirect.
4539
## matches, filters and additionalRules will be ignored if this is set to true.
4540
httpsRedirect: false
4541
matches:
4542
- path:
4543
type: PathPrefix
4544
value: /
4545
## Filters define the filters that are applied to requests that match this rule.
4546
filters: []
4547
## Session persistence configuration for the route rule.
4548
sessionPersistence: {}
4549
# sessionName: route
4550
# type: Cookie
4551
# absoluteTimeout: 12h
4552
# cookieConfig:
4553
# lifetimeType: Permanent
4554
4555
## Additional custom rules that can be added to the route
4556
additionalRules: []
4557
## Configuration for ThanosRuler service
4558
##
4559
service:
4560
enabled: true
4561
annotations: {}
4562
labels: {}
4563
clusterIP: ""
4564
ipDualStack:
4565
enabled: false
4566
ipFamilies: ["IPv6", "IPv4"]
4567
ipFamilyPolicy: "PreferDualStack"
4568
## Port for ThanosRuler Service to listen on
4569
##
4570
port: 10902
4571
## To be used with a proxy extraContainer port
4572
##
4573
targetPort: 10902
4574
## Port to expose on each node
4575
## Only used if service.type is 'NodePort'
4576
##
4577
nodePort: 30905
4578
## List of IP addresses at which the Prometheus server service is available
4579
## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
4580
##
4581
4582
## Additional ports to open for ThanosRuler service
4583
additionalPorts: []
4584
externalIPs: []
4585
loadBalancerIP: ""
4586
loadBalancerSourceRanges: []
4587
## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
4588
##
4589
externalTrafficPolicy: Cluster
4590
## Service type
4591
##
4592
type: ClusterIP
4593
## Configuration for creating a ServiceMonitor for the ThanosRuler service
4594
##
4595
serviceMonitor:
4596
## If true, create a serviceMonitor for thanosRuler
4597
##
4598
selfMonitor: true
4599
## Scrape interval. If not set, the Prometheus default scrape interval is used.
4600
##
4601
interval: ""
4602
## Additional labels
4603
##
4604
additionalLabels: {}
4605
## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
4606
##
4607
sampleLimit: 0
4608
## TargetLimit defines a limit on the number of scraped targets that will be accepted.
4609
##
4610
targetLimit: 0
4611
## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
4612
##
4613
labelLimit: 0
4614
## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
4615
##
4616
labelNameLengthLimit: 0
4617
## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
4618
##
4619
labelValueLengthLimit: 0
4620
## proxyUrl: URL of a proxy that should be used for scraping.
4621
##
4622
proxyUrl: ""
4623
## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
4624
scheme: ""
4625
## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
4626
## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#tlsconfig
4627
tlsConfig: {}
4628
bearerTokenFile:
4629
## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
4630
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
4631
##
4632
metricRelabelings: []
4633
# - action: keep
4634
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
4635
# sourceLabels: [__name__]
4636
4637
## RelabelConfigs to apply to samples before scraping
4638
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
4639
##
4640
relabelings: []
4641
# - sourceLabels: [__meta_kubernetes_pod_node_name]
4642
# separator: ;
4643
# regex: ^(.*)$
4644
# targetLabel: nodename
4645
# replacement: $1
4646
# action: replace
4647
4648
## Additional Endpoints
4649
##
4650
additionalEndpoints: []
4651
# - port: oauth-metrics
4652
# path: /metrics
4653
## Settings affecting thanosRulerSpec
4654
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#thanosrulerspec
4655
##
4656
thanosRulerSpec:
4657
## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
4658
## Metadata Labels and Annotations gets propagated to the ThanosRuler pods.
4659
##
4660
podMetadata: {}
4661
##
4662
serviceName:
4663
## Image of ThanosRuler
4664
##
4665
image:
4666
registry: cgr.dev
4667
repository: chainguard-private/thanos
4668
tag: latest
4669
sha: sha256:d4c37033c9f29424057f65ebe77a557e43ee0c1269c2f3b87368c0abbc1f74bd
4670
## Namespaces to be selected for PrometheusRules discovery.
4671
## If nil, select own namespace. Namespaces to be selected for ServiceMonitor discovery.
4672
## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#namespaceselector for usage
4673
##
4674
ruleNamespaceSelector: {}
4675
## If true, a nil or {} value for thanosRuler.thanosRulerSpec.ruleSelector will cause the
4676
## prometheus resource to be created with selectors based on values in the helm deployment,
4677
## which will also match the PrometheusRule resources created
4678
##
4679
ruleSelectorNilUsesHelmValues: true
4680
## PrometheusRules to be selected for target discovery.
4681
## If {}, select all PrometheusRules
4682
##
4683
ruleSelector: {}
4684
## Example which selects all PrometheusRules resources
4685
## with label "prometheus" with values any of "example-rules" or "example-rules-2"
4686
# ruleSelector:
4687
# matchExpressions:
4688
# - key: prometheus
4689
# operator: In
4690
# values:
4691
# - example-rules
4692
# - example-rules-2
4693
#
4694
## Example which selects all PrometheusRules resources with label "role" set to "example-rules"
4695
# ruleSelector:
4696
# matchLabels:
4697
# role: example-rules
4698
4699
## Define Log Format
4700
# Use logfmt (default) or json logging
4701
logFormat: logfmt
4702
## Log level for ThanosRuler to be configured with.
4703
##
4704
logLevel: info
4705
## Size is the expected size of the thanosRuler cluster. The controller will eventually make the size of the
4706
## running cluster equal to the expected size.
4707
replicas: 1
4708
## Time duration ThanosRuler shall retain data for. Default is '24h', and must match the regular expression
4709
## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours).
4710
##
4711
retention: 24h
4712
## Interval between consecutive evaluations.
4713
##
4714
evaluationInterval: ""
4715
## Storage is the definition of how storage will be used by the ThanosRuler instances.
4716
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/platform/storage.md
4717
##
4718
storage: {}
4719
# volumeClaimTemplate:
4720
# spec:
4721
# storageClassName: gluster
4722
# accessModes: ["ReadWriteOnce"]
4723
# resources:
4724
# requests:
4725
# storage: 50Gi
4726
# selector: {}
4727
4728
## AlertmanagerConfig define configuration for connecting to alertmanager.
4729
## Only available with Thanos v0.10.0 and higher. Maps to the alertmanagers.config Thanos Ruler arg.
4730
alertmanagersConfig:
4731
# use existing secret, if configured, alertmanagersConfig.secret will not be used
4732
existingSecret: {}
4733
# name: ""
4734
# key: ""
4735
# will render alertmanagersConfig secret data and configure it to be used by Thanos Ruler custom resource, ignored when alertmanagersConfig.existingSecret is set
4736
# https://thanos.io/tip/components/rule.md/#alertmanager
4737
secret: {}
4738
# alertmanagers:
4739
# - api_version: v2
4740
# http_config:
4741
# basic_auth:
4742
# username: some_user
4743
# password: some_pass
4744
# static_configs:
4745
# - alertmanager.thanos.io
4746
# scheme: http
4747
# timeout: 10s
4748
## DEPRECATED. Define URLs to send alerts to Alertmanager. For Thanos v0.10.0 and higher, alertmanagersConfig should be used instead.
4749
## Note: this field will be ignored if alertmanagersConfig is specified. Maps to the alertmanagers.url Thanos Ruler arg.
4750
# alertmanagersUrl:
4751
4752
## The external URL the Thanos Ruler instances will be available under. This is necessary to generate correct URLs. This is necessary if Thanos Ruler is not served from the root of a DNS name.
4753
##
4754
externalPrefix:
4755
## If true, http://{{ template "kube-prometheus-stack.thanosRuler.name" . }}.{{ template "kube-prometheus-stack.namespace" . }}:{{ .Values.thanosRuler.service.port }}
4756
## will be used as value for externalPrefix
4757
externalPrefixNilUsesHelmValues: true
4758
## The route prefix ThanosRuler registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true,
4759
## but the server serves requests under a different route prefix. For example for use with kubectl proxy.
4760
##
4761
routePrefix: /
4762
## ObjectStorageConfig configures object storage in Thanos
4763
objectStorageConfig:
4764
# use existing secret, if configured, objectStorageConfig.secret will not be used
4765
existingSecret: {}
4766
# name: ""
4767
# key: ""
4768
# will render objectStorageConfig secret data and configure it to be used by Thanos Ruler custom resource, ignored when objectStorageConfig.existingSecret is set
4769
# https://thanos.io/tip/thanos/storage.md/#s3
4770
secret: {}
4771
# type: S3
4772
# config:
4773
# bucket: ""
4774
# endpoint: ""
4775
# region: ""
4776
# access_key: ""
4777
# secret_key: ""
4778
## Labels by name to drop before sending to alertmanager
4779
## Maps to the --alert.label-drop flag of thanos ruler.
4780
alertDropLabels: []
4781
## QueryEndpoints defines Thanos querier endpoints from which to query metrics.
4782
## Maps to the --query flag of thanos ruler.
4783
queryEndpoints: []
4784
## Define configuration for connecting to thanos query instances. If this is defined, the queryEndpoints field will be ignored.
4785
## Maps to the query.config CLI argument. Only available with thanos v0.11.0 and higher.
4786
queryConfig:
4787
# use existing secret, if configured, queryConfig.secret will not be used
4788
existingSecret: {}
4789
# name: ""
4790
# key: ""
4791
# render queryConfig secret data and configure it to be used by Thanos Ruler custom resource, ignored when queryConfig.existingSecret is set
4792
# https://thanos.io/tip/components/rule.md/#query-api
4793
secret: {}
4794
# - http_config:
4795
# basic_auth:
4796
# username: some_user
4797
# password: some_pass
4798
# static_configs:
4799
# - URL
4800
# scheme: http
4801
# timeout: 10s
4802
## Labels configure the external label pairs to ThanosRuler. A default replica
4803
## label `thanos_ruler_replica` will be always added as a label with the value
4804
## of the pod's name and it will be dropped in the alerts.
4805
labels: {}
4806
## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions.
4807
##
4808
paused: false
4809
## Allows setting additional arguments for the ThanosRuler container
4810
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#thanosruler
4811
##
4812
additionalArgs: []
4813
# - name: remote-write.config
4814
# value: |-
4815
# "remote_write":
4816
# - "name": "receiver-0"
4817
# "remote_timeout": "30s"
4818
# "url": "http://thanos-receiver-0.thanos-receiver:8081/api/v1/receive"
4819
4820
## Define which Nodes the Pods are scheduled on.
4821
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
4822
##
4823
nodeSelector: {}
4824
## Define resources requests and limits for single Pods.
4825
## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
4826
##
4827
resources: {}
4828
# requests:
4829
# memory: 400Mi
4830
4831
## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
4832
## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
4833
## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
4834
## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
4835
##
4836
podAntiAffinity: "soft"
4837
## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
4838
## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
4839
##
4840
podAntiAffinityTopologyKey: kubernetes.io/hostname
4841
## Assign custom affinity rules to the thanosRuler instance
4842
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
4843
##
4844
affinity: {}
4845
# nodeAffinity:
4846
# requiredDuringSchedulingIgnoredDuringExecution:
4847
# nodeSelectorTerms:
4848
# - matchExpressions:
4849
# - key: kubernetes.io/e2e-az-name
4850
# operator: In
4851
# values:
4852
# - e2e-az1
4853
# - e2e-az2
4854
4855
## If specified, the pod's tolerations.
4856
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
4857
##
4858
tolerations: []
4859
# - key: "key"
4860
# operator: "Equal"
4861
# value: "value"
4862
# effect: "NoSchedule"
4863
4864
## If specified, the pod's topology spread constraints.
4865
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
4866
##
4867
topologySpreadConstraints: []
4868
# - maxSkew: 1
4869
# topologyKey: topology.kubernetes.io/zone
4870
# whenUnsatisfiable: DoNotSchedule
4871
# labelSelector:
4872
# matchLabels:
4873
# app: thanos-ruler
4874
4875
## SecurityContext holds pod-level security attributes and common container settings.
4876
## This defaults to a non-root user with uid 1000 and gid 2000.
4877
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
4878
##
4879
securityContext:
4880
runAsGroup: 2000
4881
runAsNonRoot: true
4882
runAsUser: 1000
4883
fsGroup: 2000
4884
seccompProfile:
4885
type: RuntimeDefault
4886
## Use the host's user namespace for ThanosRuler pods.
4887
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/
4888
hostUsers: ~
4889
## ListenLocal makes the ThanosRuler server listen on loopback, so that it does not bind against the Pod IP.
4890
## Note this is only for the ThanosRuler UI, not the gossip communication.
4891
##
4892
listenLocal: false
4893
## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a ThanosRuler pod.
4894
##
4895
containers: []
4896
# Additional volumes on the output StatefulSet definition.
4897
volumes: []
4898
# Additional VolumeMounts on the output StatefulSet definition.
4899
volumeMounts: []
4900
## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
4901
## (permissions, dir tree) on mounted volumes before starting prometheus
4902
initContainers: []
4903
## Priority class assigned to the Pods
4904
##
4905
priorityClassName: ""
## PortName to use for ThanosRuler.
##
portName: "web"
## Duration in seconds the pod needs to terminate gracefully.
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination
terminationGracePeriodSeconds: ~
## WebTLSConfig defines the TLS parameters for HTTPS
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#thanosrulerwebspec
web: {}
## Pod management policy. Kubernetes default is OrderedReady but prometheus-operator default is Parallel.
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
podManagementPolicy: ""
## Update strategy for the StatefulSet.
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
updateStrategy: {}
# type: RollingUpdate
# rollingUpdate:
# maxUnavailable: 1

## Additional configuration which is not covered by the properties above. (passed through tpl)
additionalConfig: {}
## Additional configuration which is not covered by the properties above.
## Useful, if you need advanced templating
additionalConfigString: ""
## ExtraSecret can be used to store various data in an extra secret
## (use it for example to store hashed basic auth credentials)
extraSecret:
## if not set, name will be auto generated
# name: ""
annotations: {}
data: {}
# auth: |
# foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
# someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.
## Setting to true produces cleaner resource names, but requires a data migration because the name of the persistent volume changes. Therefore this should only be set once on initial installation.
##
cleanPrometheusOperatorObjectNames: false
## Extra manifests to deploy. Can be of type dict or list.
## If dict, keys are ignored and only values are used.
## Items contained within extraObjects can be defined as dict or string and are passed through tpl.
extraManifests: null
# - apiVersion: v1
# kind: ConfigMap
# metadata:
# labels:
# name: prometheus-extra
# data:
# extra-data: "value"
#
# can also be defined as a string, useful for templating field names
# - |
# apiVersion: v1
# kind: Secret
# type: Opaque
# metadata:
# name: super-secret
# labels:
# {{- range $key, $value := .Values.commonLabels }}
# {{ $key }}: {{ $value }}
# {{- end }}
# data:
# plaintext: Zm9vYmFy
# templated: '{{ print "foobar" | upper | b64enc }}'
4969

The trusted source for open source

Talk to an expert
PrivacyTerms

Product

Chainguard ContainersChainguard LibrariesChainguard VMsChainguard OS PackagesChainguard ActionsChainguard Agent SkillsIntegrationsPricing
© 2026 Chainguard, Inc. All Rights Reserved.
Chainguard® and the Chainguard logo are registered trademarks of Chainguard, Inc. in the United States and/or other countries.
The other respective trademarks mentioned on this page are owned by the respective companies and use of them does not imply any affiliation or endorsement.