1# Default values for kube-prometheus-stack.
2# This is a YAML-formatted file.
3# Declare variables to be passed into your templates.
5## Provide a name in place of kube-prometheus-stack for `app:` labels
8## Override the deployment namespace
11## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.26.6
13kubeTargetVersionOverride: ""
14## Allow kubeVersion to be overridden while creating the ingress
16kubeVersionOverride: ""
17## Provide a name to substitute for the full names of resources
20## Labels to apply to all resources
26## Install Prometheus Operator CRDs
30 ## The CRD upgrade job mitigates the limitation of helm not being able to upgrade CRDs.
31 ## The job will apply the CRDs to the cluster before the operator is deployed, using helm hooks.
32 ## It deploys a corresponding clusterrole, clusterrolebinding and serviceaccount to apply the CRDs.
33 ## This feature is in preview, off by default and may change in the future.
40 repository: chainguard-private/busybox
42 sha: sha256:8322ce98ac75f980c930665c041aa2db8c145ca8d5f8a9a120a18f33d25ecc77
43 pullPolicy: IfNotPresent
46 repository: chainguard-private/kubectl
48 sha: sha256:b37591383f2cb5642d2477662cf330f9515dd1fea24565d3e5c56eec8de6d3c7
49 pullPolicy: IfNotPresent
51 ## Define resources requests and limits for single Pods.
52 ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
58 ## Additional volume mounts
61 ## Define which Nodes the Pods are scheduled on.
62 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
65 ## Assign custom affinity rules to the upgrade-crd job
66 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
70 # requiredDuringSchedulingIgnoredDuringExecution:
73 # - key: kubernetes.io/e2e-az-name
79 ## If specified, the pod's tolerations.
80 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
86 # effect: "NoSchedule"
88 ## If specified, the pod's topology spread constraints.
89 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
91 topologySpreadConstraints: []
93 # topologyKey: topology.kubernetes.io/zone
94 # whenUnsatisfiable: DoNotSchedule
99 # ## Labels to add to the upgrade-crd job
102 ## Annotations to add to the upgrade-crd job
105 ## Labels to add to the upgrade-crd pod
108 ## Annotations to add to the upgrade-crd pod
111 ## Service account for upgrade crd job to use.
112 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
119 automountServiceAccountToken: true
120 ## Automounting API credentials for upgrade crd job pod.
122 automountServiceAccountToken: true
123 ## Container-specific security context configuration
124 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
126 containerSecurityContext:
127 allowPrivilegeEscalation: false
128 readOnlyRootFilesystem: true
132 ## SecurityContext holds pod-level security attributes and common container settings.
133 ## This defaults to non root user with uid 1000 and gid 2000. *v1.PodSecurityContext false
134 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
143## Custom rules to override "for" and "severity" in defaultRules
146# AlertmanagerFailedReload:
148# AlertmanagerMembersInconsistent:
152## Create default rules for monitoring the cluster
159 configReloaders: true
161 k8sContainerCpuUsageSecondsTotal: true
162 k8sContainerMemoryCache: true
163 k8sContainerMemoryRss: true
164 k8sContainerMemorySwap: true
165 k8sContainerResource: true
166 k8sContainerMemoryWorkingSetBytes: true
168 kubeApiserverAvailability: true
169 kubeApiserverBurnrate: true
170 kubeApiserverHistogram: true
171 kubeApiserverSlos: true
172 kubeControllerManager: true
175 kubePrometheusGeneral: true
176 kubePrometheusNodeRecording: true
178 kubernetesResources: true
179 kubernetesStorage: true
180 kubernetesSystem: true
181 kubeSchedulerAlerting: true
182 kubeSchedulerRecording: true
183 kubeStateMetrics: true
186 nodeExporterAlerting: true
187 nodeExporterRecording: true
189 prometheusOperator: true
191 # Defines the operator for namespace selection in rules
192 # Use "=~" to include namespaces matching the pattern (default)
193 # Use "!~" to exclude namespaces matching the pattern
194 appNamespacesOperator: "=~"
195 ## Reduce app namespace alert scope
196 appNamespacesTarget: ".*"
197 ## Set keep_firing_for for all alerts
199 ## Labels for default rules
201 ## Annotations for default rules
203 ## Additional labels for PrometheusRule alerts
204 additionalRuleLabels: {}
205 ## Additional annotations for PrometheusRule alerts
206 additionalRuleAnnotations: {}
207 ## Additional labels for specific PrometheusRule alert groups
208 additionalRuleGroupLabels:
213 k8sContainerCpuUsageSecondsTotal: {}
214 k8sContainerMemoryCache: {}
215 k8sContainerMemoryRss: {}
216 k8sContainerMemorySwap: {}
217 k8sContainerResource: {}
219 kubeApiserverAvailability: {}
220 kubeApiserverBurnrate: {}
221 kubeApiserverHistogram: {}
222 kubeApiserverSlos: {}
223 kubeControllerManager: {}
226 kubePrometheusGeneral: {}
227 kubePrometheusNodeRecording: {}
229 kubernetesResources: {}
230 kubernetesStorage: {}
232 kubeSchedulerAlerting: {}
233 kubeSchedulerRecording: {}
237 nodeExporterAlerting: {}
238 nodeExporterRecording: {}
240 prometheusOperator: {}
241 ## Additional annotations for specific PrometheusRule alert groups
242 additionalRuleGroupAnnotations:
247 k8sContainerCpuUsageSecondsTotal: {}
248 k8sContainerMemoryCache: {}
249 k8sContainerMemoryRss: {}
250 k8sContainerMemorySwap: {}
251 k8sContainerResource: {}
253 kubeApiserverAvailability: {}
254 kubeApiserverBurnrate: {}
255 kubeApiserverHistogram: {}
256 kubeApiserverSlos: {}
257 kubeControllerManager: {}
260 kubePrometheusGeneral: {}
261 kubePrometheusNodeRecording: {}
263 kubernetesResources: {}
264 kubernetesStorage: {}
266 kubeSchedulerAlerting: {}
267 kubeSchedulerRecording: {}
271 nodeExporterAlerting: {}
272 nodeExporterRecording: {}
274 prometheusOperator: {}
275 additionalAggregationLabels: []
276 ## Prefix for runbook URLs. Use this to override the first part of the runbookURLs that is common to all rules.
277 runbookUrl: "https://runbooks.prometheus-operator.dev/runbooks"
279 fsSelector: 'fstype!=""'
280 # fsSelector: 'fstype=~"ext[234]|btrfs|xfs|zfs"'
281 ## Disabled PrometheusRule alerts
284 # NodeRAIDDegraded: true
285## Deprecated way to provide custom recording or alerting rules to be deployed into the cluster.
287# additionalPrometheusRules: []
288# - name: my-rule-file
293# expr: 100 * my_record
295## Provide custom recording or alerting rules to be deployed into the cluster.
297additionalPrometheusRulesMap: {}
303# expr: 100 * my_record
309 ## Create ClusterRoles that extend the existing view, edit and admin ClusterRoles to interact with prometheus-operator CRDs
310 ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles
311 createAggregateClusterRoles: false
312 ## Global image registry to use if it needs to be overridden for some specific use cases (e.g. local registries, custom images, ...)
315 ## Reference to one or more secrets to be used when pulling images
316 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
319 # - name: "image-pull-secret"
321 # - "image-pull-secret"
323 ## Deploys the windows-exporter and Windows-specific dashboards and rules (job name must be 'windows-exporter')
325## Configuration for prometheus-windows-exporter
326## ref: https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-windows-exporter
328prometheus-windows-exporter:
329 ## Enable ServiceMonitor and set Kubernetes label to use as a job label
336 ## Set job label to 'windows-exporter' as required by the default Prometheus rules and Grafana dashboards
339 jobLabel: windows-exporter
340 ## Enable memory and container metrics as required by the default Prometheus rules and Grafana dashboards
344 enabled: '[defaults],memory,container'
345## Configuration for alertmanager
346## ref: https://prometheus.io/docs/alerting/alertmanager/
349 ## Deploy alertmanager
352 # Optional: Override the namespace where Alertmanager will be deployed.
353 namespaceOverride: ""
354 ## Annotations for Alertmanager
357 ## Additional labels for Alertmanager
360 ## API that Prometheus will use to communicate with alertmanager. Possible values are v1, v2
363 ## @param alertmanager.enableFeatures Enable access to Alertmanager disabled features.
366 ## Create dashboard configmap even if alertmanager deployment has been disabled
368 forceDeployDashboards: false
369 ## Network Policy configuration
372 # -- Enable network policy for Alertmanager
374 # -- Define policy types. If egress is enabled, both Ingress and Egress will be used
375 # Valid values are ["Ingress"] or ["Ingress", "Egress"]
379 # -- Gateway (formerly ingress controller) configuration
382 # -- Gateway namespace
385 # -- Gateway pod labels
388 # app.kubernetes.io/name: ingress-nginx
389 # -- Additional custom ingress rules
391 additionalIngress: []
393 # - namespaceSelector:
395 # name: another-namespace
402 # app.kubernetes.io/name: loki
407 # -- Configure egress rules
410 # -- Enable egress rules. When enabled, policyTypes will include Egress
413 # -- Custom egress rules
417 # - namespaceSelector: {}
424 # -- Enable rules for alertmanager cluster traffic
426 enableClusterRules: true
427 # -- Configure monitoring component rules
430 # -- Enable ingress from Prometheus
433 # -- Enable ingress for config reloader metrics
436 ## Service account for Alertmanager to use.
437 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
443 automountServiceAccountToken: true
444 ## Configure pod disruption budgets for Alertmanager
445 ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
451 unhealthyPodEvictionPolicy: AlwaysAllow
452 ## Enable vertical pod autoscaler support for Alertmanager
453 ## ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler
455 verticalPodAutoscaler:
457 # Recommender responsible for generating recommendation for the object.
458 # List should be empty (then the default recommender will generate the recommendation)
459 # or contain exactly one recommender.
461 # - name: custom-recommender-performance
463 # List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
464 controlledResources: []
465 # Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits.
466 # controlledValues: RequestsAndLimits
468 # Define the max allowed resources for the pod
472 # Define the min allowed resources for the pod
478 # Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
479 # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "InPlaceOrRecreate".
481 ## Alertmanager configuration directives
482 ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
483 ## https://prometheus.io/webtools/alerting/routing-tree-editor/
490 - 'severity = critical'
492 - 'severity =~ warning|info'
497 - 'severity = warning'
504 - 'alertname = InfoInhibitor'
510 - 'alertname = InfoInhibitor'
512 group_by: ['namespace']
520 - alertname = "Watchdog"
524 - '/etc/alertmanager/config/*.tmpl'
525 ## Alertmanager configuration directives (as string type, preferred over the config hash map)
526 ## stringConfig will be used only if tplConfig is true
527 ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
528 ## https://prometheus.io/webtools/alerting/routing-tree-editor/
531 ## Pass the Alertmanager configuration directives through Helm's templating
532 ## engine. If the Alertmanager configuration contains Alertmanager templates,
533 ## they'll need to be properly escaped so that they are not interpreted by
535 ## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function
536 ## https://prometheus.io/docs/alerting/configuration/#tmpl_string
537 ## https://prometheus.io/docs/alerting/notifications/
538 ## https://prometheus.io/docs/alerting/notification_examples/
540 ## Alertmanager template files to format alerts
541 ## By default, templateFiles are placed in /etc/alertmanager/config/ and if
542 ## they have a .tmpl file suffix will be loaded. See config.templates above
543 ## to change, add other suffixes. If adding other suffixes, be sure to update
544 ## config.templates above to include those suffixes.
545 ## ref: https://prometheus.io/docs/alerting/notifications/
546 ## https://prometheus.io/docs/alerting/notification_examples/
550 ## An example template:
551 # template_1.tmpl: |-
552 # {{ define "cluster" }}{{ .ExternalURL | reReplaceAll ".*alertmanager\\.(.*)" "$1" }}{{ end }}
554 # {{ define "slack.myorg.text" }}
556 # {{ range .Alerts }}
557 # *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}`
558 # *Cluster:* {{ template "cluster" $root }}
559 # *Description:* {{ .Annotations.description }}
560 # *Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:>
561 # *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:>
563 # {{ range .Labels.SortedPairs }} - *{{ .Name }}:* `{{ .Value }}`
573 ## Override ingress to a different defined port on the service
575 ## Override ingress to a different service then the default, this is useful if you need to
576 ## point to a specific instance of the alertmanager (eg kube-prometheus-stack-alertmanager-0)
577 # serviceName: kube-prometheus-stack-alertmanager-0
579 ## Hosts must be provided if Ingress is enabled.
582 # - alertmanager.domain.com
584 ## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix
589 ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
590 ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
591 # pathType: ImplementationSpecific
593 ## TLS configuration for Alertmanager Ingress
594 ## Secret must be manually created in the namespace
597 # - secretName: alertmanager-general-tls
599 # - alertmanager.example.com
600 # -- BETA: Configure the gateway routes for the chart here.
601 # More routes can be added by adding a dictionary key like the 'main' route.
602 # Be aware that this is an early beta of this feature,
603 # kube-prometheus-stack does not guarantee this works and is subject to change.
604 # Being BETA this can/will change in the future without notice, do not use unless you want to take that risk
605 # [[ref]](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1alpha2)
608 # -- Enables or disables the route
610 # -- Set the route apiVersion, e.g. gateway.networking.k8s.io/v1 or gateway.networking.k8s.io/v1alpha2
611 apiVersion: gateway.networking.k8s.io/v1
612 # -- Set the route kind
613 # Valid options are GRPCRoute, HTTPRoute, TCPRoute, TLSRoute, UDPRoute
618 # - my-filter.example.com
622 # -- create http route for redirect (https://gateway-api.sigs.k8s.io/guides/http-redirect-rewrite/#http-to-https-redirects)
623 ## Take care that you only enable this on the http listener of the gateway to avoid an infinite redirect.
624      ## matches, filters and additionalRules will be ignored if this is set to true. Be aware
630 ## Filters define the filters that are applied to requests that match this rule.
632 ## Session persistence configuration for the route rule.
633 sessionPersistence: {}
636 # absoluteTimeout: 12h
638 # lifetimeType: Permanent
640 ## Additional custom rules that can be added to the route
642 ## Configuration for Alertmanager secret
646 ## Configuration for creating an Ingress that will map to each Alertmanager replica service
647 ## alertmanager.servicePerReplica must be enabled
654 ## Final form of the hostname for each per replica ingress is
655 ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }}
657 ## Prefix for the per replica ingress that will have `-$replicaNumber`
658 ## appended to the end
660 ## Domain that will be used for the per replica ingress
662 ## Paths to use for ingress rules
667 ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
668 ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
669 # pathType: ImplementationSpecific
671 ## Secret name containing the TLS certificate for alertmanager per replica ingress
672 ## Secret must be manually created in the namespace
674 ## Separated secret for each per replica Ingress. Can be used together with cert-manager
678 ## Final form of the secret for each per replica ingress is
679 ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }}
681 prefix: "alertmanager"
682 ## Configuration for Alertmanager service
691 ipFamilies: ["IPv6", "IPv4"]
692 ipFamilyPolicy: "PreferDualStack"
693 ## Port for Alertmanager Service to listen on
696 ## Port for Alertmanager cluster communication
699 ## To be used with a proxy extraContainer port
702 ## Port to expose on each node
703 ## Only used if service.type is 'NodePort'
706 ## List of IP addresses at which the Prometheus server service is available
707 ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
710 ## Additional ports to open for Alertmanager service
713 # - name: oauth-proxy
716 # - name: oauth-metrics
722 loadBalancerSourceRanges: []
723 ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
725 externalTrafficPolicy: Cluster
726 ## If you want to make sure that connections from a particular client are passed to the same Pod each time
727 ## Accepts 'ClientIP' or 'None'
729 sessionAffinity: None
730 ## If you want to modify the ClientIP sessionAffinity timeout
731 ## The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP"
733 sessionAffinityConfig:
735 timeoutSeconds: 10800
739 ## Configuration for creating a separate Service for each statefulset Alertmanager replica
744 ## Port for Alertmanager Service per replica to listen on
747 ## To be used with a proxy extraContainer port
749 ## Port to expose on each node
750 ## Only used if servicePerReplica.type is 'NodePort'
753 ## Loadbalancer source IP ranges
754 ## Only used if servicePerReplica.type is "LoadBalancer"
755 loadBalancerSourceRanges: []
756 ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
758 externalTrafficPolicy: Cluster
762 ## Configuration for creating a ServiceMonitor for AlertManager
765 ## If true, a ServiceMonitor will be created for the AlertManager service.
768 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
774 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
777 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
780 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
783    ## Per-scrape limit on length of label names that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
785 labelNameLengthLimit: 0
786    ## Per-scrape limit on length of label values that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
788 labelValueLengthLimit: 0
789 ## proxyUrl: URL of a proxy that should be used for scraping.
792 ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
794 ## enableHttp2: Whether to enable HTTP2.
795 ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#endpoint
797 ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
798 ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#tlsconfig
801 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
802 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
804 metricRelabelings: []
806 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
807 # sourceLabels: [__name__]
809 ## RelabelConfigs to apply to samples before scraping
810 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
813 # - sourceLabels: [__meta_kubernetes_pod_node_name]
816 # targetLabel: nodename
820 ## Additional Endpoints
822 additionalEndpoints: []
823 # - port: oauth-metrics
825 ## Settings affecting alertmanagerSpec
826 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#alertmanagerspec
829 ## Statefulset's persistent volume claim retention policy
830 ## whenDeleted and whenScaled determine whether
831 ## statefulset's PVCs are deleted (true) or retained (false)
832 ## on scaling down and deleting statefulset, respectively.
833 ## Requires Kubernetes version 1.27.0+.
834 ## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
835 persistentVolumeClaimRetentionPolicy: {}
836 # whenDeleted: Retain
839 ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
840 ## Metadata Labels and Annotations gets propagated to the Alertmanager pods.
845 ## Image of Alertmanager
849 repository: chainguard-private/prometheus-alertmanager
851 sha: sha256:b8361113442ad39e2f8e3cf70e0621e8a0e0975d53d79c3646ad875ae9301da8
852 pullPolicy: IfNotPresent
853 ## If true then the user will be responsible to provide a secret with alertmanager configuration
854 ## So when true the config part will be ignored (including templateFiles) and the one in the secret will be used
856 useExistingSecret: false
857 ## Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the
858 ## Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/.
861 ## If false then the user will opt out of automounting API credentials.
863 automountServiceAccountToken: true
864 ## ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods.
865 ## The ConfigMaps are mounted into /etc/alertmanager/configmaps/.
868 ## ConfigSecret is the name of a Kubernetes Secret in the same namespace as the Alertmanager object, which contains configuration for
869 ## this Alertmanager instance. Defaults to 'alertmanager-' The secret is mounted into /etc/alertmanager/config.
873 ## WebTLSConfig defines the TLS parameters for HTTPS
874 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#alertmanagerwebspec
876 ## AlertmanagerConfigs to be selected to merge and configure Alertmanager with.
878 alertmanagerConfigSelector: {}
879 ## Example which selects all alertmanagerConfig resources
880 ## with label "alertconfig" with values any of "example-config" or "example-config-2"
881 # alertmanagerConfigSelector:
889 ## Example which selects all alertmanagerConfig resources with label "role" set to "example-config"
890 # alertmanagerConfigSelector:
892 # role: example-config
894 ## Namespaces to be selected for AlertmanagerConfig discovery. If nil, only check own namespace.
896 alertmanagerConfigNamespaceSelector: {}
897 ## Example which selects all namespaces
898 ## with label "alertmanagerconfig" with values any of "example-namespace" or "example-namespace-2"
899 # alertmanagerConfigNamespaceSelector:
901 # - key: alertmanagerconfig
904 # - example-namespace
905 # - example-namespace-2
907 ## Example which selects all namespaces with label "alertmanagerconfig" set to "enabled"
908 # alertmanagerConfigNamespaceSelector:
910 # alertmanagerconfig: enabled
912    ## AlertmanagerConfig to be used as top level configuration
914 alertmanagerConfiguration: {}
915 ## Example with select a global alertmanagerconfig
916 # alertmanagerConfiguration:
917 # name: global-alertmanager-Configuration
919 ## Defines the strategy used by AlertmanagerConfig objects to match alerts. eg:
921 alertmanagerConfigMatcherStrategy: {}
922 ## Example with use OnNamespace strategy
923 # alertmanagerConfigMatcherStrategy:
926 ## Additional command line arguments to pass to Alertmanager (in addition to those generated by the chart)
929 # Use logfmt (default) or json logging
931 ## Log level for Alertmanager to be configured with.
934 ## Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the
935 ## running cluster equal to the expected size.
937 ## Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression
938 ## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours).
941 ## Storage is the definition of how storage will be used by the Alertmanager instances.
942 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/platform/storage.md
945 # volumeClaimTemplate:
947 # storageClassName: gluster
948 # accessModes: ["ReadWriteOnce"]
954 ## The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name. string false
957 ## The route prefix Alertmanager registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true,
958 ## but the server serves requests under a different route prefix. For example for use with kubectl proxy.
961 ## scheme: HTTP scheme to use. Can be used with `tlsConfig` for example if using istio mTLS.
963 ## tlsConfig: TLS configuration to use when connect to the endpoint. For example if using istio mTLS.
964 ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#tlsconfig
966 ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions.
969 ## Define which Nodes the Pods are scheduled on.
970 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
973 ## Define resources requests and limits for single Pods.
974 ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
980 ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
981 ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
982 ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
983 ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
985 podAntiAffinity: "soft"
986 ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
987 ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
989 podAntiAffinityTopologyKey: kubernetes.io/hostname
990 ## Assign custom affinity rules to the alertmanager instance
991 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
995 # requiredDuringSchedulingIgnoredDuringExecution:
997 # - matchExpressions:
998 # - key: kubernetes.io/e2e-az-name
1004 ## If specified, the pod's tolerations.
1005 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
1011 # effect: "NoSchedule"
1013 ## If specified, the pod's topology spread constraints.
1014 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
1016 topologySpreadConstraints: []
1018 # topologyKey: topology.kubernetes.io/zone
1019 # whenUnsatisfiable: DoNotSchedule
1024 ## SecurityContext holds pod-level security attributes and common container settings.
1025 ## This defaults to non root user with uid 1000 and gid 2000. *v1.PodSecurityContext false
1026 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
1034 type: RuntimeDefault
1035 ## Use the host's user namespace for Alertmanager pods.
1036    ## ref: https://kubernetes.io/docs/concepts/workloads/pods/user-namespaces/
1038 ## DNS configuration for Alertmanager.
1039 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.PodDNSConfig
1041 ## DNS policy for Alertmanager.
1042 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#dnspolicystring-alias
1044 ## Enable hostNetwork for Alertmanager.
1046 ## ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP.
1047 ## Note this is only for the Alertmanager UI, not the gossip communication.
1050 ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod.
1054 # - name: oauth-proxy
1055 # image: quay.io/oauth2-proxy/oauth2-proxy:v7.15.2
1057 # - --upstream=http://127.0.0.1:9093
1058 # - --http-address=0.0.0.0:8081
1059 # - --metrics-address=0.0.0.0:8082
1062 # - containerPort: 8081
1065 # - containerPort: 8082
1066 # name: oauth-metrics
1070 # Additional volumes on the output StatefulSet definition.
1072 # Additional VolumeMounts on the output StatefulSet definition.
1074 ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
1075    ## (permissions, dir tree) on mounted volumes before starting Alertmanager
1077 ## Priority class assigned to the Pods
1079 priorityClassName: ""
1080 ## AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster.
1083 ## PortName to use for Alert Manager.
1085 portName: "http-web"
1086 ## ClusterAdvertiseAddress is the explicit address to advertise in cluster. Needs to be provided for non RFC1918 [1] (public) addresses. [1] RFC1918: https://tools.ietf.org/html/rfc1918
1088 clusterAdvertiseAddress: false
1089 ## clusterGossipInterval determines interval between gossip attempts.
1090 ## Needs to be specified as GoDuration, a time duration that can be parsed by Go's time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s)
1091 clusterGossipInterval: ""
1092 ## clusterPeerTimeout determines timeout for cluster peering.
1093 ## Needs to be specified as GoDuration, a time duration that can be parsed by Go's time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s)
1094 clusterPeerTimeout: ""
1095 ## clusterPushpullInterval determines interval between pushpull attempts.
1096 ## Needs to be specified as GoDuration, a time duration that can be parsed by Go's time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s)
1097 clusterPushpullInterval: ""
1098 ## clusterLabel defines the identifier that uniquely identifies the Alertmanager cluster.
1100 ## ForceEnableClusterMode ensures Alertmanager does not deactivate the cluster mode when running with a single replica.
1101 ## Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each.
1102 forceEnableClusterMode: false
1103 ## Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to
1104 ## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).
1106 ## Pod management policy. Kubernetes default is OrderedReady but prometheus-operator default is Parallel.
1107 ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
1108 podManagementPolicy: ""
1109 ## Update strategy for the StatefulSet.
1110 ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
1112 # type: RollingUpdate
1116 ## Duration in seconds the pod needs to terminate gracefully.
1117 ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination
1118 terminationGracePeriodSeconds: ~
1119 ## Additional configuration which is not covered by the properties above. (passed through tpl)
1120 additionalConfig: {}
1121 ## Additional configuration which is not covered by the properties above.
1122 ## Useful, if you need advanced templating inside alertmanagerSpec.
1123 ## Otherwise, use alertmanager.alertmanagerSpec.additionalConfig (passed through tpl)
1124 additionalConfigString: ""
1125 ## ExtraSecret can be used to store various data in an extra secret
1126 ## (use it for example to store hashed basic auth credentials)
1128 ## if not set, name will be auto generated
1133 # foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
1134 # someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.
1135## Using default values from https://github.com/grafana-community/helm-charts/blob/main/charts/grafana/values.yaml
1139 namespaceOverride: ""
1140 ## ForceDeployDatasources Create datasource configmap even if grafana deployment has been disabled
1142 forceDeployDatasources: false
 1143 ## ForceDeployDashboards Create dashboard configmap even if grafana deployment has been disabled
1145 forceDeployDashboards: false
1146 ## Deploy default dashboards
1148 defaultDashboardsEnabled: true
1149 ## Deploy GrafanaDashboard CRDs that reference dashboards from ConfigMaps when grafana-operator is used
1150 ## These settings control how dashboards are integrated with the Grafana Operator
 1151 ## Note: End users still need to create their own kind: GrafanaDatasource for Prometheus
1153 ## apiVersion: grafana.integreatly.org/v1beta1
1154 ## kind: GrafanaDatasource
1159 ## allowCrossNamespaceImport: true
1160 ## instanceSelector:
1167 ## url: http://prometheus-operated.prometheus-stack.svc.cluster.local:9090
1170 ## "tlsSkipVerify": true
1171 ## "timeInterval": "5s"
1174 ## Enable references to ConfigMaps containing dashboards in GrafanaDashboard CRs
1175 ## Set to true to allow dashboards to be loaded from ConfigMap references
1176 dashboardsConfigMapRefEnabled: false
 1177 ## Annotations for GrafanaDashboard CRs
1180 ## Labels that should be matched kind: Grafana instance
1181 ## Example: { app: grafana, category: dashboard }
1184 ## How frequently the operator should resync resources (in duration format)
1185 ## Controls how often dashboards are reconciled by the operator
1188 ## Which folder contains all dashboards in Grafana
1189 ## This folder will be created on the Root level
1190 ## Only one of 'folder', 'folderUID' or 'folderRef' can be set
1193 ## Which UID of the target folder contains all dashboards in Grafana
1194 ## This allows you to use subfolder hierarchy
1195 ## Only one of 'folder', 'folderUID' or 'folderRef' can be set
1198 ## Which GrafanaFolder reference contains all dashboards in Grafana
1199 ## This allows you to use subfolder hierarchy.
1200 ## Only one of 'folder', 'folderUID' or 'folderRef' can be set
1203 ## Timezone for the default dashboards
 1204 ## Other options are: browser or a specific timezone, e.g. Europe/Luxembourg
1206 defaultDashboardsTimezone: utc
1207 ## Editable flag for the default dashboards
1209 defaultDashboardsEditable: true
1210 ## Default interval for Grafana dashboards
1212 defaultDashboardsInterval: 1m
1213 # Administrator credentials when not using an existing secret (see below)
1215 # adminPassword: strongpassword
1217 # Use an existing secret for the admin user.
1219 ## Name of the secret. Can be templated.
1222 passwordKey: admin-password
1224 ## If true, Grafana PSPs will be created
1228 ## If true, Grafana Ingress will be created
1231 ## IngressClassName for Grafana Ingress.
 1232 ## Should be provided if Ingress is enabled.
1234 # ingressClassName: nginx
1236 ## Annotations for Grafana Ingress
1239 # kubernetes.io/ingress.class: nginx
1240 # kubernetes.io/tls-acme: "true"
1242 ## Labels to be added to the Ingress
 1246 ## Must be provided if Ingress is enabled.
1249 # - grafana.domain.com
1251 ## Path for grafana ingress
1253 ## TLS configuration for grafana Ingress
1254 ## Secret must be manually created in the namespace
1257 # - secretName: grafana-general-tls
1259 # - grafana.example.com
1260 # # To make Grafana persistent (Using Statefulset)
1265 # storageClassName: "storageClassName"
1270 # - kubernetes.io/pvc-protection
1277 label: grafana_dashboard
1279 # Allow discovery in all namespaces for dashboards
1280 searchNamespace: ALL
 1281 # Support for new table panels; when enabled, Grafana auto-migrates the old table panels to the newer table panels
1282 enableNewTablePanelSyntax: false
1283 ## Annotations for Grafana dashboard configmaps
1292 allowUiUpdates: false
1295 defaultDatasourceEnabled: true
1296 isDefaultDatasource: true
1299 ## Extra jsonData properties to add to the datasource
1301 # prometheusType: Prometheus
1303 ## URL of prometheus datasource
1305 # url: http://prometheus-stack-prometheus:9090/
1307 ## Prometheus request timeout in seconds
1310 ## Query parameters to add, as a URL-encoded string,
1311 ## to query Prometheus
1312 # customQueryParameters: ""
1314 # If not defined, will use prometheus.prometheusSpec.scrapeInterval or its default
1315 # defaultDatasourceScrapeInterval: 15s
1317 ## Annotations for Grafana datasource configmaps
1320 ## Set method for HTTP to send query to datasource
1322 ## Create datasource for each Pod of Prometheus StatefulSet;
1323 ## this uses by default the headless service `prometheus-operated` which is
1324 ## created by Prometheus Operator. In case you deployed your own Service for your
1325 ## Prometheus instance, you can specify it with the field `prometheusServiceName`
1326 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/0fee93e12dc7c2ea1218f19ae25ec6b893460590/pkg/prometheus/statefulset.go#L255-L286
1327 createPrometheusReplicasDatasources: false
1328 prometheusServiceName: prometheus-operated
1329 label: grafana_datasource
1331 ## Field with internal link pointing to existing data source in Grafana.
1332 ## Can be provisioned via additionalDataSources
1333 exemplarTraceIdDestinations: {}
1334 # datasourceUid: Jaeger
1335 # traceIdLabelName: trace_id
1336 # urlDisplayLabel: View traces
1341 handleGrafanaManagedAlerts: false
1342 implementation: prometheus
1343 extraConfigmapMounts: []
1344 # - name: certs-configmap
1345 # mountPath: /etc/grafana/ssl/
1346 # configMap: certs-configmap
1349 deleteDatasources: []
1350 # - name: example-datasource
1353 ## Configure additional grafana datasources (passed through tpl)
1354 ## ref: https://grafana.com/docs/grafana/latest/administration/provisioning/#datasources
1355 additionalDataSources: []
1356 # - name: prometheus-sample
1360 # basicAuthPassword: pass
1361 # basicAuthUser: daco
1364 # tlsSkipVerify: true
1367 # url: https://{{ printf "%s-prometheus.svc" .Release.Name }}:9090
1370 ## Configure additional grafana datasources as a templated string (passed through tpl)
1371 ## Useful when you need Helm flow control or templating inside the datasource definition
1372 additionalDataSourcesString: ""
1373 # Flag to mark provisioned data sources for deletion if they are no longer configured.
1374 # It takes no effect if data sources are already listed in the deleteDatasources section.
1375 # ref: https://grafana.com/docs/grafana/latest/administration/provisioning/#example-data-source-configuration-file
1377 ## Passed to grafana subchart and used by servicemonitor below
1384 # If true, a ServiceMonitor CRD is created for a prometheus operator
1385 # https://github.com/prometheus-operator/prometheus-operator
1388 # Path to use for scraping metrics. Might be different if server.root_url is set
1391 # namespace: monitoring (defaults to use the namespace this chart is deployed to)
1393 # labels for the ServiceMonitor
1395 # Scrape interval. If not set, the Prometheus default scrape interval is used.
1401 ## RelabelConfigs to apply to samples before scraping
1402 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1405 # - sourceLabels: [__meta_kubernetes_pod_node_name]
1408 # targetLabel: nodename
1411## Flag to disable all the kubernetes component scrapers
1413kubernetesServiceMonitors:
1415## Component scraping the kube api server
1420 serverName: kubernetes
1421 insecureSkipVerify: false
1424 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
1427 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
1430 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
1433 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1436 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1438 labelNameLengthLimit: 0
1439 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1441 labelValueLengthLimit: 0
1442 ## proxyUrl: URL of a proxy that should be used for scraping.
1448 component: apiserver
1449 provider: kubernetes
1450 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
1451 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1454 # Drop excessively noisy apiserver buckets.
1456 regex: (etcd_request|apiserver_request_slo|apiserver_request_sli|apiserver_request)_duration_seconds_bucket;(0\.15|0\.2|0\.3|0\.35|0\.4|0\.45|0\.6|0\.7|0\.8|0\.9|1\.25|1\.5|1\.75|2|3|3\.5|4|4\.5|6|7|8|9|15|20|40|45|50)(\.0)?
1461 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
1462 # sourceLabels: [__name__]
1464 ## RelabelConfigs to apply to samples before scraping
1465 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1469 # - __meta_kubernetes_namespace
1470 # - __meta_kubernetes_service_name
1471 # - __meta_kubernetes_endpoint_port_name
1473 # regex: default;kubernetes;https
1474 # - targetLabel: __address__
1475 # replacement: kubernetes.default.svc:443
1477 ## Additional labels
1479 additionalLabels: {}
1482 ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
1483 ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
1485 ## Override the job label used for the apiserver.
1486 ## This allows users who scrape apiserver metrics under a different job name (e.g. k3s-server via PushProx)
1487 ## to align the recording rules and alerts with their actual job label.
1489## Component scraping the kubelet and kubelet-hosted cAdvisor
1493 namespace: kube-system
1494 # Overrides the job selector in Grafana dashboards and Prometheus rules
1495 # For k3s clusters, change to k3s-server
1499 ## Enable scraping /metrics from kubelet's service
1501 ## Attach metadata to discovered targets. Requires Prometheus v2.45 for endpoints created by the operator.
1505 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
 1508 ## If true, Prometheus uses (respects) labels provided by the exporter.
1511 ## If true, Prometheus ingests metrics with timestamp provided by exporter. If false, Prometheus ingests metrics with timestamp of scrape.
1513 honorTimestamps: true
1514 ## If true, defines whether Prometheus tracks staleness of the metrics that have an explicit timestamp present in scraped data. Has no effect if `honorTimestamps` is false.
1515 ## We recommend enabling this if you want the best possible accuracy for container_ metrics scraped from cadvisor.
1516 ## For more details see: https://github.com/prometheus-community/helm-charts/pull/5063#issuecomment-2545374849
1517 trackTimestampsStaleness: true
1518 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
1521 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
1524 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1527 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1529 labelNameLengthLimit: 0
1530 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1532 labelValueLengthLimit: 0
1533 ## proxyUrl: URL of a proxy that should be used for scraping.
1536 ## Enable scraping the kubelet over https. For requirements to enable this see
1537 ## https://github.com/prometheus-operator/prometheus-operator/issues/926
1540 ## Skip TLS certificate validation when scraping.
1541 ## This is enabled by default because kubelet serving certificate deployed by kubeadm is by default self-signed
1542 ## ref: https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/#kubelet-serving-certs
1544 insecureSkipVerify: true
1545 ## Enable scraping /metrics/probes from kubelet's service
1548 ## Enable scraping /metrics/resource from kubelet's service
1549 ## This is disabled by default because container metrics are already exposed by cAdvisor
1552 # From kubernetes 1.18, /metrics/resource/v1alpha1 renamed to /metrics/resource
1553 resourcePath: "/metrics/resource/v1alpha1"
1554 ## Configure the scrape interval for resource metrics. This is configured to the default Kubelet cAdvisor
1555 ## minimum housekeeping interval in order to avoid missing samples. Note, this value is ignored
1556 ## if kubelet.serviceMonitor.interval is not empty.
1557 resourceInterval: 10s
1558 ## Enable scraping /metrics/cadvisor from kubelet's service
1561 ## Configure the scrape interval for cAdvisor. This is configured to the default Kubelet cAdvisor
1562 ## minimum housekeeping interval in order to avoid missing samples. Note, this value is ignored
1563 ## if kubelet.serviceMonitor.interval is not empty.
1564 cAdvisorInterval: 10s
1565 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
1566 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1568 cAdvisorMetricRelabelings:
1569 # Drop less useful container CPU metrics.
1570 - sourceLabels: [__name__]
1572 regex: 'container_cpu_(cfs_throttled_seconds_total|load_average_10s|system_seconds_total|user_seconds_total)'
1573 # Drop less useful container / always zero filesystem metrics.
1574 - sourceLabels: [__name__]
1576 regex: 'container_fs_(io_current|io_time_seconds_total|io_time_weighted_seconds_total|reads_merged_total|sector_reads_total|sector_writes_total|writes_merged_total)'
1577 # Drop less useful / always zero container memory metrics.
1578 - sourceLabels: [__name__]
1580 regex: 'container_memory_(mapped_file|swap)'
1581 # Drop less useful container process metrics.
1582 - sourceLabels: [__name__]
1584 regex: 'container_(file_descriptors|tasks_state|threads_max)'
1585 # Drop container_memory_failures_total{scope="hierarchy"} metrics,
1586 # we only need the container scope.
1587 - sourceLabels: [__name__, scope]
1589 regex: 'container_memory_failures_total;hierarchy'
1590 # Drop container_network_... metrics that match various interfaces that
1591 # correspond to CNI and similar interfaces. This avoids capturing network
1592 # metrics for host network containers.
1593 - sourceLabels: [__name__, interface]
1595 regex: 'container_network_.*;(cali|cilium|cni|lxc|nodelocaldns|tunl).*'
1596 # Drop container spec metrics that overlap with kube-state-metrics.
1597 - sourceLabels: [__name__]
1599 regex: 'container_spec.*'
1600 # Drop cgroup metrics with no pod.
1601 - sourceLabels: [id, pod]
1604 # - sourceLabels: [__name__, image]
1606 # regex: container_([a-z_]+);
1609 # - sourceLabels: [__name__]
1611 # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
1615 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
1616 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1618 probesMetricRelabelings: []
1619 # - sourceLabels: [__name__, image]
1621 # regex: container_([a-z_]+);
1624 # - sourceLabels: [__name__]
1626 # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
1630 ## RelabelConfigs to apply to samples before scraping
1631 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1633 ## metrics_path is required to match upstream rules and charts
1634 cAdvisorRelabelings:
1636 sourceLabels: [__metrics_path__]
1637 targetLabel: metrics_path
1638 # - sourceLabels: [__meta_kubernetes_pod_node_name]
1641 # targetLabel: nodename
1645 ## RelabelConfigs to apply to samples before scraping
1646 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1650 sourceLabels: [__metrics_path__]
1651 targetLabel: metrics_path
1652 # - sourceLabels: [__meta_kubernetes_pod_node_name]
1655 # targetLabel: nodename
1659 ## RelabelConfigs to apply to samples before scraping
1660 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1662 resourceRelabelings:
1664 sourceLabels: [__metrics_path__]
1665 targetLabel: metrics_path
1666 # - sourceLabels: [__meta_kubernetes_pod_node_name]
1669 # targetLabel: nodename
1673 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
1674 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1677 # Reduce bucket cardinality of kubelet storage operations.
1679 sourceLabels: [__name__, le]
1680 regex: (csi_operations|storage_operation_duration)_seconds_bucket;(0.25|2.5|15|25|120|600)(\.0)?
1681 # - sourceLabels: [__name__, image]
1683 # regex: container_([a-z_]+);
1686 # - sourceLabels: [__name__]
1688 # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
1692 ## RelabelConfigs to apply to samples before scraping
1693 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1695 ## metrics_path is required to match upstream rules and charts
1698 sourceLabels: [__metrics_path__]
1699 targetLabel: metrics_path
1700 # - sourceLabels: [__meta_kubernetes_pod_node_name]
1703 # targetLabel: nodename
1707 ## Additional labels
1709 additionalLabels: {}
1712 ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
1713 ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
1715## Component scraping the kube controller manager
1717kubeControllerManager:
1719 # Overrides the job selector in Grafana dashboards and Prometheus rules
1720 # For k3s clusters, change to k3s-server
1722 ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on
1729 ## If using kubeControllerManager.endpoints only the port and targetPort are used
1733 ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change
1734 ## of default port in Kubernetes 1.22.
1740 ipFamilies: ["IPv6", "IPv4"]
1741 ipFamilyPolicy: "PreferDualStack"
1743 # component: kube-controller-manager
1746 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
1749 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
1752 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
1755 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1758 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1760 labelNameLengthLimit: 0
1761 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1763 labelValueLengthLimit: 0
1764 ## proxyUrl: URL of a proxy that should be used for scraping.
1767 ## port: Name of the port the metrics will be scraped from
1773 # component: kube-controller-manager
1775 ## Enable scraping kube-controller-manager over https.
1776 ## Requires proper certs (not self-signed) and delegated authentication/authorization checks.
1777 ## If null or unset, the value is determined dynamically based on target Kubernetes version.
1780 # Skip TLS certificate validation when scraping
1781 insecureSkipVerify: null
1782 # Name of the server to use when validating TLS certificate
1784 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
1785 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1787 metricRelabelings: []
1789 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
1790 # sourceLabels: [__name__]
1792 ## RelabelConfigs to apply to samples before scraping
1793 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1796 # - sourceLabels: [__meta_kubernetes_pod_node_name]
1799 # targetLabel: nodename
1803 ## Additional labels
1805 additionalLabels: {}
1808 ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
1809 ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
1811## Component scraping coreDns. Use either this or kubeDns
1821 ipFamilies: ["IPv6", "IPv4"]
1822 ipFamilyPolicy: "PreferDualStack"
1827 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
1830 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
1833 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
1836 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1839 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1841 labelNameLengthLimit: 0
1842 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1844 labelValueLengthLimit: 0
1845 ## proxyUrl: URL of a proxy that should be used for scraping.
1848 ## port: Name of the port the metrics will be scraped from
1856 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
1857 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1859 metricRelabelings: []
1861 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
1862 # sourceLabels: [__name__]
1864 ## RelabelConfigs to apply to samples before scraping
1865 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1868 # - sourceLabels: [__meta_kubernetes_pod_node_name]
1871 # targetLabel: nodename
1875 ## Additional labels
1877 additionalLabels: {}
1880 ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
1881 ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
1883 ## File containing bearer token to be used when scraping targets
 1884 ## An empty value does not send any bearer token.
1886 bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
1887## Component scraping kubeDns. Use either this or coreDns
1900 ipFamilies: ["IPv6", "IPv4"]
1901 ipFamilyPolicy: "PreferDualStack"
1905 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
1908 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
1911 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
1914 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1917 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1919 labelNameLengthLimit: 0
1920 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1922 labelValueLengthLimit: 0
1923 ## proxyUrl: URL of a proxy that should be used for scraping.
1931 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
1932 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1934 metricRelabelings: []
1936 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
1937 # sourceLabels: [__name__]
1939 ## RelabelConfigs to apply to samples before scraping
1940 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1943 # - sourceLabels: [__meta_kubernetes_pod_node_name]
1946 # targetLabel: nodename
1950 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
1951 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1953 dnsmasqMetricRelabelings: []
1955 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
1956 # sourceLabels: [__name__]
1958 ## RelabelConfigs to apply to samples before scraping
1959 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1961 dnsmasqRelabelings: []
1962 # - sourceLabels: [__meta_kubernetes_pod_node_name]
1965 # targetLabel: nodename
1969 ## Additional labels
1971 additionalLabels: {}
1974 ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
1975 ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
1977 ## File containing bearer token to be used when scraping targets
 1978 ## An empty value does not send any bearer token.
1980 bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
1981## Component scraping etcd
1985 ## If your etcd is not deployed as a pod, specify IPs it can be found on
1992 ## Etcd service. If using kubeEtcd.endpoints only the port and targetPort are used
2000 ipFamilies: ["IPv6", "IPv4"]
2001 ipFamilyPolicy: "PreferDualStack"
2004 ## Configure secure access to the etcd cluster by loading a secret into prometheus and
2005 ## specifying security configuration below. For example, with a secret named etcd-client-cert
2009 ## insecureSkipVerify: false
2010 ## serverName: localhost
2011 ## caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
2012 ## certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client
2013 ## keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
2017 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
2020 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
2023 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
2026 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2029 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2031 labelNameLengthLimit: 0
2032 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2034 labelValueLengthLimit: 0
2035 ## proxyUrl: URL of a proxy that should be used for scraping.
2039 insecureSkipVerify: false
2044 ## port: Name of the port the metrics will be scraped from
2052 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
2053 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
2055 metricRelabelings: []
2057 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
2058 # sourceLabels: [__name__]
2060 ## RelabelConfigs to apply to samples before scraping
2061 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
2064 # - sourceLabels: [__meta_kubernetes_pod_node_name]
2067 # targetLabel: nodename
2071 ## Additional labels
2073 additionalLabels: {}
2076 ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
2077 ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
2079 ## File containing bearer token to be used when scraping targets
 2080 ## An empty value does not send any bearer token.
2082 bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
2083## Component scraping kube scheduler
2087 # Overrides the job selector in Grafana dashboards and Prometheus rules
2088 # For k3s clusters, change to k3s-server
2090 ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on
2097 ## If using kubeScheduler.endpoints only the port and targetPort are used
2101 ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change
2102 ## of default port in Kubernetes 1.23.
2108 ipFamilies: ["IPv6", "IPv4"]
2109 ipFamilyPolicy: "PreferDualStack"
2111 # component: kube-scheduler
2114 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
2117 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
2120 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
2123 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2126 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2128 labelNameLengthLimit: 0
2129 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2131 labelValueLengthLimit: 0
2132 ## proxyUrl: URL of a proxy that should be used for scraping.
2135 ## Enable scraping kube-scheduler over https.
2136 ## Requires proper certs (not self-signed) and delegated authentication/authorization checks.
2137 ## If null or unset, the value is determined dynamically based on target Kubernetes version.
2140 ## port: Name of the port the metrics will be scraped from
2146 # component: kube-scheduler
2148 ## Skip TLS certificate validation when scraping
2149 insecureSkipVerify: null
2150 ## Name of the server to use when validating TLS certificate
2152 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
2153 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
2155 metricRelabelings: []
2157 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
2158 # sourceLabels: [__name__]
2160 ## RelabelConfigs to apply to samples before scraping
2161 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
2164 # - sourceLabels: [__meta_kubernetes_pod_node_name]
2167 # targetLabel: nodename
2171 ## Additional labels
2173 additionalLabels: {}
2176 ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
2177 ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
2179## Component scraping kube proxy
2183 # Overrides the job selector in Grafana dashboards and Prometheus rules
2184 # For k3s clusters, change to k3s-server
2186 ## If your kube proxy is not deployed as a pod, specify IPs it can be found on
2199 ipFamilies: ["IPv6", "IPv4"]
2200 ipFamilyPolicy: "PreferDualStack"
2202 # k8s-app: kube-proxy
2205 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
2208 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
2211 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
2214 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2217 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2219 labelNameLengthLimit: 0
2220 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2222 labelValueLengthLimit: 0
2223 ## proxyUrl: URL of a proxy that should be used for scraping.
2226 ## port: Name of the port the metrics will be scraped from
2232 # k8s-app: kube-proxy
2234 ## Enable scraping kube-proxy over https.
2235 ## Requires proper certs (not self-signed) and delegated authentication/authorization checks
2238 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
2239 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
2241 metricRelabelings: []
2243 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
2244 # sourceLabels: [__name__]
2246 ## RelabelConfigs to apply to samples before scraping
2247 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
2251 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
2252 # sourceLabels: [__name__]
2254 ## Additional labels
2256 additionalLabels: {}
2259 ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
2260 ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
2262 ## File containing bearer token to be used when scraping targets
2263    ## An empty value does not send any bearer token.
2265 bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
2266## Component scraping kube state metrics
2270## Configuration for kube-state-metrics subchart
2273 ## set to true to add the release label so scraping of the servicemonitor with kube-prometheus-stack works out of the box
2275 ## Enable scraping via kubernetes-service-endpoints
2276  ## Disabled by default, as the service monitor below is enabled instead
2278 prometheusScrape: false
2281 ## Enable scraping via service monitor
2282 ## Disable to prevent duplication if you enable prometheusScrape above
2284 ## kube-state-metrics endpoint
2286 ## Keep labels from scraped data, overriding server-side labels
2288 ## selfMonitor endpoint
2290 ## Keep labels from scraped data, overriding server-side labels
2292## Deploy node exporter as a daemonset to all nodes
2303  ## forceDeployDashboards: create the dashboard ConfigMaps even if the nodeExporter deployment has been disabled
2305 forceDeployDashboards: false
2306## Configuration for prometheus-node-exporter subchart
2308prometheus-node-exporter:
2309 namespaceOverride: ""
2311 ## Add the 'node-exporter' label to be used by serviceMonitor and podMonitor to match standard common usage in rules and grafana dashboards
2313 jobLabel: node-exporter
2316 - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|run/containerd/.+|var/lib/docker/.+|var/lib/kubelet/.+)($|/)
2317 - --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs|erofs)$
2319 portName: http-metrics
2322 ipFamilies: ["IPv6", "IPv4"]
2323 ipFamilyPolicy: "PreferDualStack"
2325 jobLabel: node-exporter
2332 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
2335 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
2338 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
2341 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2344 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2346 labelNameLengthLimit: 0
2347 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2349 labelValueLengthLimit: 0
2350      ## How long until a scrape request times out. If not set, the Prometheus default scrape timeout is used.
2353 ## proxyUrl: URL of a proxy that should be used for scraping.
2356 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
2357 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
2359 metricRelabelings: []
2360 # - sourceLabels: [__name__]
2362 # regex: ^node_mountstats_nfs_(event|operations|transport)_.+
2366 ## RelabelConfigs to apply to samples before scraping
2367 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
2370 # - sourceLabels: [__meta_kubernetes_pod_node_name]
2373 # targetLabel: nodename
2376 ## Attach node metadata to discovered targets. Requires Prometheus v2.35.0 and above.
2385 ## If true, create PSPs for node-exporter
2388## Manages Prometheus and Alertmanager components
2392 ## Use '{{ template "kube-prometheus-stack.fullname" . }}-operator' by default
2393 fullnameOverride: ""
2394 ## Number of old replicasets to retain ##
2395 ## The default value is 10, 0 will garbage-collect old replicasets ##
2396 revisionHistoryLimit: 10
2397 ## Strategy of the deployment
2400 ## Prometheus-Operator v0.39.0 and later support TLS natively.
2404 # Value must match version names from https://pkg.go.dev/crypto/tls#pkg-constants
2405 tlsMinVersion: VersionTLS13
2406 # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules.
2408 ## Liveness probe for the prometheusOperator deployment
2413 initialDelaySeconds: 0
2417 ## Readiness probe for the prometheusOperator deployment
2422 initialDelaySeconds: 0
2426 ## Admission webhook support for PrometheusRules resources added in Prometheus Operator 0.30 can be enabled to prevent incorrectly formatted
2427 ## rules from making their way into prometheus and potentially preventing the container from starting
2429 ## Valid values: Fail, Ignore, IgnoreOnInstallOnly
2430 ## IgnoreOnInstallOnly - If Release.IsInstall returns "true", set "Ignore" otherwise "Fail"
2432 ## The default timeoutSeconds is 10 and the maximum value is 30.
2435 ## A PEM encoded CA bundle which will be used to validate the webhook's server certificate.
2436 ## If unspecified, system trust roots on the apiserver are used.
2438 ## If enabled, generate a self-signed certificate, then patch the webhook configurations with the generated data.
2439 ## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own
2440 ## certs ahead of time if you wish.
2443 # argocd.argoproj.io/hook: PreSync
2444 # argocd.argoproj.io/hook-delete-policy: HookSucceeded
2446 namespaceSelector: {}
2449 mutatingWebhookConfiguration:
2451 # argocd.argoproj.io/hook: PreSync
2452 validatingWebhookConfiguration:
2454 # argocd.argoproj.io/hook: PreSync
2457 ## Number of replicas
2460 ## Strategy of the deployment
2463 # Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
2464 podDisruptionBudget:
2467 # maxUnavailable: ""
2468 unhealthyPodEvictionPolicy: AlwaysAllow
2469 ## Number of old replicasets to retain ##
2470 ## The default value is 10, 0 will garbage-collect old replicasets ##
2471 revisionHistoryLimit: 10
2472 ## Prometheus-Operator v0.39.0 and later support TLS natively.
2476 # Value must match version names from https://pkg.go.dev/crypto/tls#pkg-constants
2477 tlsMinVersion: VersionTLS13
2478 # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules.
2480 ## Service account for Prometheus Operator Webhook to use.
2481 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
2485 automountServiceAccountToken: false
2488 ## Configuration for Prometheus operator Webhook service
2496 ipFamilies: ["IPv6", "IPv4"]
2497 ipFamilyPolicy: "PreferDualStack"
2498 ## Port to expose on each node
2499 ## Only used if service.type is 'NodePort'
2503 ## Additional ports to open for Prometheus operator Webhook service
2504 ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services
2508 ## Only use if service.type is "LoadBalancer"
2511 loadBalancerSourceRanges: []
2512 ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
2514 externalTrafficPolicy: Cluster
2516 ## NodePort, ClusterIP, LoadBalancer
2519 ## List of IP addresses at which the Prometheus server service is available
2520 ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
2523 # ## Labels to add to the operator webhook deployment
2526 ## Annotations to add to the operator webhook deployment
2529 ## Labels to add to the operator webhook pod
2532 ## Annotations to add to the operator webhook pod
2535 ## Assign a PriorityClassName to pods if set
2536 # priorityClassName: ""
2538 ## Define Log Format
2539 # Use logfmt (default) or json logging
2542 ## Decrease log verbosity to errors only
2545 ## Prometheus-operator webhook image
2549 repository: chainguard-private/prometheus-admission-webhook
2550 # if not set appVersion field from Chart.yaml is used
2552 sha: sha256:82fe3e3be35b1e38eb4b7203389e4e938e1b1b756f3dba9d25866f853f6f98e9
2553 pullPolicy: IfNotPresent
2554 ## Define Log Format
2555 # Use logfmt (default) or json logging
2558 ## Decrease log verbosity to errors only
2566 initialDelaySeconds: 30
2575 initialDelaySeconds: 5
2579 ## Resource limits & requests
2589 # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
2590      # because the control plane managed by AWS cannot communicate with the pods' IP CIDR, so admission webhooks do not work
2593 ## Define which Nodes the Pods are scheduled on.
2594 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
2597 ## Tolerations for use with node taints
2598 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
2604 # effect: "NoSchedule"
2606 ## Assign custom affinity rules to the prometheus operator
2607 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
2611 # requiredDuringSchedulingIgnoredDuringExecution:
2612 # nodeSelectorTerms:
2613 # - matchExpressions:
2614 # - key: kubernetes.io/e2e-az-name
2623 # - ns1.svc.cluster-domain.example
2624 # - my.dns.search.suffix
2635 type: RuntimeDefault
2636 ## Container-specific security context configuration
2637 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
2639 containerSecurityContext:
2640 allowPrivilegeEscalation: false
2641 readOnlyRootFilesystem: true
2645 ## If false then the user will opt out of automounting API credentials.
2647 automountServiceAccountToken: true
2652 repository: chainguard-private/kube-webhook-certgen
2654 sha: sha256:127961d6034e96d92cf9ae2fd460e39fa790eb5d1ebcb80acf0e833bc9b22546
2655 pullPolicy: IfNotPresent
2657 ## Provide a priority class name to the webhook patching job
2659 priorityClassName: ""
2660 ttlSecondsAfterFinished: 60
2662 # argocd.argoproj.io/hook: PreSync
2663 # argocd.argoproj.io/hook-delete-policy: HookSucceeded
2668 ## SecurityContext holds pod-level security attributes and common container settings.
2669 ## This defaults to non root user with uid 2000 and gid 2000. *v1.PodSecurityContext false
2670 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
2677 type: RuntimeDefault
2678 ## Service account for Prometheus Operator Webhook Job Patch to use.
2679 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
2684 automountServiceAccountToken: true
2685 # Security context for create job container
2688 allowPrivilegeEscalation: false
2689 readOnlyRootFilesystem: true
2693 # Security context for patch job container
2696 allowPrivilegeEscalation: false
2697 readOnlyRootFilesystem: true
2701 # Use certmanager to generate webhook certs
2704 # self-signed root certificate
2706        duration: "" # defaults to 5y
2707 # -- Set the revisionHistoryLimit on the Certificate. See
2708 # https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec
2710 revisionHistoryLimit:
2712        duration: "" # defaults to 1y
2713 # -- Set the revisionHistoryLimit on the Certificate. See
2714 # https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec
2716 revisionHistoryLimit:
2719 # kind: "ClusterIssuer"
2720 ## Namespaces to scope the interaction of the Prometheus Operator and the apiserver (allow list).
2721 ## This is mutually exclusive with denyNamespaces. Setting this to an empty object will disable the configuration
2724 # releaseNamespace: true
2728 ## Namespaces not to scope the interaction of the Prometheus Operator (deny list).
2731 ## Filter namespaces to look for prometheus-operator custom resources
2733 alertmanagerInstanceNamespaces: []
2734 alertmanagerConfigNamespaces: []
2735 prometheusInstanceNamespaces: []
2736 thanosRulerInstanceNamespaces: []
2737 ## The clusterDomain value will be added to the cluster.peer option of the alertmanager.
2738 ## Without this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated:9094 (default value)
2739 ## With this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated.namespace.svc.cluster-domain:9094
2741 # clusterDomain: "cluster.local"
2743 ## Enable creation of NetworkPolicy resources.
2746 ## Flavor of the network policy to use.
2748 # * kubernetes for networking.k8s.io/v1/NetworkPolicy
2749 # * cilium for cilium.io/v2/CiliumNetworkPolicy
2754 ## match labels used in selector
2756 ## Service account for Prometheus Operator to use.
2757 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
2762 automountServiceAccountToken: true
2764 # -- terminationGracePeriodSeconds for container lifecycle hook
2765 terminationGracePeriodSeconds: 30
2766 # -- Specify lifecycle hooks for the controller
2768 ## Configuration for Prometheus operator service
2776 ipFamilies: ["IPv6", "IPv4"]
2777 ipFamilyPolicy: "PreferDualStack"
2778 ## Port to expose on each node
2779 ## Only used if service.type is 'NodePort'
2783 ## Additional ports to open for Prometheus operator service
2784 ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services
2788 ## Only use if service.type is "LoadBalancer"
2791 loadBalancerSourceRanges: []
2792 ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
2794 externalTrafficPolicy: Cluster
2796 ## NodePort, ClusterIP, LoadBalancer
2799 ## List of IP addresses at which the Prometheus server service is available
2800 ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
2803 # ## Labels to add to the operator deployment
2806 ## Annotations to add to the operator deployment
2809 ## Labels to add to the operator pod
2812 ## Annotations to add to the operator pod
2815 ## Assign a podDisruptionBudget to the operator
2817 podDisruptionBudget:
2820 # maxUnavailable: ""
2821 unhealthyPodEvictionPolicy: AlwaysAllow
2822 ## Assign a PriorityClassName to pods if set
2823 # priorityClassName: ""
2825 ## Define Log Format
2826 # Use logfmt (default) or json logging
2829 ## Decrease log verbosity to errors only
2832 ## If true, the operator will create and maintain a service for scraping kubelets
2833 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/helm/prometheus-operator/README.md
2836 namespace: kube-system
2838 ## Use '{{ template "kube-prometheus-stack.fullname" . }}-kubelet' by default
2840 ## Create Endpoints objects for kubelet targets.
2841 kubeletEndpointsEnabled: true
2842 ## Create EndpointSlice objects for kubelet targets.
2843 kubeletEndpointSliceEnabled: false
2844 ## Extra arguments to pass to prometheusOperator
2845 # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/platform/operator.md
2847 # - --labels="cluster=talos-cluster"
2849 ## Create a servicemonitor for the operator
2852 ## If true, create a serviceMonitor for prometheus operator
2855 ## Labels for ServiceMonitor
2856 additionalLabels: {}
2857 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
2860 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
2863 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
2866 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2869 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2871 labelNameLengthLimit: 0
2872 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2874 labelValueLengthLimit: 0
2875 ## Scrape timeout. If not set, the Prometheus default scrape timeout is used.
2877 ## Metric relabel configs to apply to samples before ingestion.
2879 metricRelabelings: []
2881 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
2882 # sourceLabels: [__name__]
2884 # relabel configs to apply to samples before ingestion.
2887 # - sourceLabels: [__meta_kubernetes_pod_node_name]
2890 # targetLabel: nodename
2893 ## Resource limits & requests
2903 ## Operator Environment
2908 # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
2909  # because the control plane managed by AWS cannot communicate with the pods' IP CIDR, so admission webhooks do not work
2912 ## Define which Nodes the Pods are scheduled on.
2913 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
2916 ## Tolerations for use with node taints
2917 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
2923 # effect: "NoSchedule"
2925 ## Assign custom affinity rules to the prometheus operator
2926 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
2930 # requiredDuringSchedulingIgnoredDuringExecution:
2931 # nodeSelectorTerms:
2932 # - matchExpressions:
2933 # - key: kubernetes.io/e2e-az-name
2942 # - ns1.svc.cluster-domain.example
2943 # - my.dns.search.suffix
2954 type: RuntimeDefault
2955 ## Setup hostUsers for prometheus-operator
2956 ## ref: https://kubernetes.io/docs/concepts/workloads/pods/user-namespaces/
2958 ## Container-specific security context configuration
2959 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
2961 containerSecurityContext:
2962 allowPrivilegeEscalation: false
2963 readOnlyRootFilesystem: true
2967 # Enable vertical pod autoscaler support for prometheus-operator
2968 verticalPodAutoscaler:
2970 # Recommender responsible for generating recommendation for the object.
2971 # List should be empty (then the default recommender will generate the recommendation)
2972 # or contain exactly one recommender.
2974 # - name: custom-recommender-performance
2976 # List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
2977 controlledResources: []
2978 # Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits.
2979 # controlledValues: RequestsAndLimits
2981 # Define the max allowed resources for the pod
2985 # Define the min allowed resources for the pod
2991 # Specifies minimal number of replicas which need to be alive for VPA Updater to attempt pod eviction
2993 # Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
2994 # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "InPlaceOrRecreate".
2995 updateMode: Recreate
2996 ## Prometheus-operator image
3000 repository: chainguard-private/prometheus-operator
3001 # if not set appVersion field from Chart.yaml is used
3003 sha: sha256:e84bb830eacf095e21ba9e334fa9addf068ef6d9b6dd7e5e1144bba1bd30bae7
3004 pullPolicy: IfNotPresent
3005 ## Prometheus image to use for prometheuses managed by the operator
3007 # prometheusDefaultBaseImage: prometheus/prometheus
3009 ## Prometheus image registry to use for prometheuses managed by the operator
3011 # prometheusDefaultBaseImageRegistry: quay.io
3013 ## Alertmanager image to use for alertmanagers managed by the operator
3015 # alertmanagerDefaultBaseImage: prometheus/alertmanager
3017 ## Alertmanager image registry to use for alertmanagers managed by the operator
3019 # alertmanagerDefaultBaseImageRegistry: quay.io
3021 ## Prometheus-config-reloader
3023 prometheusConfigReloader:
3026 repository: chainguard-private/prometheus-config-reloader
3027 # if not set appVersion field from Chart.yaml is used
3029 sha: sha256:31e1e672a0f85742826d390870b4f8e522f174306a29a5053a1980a0244afc2a
3030 # add prometheus config reloader liveness and readiness probe. Default: false
3032 # resource config for prometheusConfigReloader
3040 ## Thanos side-car image when configured
3044 repository: chainguard-private/thanos
3046 sha: sha256:d4c37033c9f29424057f65ebe77a557e43ee0c1269c2f3b87368c0abbc1f74bd
3047 ## Set a Label Selector to filter watched prometheus and prometheusAgent
3049 prometheusInstanceSelector: ""
3050 ## Set a Label Selector to filter watched alertmanager
3052 alertmanagerInstanceSelector: ""
3053 ## Set a Label Selector to filter watched thanosRuler
3054 thanosRulerInstanceSelector: ""
3055 ## Set a Field Selector to filter watched secrets
3057 secretFieldSelector: "type!=kubernetes.io/dockercfg,type!=kubernetes.io/service-account-token,type!=helm.sh/release.v1"
3058 ## If false then the user will opt out of automounting API credentials.
3060 automountServiceAccountToken: true
3061 ## Additional volumes
3064 ## Additional volume mounts
3066 extraVolumeMounts: []
3067## Deploy a Prometheus instance
3071 ## Toggle prometheus into agent mode
3072 ## Note many of features described below (e.g. rules, query, alerting, remote read, thanos) will not work in agent mode.
3073 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/platform/prometheus-agent.md
3076 ## Annotations for Prometheus
3079 ## Additional labels for Prometheus
3081 additionalLabels: {}
3082 ## Configure network policy for the prometheus
3085 ## Flavor of the network policy to use.
3087 # * kubernetes for networking.k8s.io/v1/NetworkPolicy
3088 # * cilium for cilium.io/v2/CiliumNetworkPolicy
3103 ## Service account for Prometheuses to use.
3104 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
3110 automountServiceAccountToken: true
3111 # Service for thanos service discovery on sidecar
3112  # Enabling this lets Thanos Query use
3113  # `--store=dnssrv+_grpc._tcp.${kube-prometheus-stack.fullname}-thanos-discovery.${namespace}.svc.cluster.local` to discover the
3114  # Thanos sidecar on prometheus nodes
3115 # (Please remember to change ${kube-prometheus-stack.fullname} and ${namespace}. Not just copy and paste!)
3120 ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
3122 externalTrafficPolicy: Cluster
3126 ## Service dual stack
3130 ipFamilies: ["IPv6", "IPv4"]
3131 ipFamilyPolicy: "PreferDualStack"
3136 ## HTTP port config (for metrics)
3139 targetHttpPort: "http"
3140 ## ClusterIP to assign
3141 # Default is to make this a headless service ("None")
3143 ## Port to expose on each node, if service type is NodePort
3147 # ServiceMonitor to scrape Sidecar metrics
3148 # Needs thanosService to be enabled as well
3149 thanosServiceMonitor:
3152 ## Additional labels
3154 additionalLabels: {}
3155 ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
3157 ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
3158 ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#tlsconfig
3161 ## Metric relabel configs to apply to samples before ingestion.
3162 metricRelabelings: []
3163 ## relabel configs to apply to samples before ingestion.
3165 # Service for external access to sidecar
3166 # Enabling this creates a service to expose thanos-sidecar outside the cluster.
3167 thanosServiceExternal:
3172 loadBalancerSourceRanges: []
3177 ## HTTP port config (for metrics)
3180 targetHttpPort: "http"
3181 ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
3183 externalTrafficPolicy: Cluster
3187 ## Port to expose on each node
3191 ## Configuration for Prometheus service
3200 ipFamilies: ["IPv6", "IPv4"]
3201 ipFamilyPolicy: "PreferDualStack"
3202 ## Port for Prometheus Service to listen on
3205 ## To be used with a proxy extraContainer port
3207 ## Port for Prometheus Reloader to listen on
3209 reloaderWebPort: 8080
3210 ## Port to expose for Prometheus Reloader
3211 ## Only used if service.type is 'NodePort'
3213 reloaderWebNodePort: null
3214 ## List of IP addresses at which the Prometheus server service is available
3215 ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
3218 ## Port to expose on each node
3219 ## Only used if service.type is 'NodePort'
3223 ## Only use if service.type is "LoadBalancer"
3225 loadBalancerSourceRanges: []
3226 ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
3228 externalTrafficPolicy: Cluster
3232 ## Additional ports to open for Prometheus service
3236 # - name: oauth-proxy
3239 # - name: oauth-metrics
3243 ## Consider that all endpoints are considered "ready" even if the Pods themselves are not
3244 ## Ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec
3245 publishNotReadyAddresses: false
3246 ## If you want to make sure that connections from a particular client are passed to the same Pod each time
3247 ## Accepts 'ClientIP' or 'None'
3249 sessionAffinity: None
3250 ## If you want to modify the ClientIP sessionAffinity timeout
3251 ## The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP"
3253 sessionAffinityConfig:
3255 timeoutSeconds: 10800
3256 ## Configuration for creating a separate Service for each statefulset Prometheus replica
3261 ## Port for Prometheus Service per replica to listen on
3264 ## To be used with a proxy extraContainer port
3266 ## Port to expose on each node
3267 ## Only used if servicePerReplica.type is 'NodePort'
3270 ## Loadbalancer source IP ranges
3271 ## Only used if servicePerReplica.type is "LoadBalancer"
3272 loadBalancerSourceRanges: []
3273 ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
3275 externalTrafficPolicy: Cluster
3279 ## Service dual stack
3283 ipFamilies: ["IPv6", "IPv4"]
3284 ipFamilyPolicy: "PreferDualStack"
3285 ## Configure pod disruption budgets for Prometheus
3286 ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
3288 podDisruptionBudget:
3291 # maxUnavailable: ""
3292 unhealthyPodEvictionPolicy: AlwaysAllow
3293 ## Enable vertical pod autoscaler support for Prometheus
3294 ## ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler
3296 verticalPodAutoscaler:
3298 # Recommender responsible for generating recommendation for the object.
3299 # List should be empty (then the default recommender will generate the recommendation)
3300 # or contain exactly one recommender.
3302 # - name: custom-recommender-performance
3304 # List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
3305 controlledResources: []
3306 # Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits.
3307 # controlledValues: RequestsAndLimits
3309 # Define the max allowed resources for the pod
3313 # Define the min allowed resources for the pod
3319 # Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
3320 # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "InPlaceOrRecreate".
3321 updateMode: Recreate
3322 # Ingress exposes thanos sidecar outside the cluster
3325 ingressClassName: ""
3329 ## Port to expose on each node
3330 ## Only used if service.type is 'NodePort'
3333 ## Hosts must be provided if Ingress is enabled.
3336 # - thanos-gateway.domain.com
3338 ## Paths to use for ingress rules
3343 ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
3344 ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
3345 # pathType: ImplementationSpecific
3347 ## TLS configuration for Thanos Ingress
3348 ## Secret must be manually created in the namespace
3351 # - secretName: thanos-gateway-tls
3353 # - thanos-gateway.domain.com
3355 ## ExtraSecret can be used to store various data in an extra secret
3356 ## (use it for example to store hashed basic auth credentials)
3358 ## if not set, name will be auto generated
3363 # foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
3364 # someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.
3368 ingressClassName: ""
3371 ## Redirect ingress to an additional defined port on the service
3375 ## Must be provided if Ingress is enabled.
3378 # - prometheus.domain.com
3380 ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix
3385 ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
3386 ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
3387 # pathType: ImplementationSpecific
3389 ## TLS configuration for Prometheus Ingress
3390 ## Secret must be manually created in the namespace
3393 # - secretName: prometheus-general-tls
3395 # - prometheus.example.com
3396 # -- BETA: Configure the gateway routes for the chart here.
3397 # More routes can be added by adding a dictionary key like the 'main' route.
3398 # Be aware that this is an early beta of this feature,
3399 # kube-prometheus-stack does not guarantee this works and is subject to change.
3400 # Being BETA this can/will change in the future without notice, do not use unless you want to take that risk
3401 # [[ref]](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1alpha2)
3404 # -- Enables or disables the route
3406 # -- Set the route apiVersion, e.g. gateway.networking.k8s.io/v1 or gateway.networking.k8s.io/v1alpha2
3407 apiVersion: gateway.networking.k8s.io/v1
3408 # -- Set the route kind
3409 # Valid options are GRPCRoute, HTTPRoute, TCPRoute, TLSRoute, UDPRoute
3414 # - my-filter.example.com
3418 # -- create http route for redirect (https://gateway-api.sigs.k8s.io/guides/http-redirect-rewrite/#http-to-https-redirects)
3419 ## Take care that you only enable this on the http listener of the gateway to avoid an infinite redirect.
3420 ## matches, filters and additionalRules will be ignored if this is set to true. Be aware.
3421 httpsRedirect: false
3426 ## Filters define the filters that are applied to requests that match this rule.
3428 ## Session persistence configuration for the route rule.
3429 sessionPersistence: {}
3430 # sessionName: route
3432 # absoluteTimeout: 12h
3434 # lifetimeType: Permanent
3436 ## Additional custom rules that can be added to the route
3438 ## Configuration for creating an Ingress that will map to each Prometheus replica service
3439 ## prometheus.servicePerReplica must be enabled
3443 ingressClassName: ""
3446 ## Final form of the hostname for each per replica ingress is
3447 ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }}
3449 ## Prefix for the per replica ingress that will have `-$replicaNumber`
3450 ## appended to the end
3452 ## Domain that will be used for the per replica ingress
3454 ## Paths to use for ingress rules
3459 ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
3460 ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
3461 # pathType: ImplementationSpecific
3463 ## Secret name containing the TLS certificate for Prometheus per replica ingress
3464 ## Secret must be manually created in the namespace
3466 ## Separated secret for each per replica Ingress. Can be used together with cert-manager
3468 tlsSecretPerReplica:
3470 ## Final form of the secret for each per replica ingress is
3471 ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }}
3473 prefix: "prometheus"
3475 ## If true, create a serviceMonitor for prometheus
3478 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
3481 ## Additional labels
3483 additionalLabels: {}
3484 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
3487 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
3490 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
3493 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
3495 labelNameLengthLimit: 0
3496 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
3498 labelValueLengthLimit: 0
3499 ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
3501 ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
3502 ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#tlsconfig
3505 ## Metric relabel configs to apply to samples before ingestion.
3507 metricRelabelings: []
3509 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
3510 # sourceLabels: [__name__]
3512 # relabel configs to apply to samples before ingestion.
3515 # - sourceLabels: [__meta_kubernetes_pod_node_name]
3518 # targetLabel: nodename
3522 ## Additional Endpoints
3524 additionalEndpoints: []
3525 # - port: oauth-metrics
3527 ## Settings affecting prometheusSpec
3528 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#prometheusspec
3531 ## Statefulset's persistent volume claim retention policy
3532 ## whenDeleted and whenScaled determine whether
3533 ## statefulset's PVCs are deleted (true) or retained (false)
3534 ## on scaling down and deleting statefulset, respectively.
3535 ## Requires Kubernetes version 1.27.0+.
3536 ## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
3537 persistentVolumeClaimRetentionPolicy: {}
3538 # whenDeleted: Retain
3539 # whenScaled: Retain
3541 ## If true, pass --storage.tsdb.max-block-duration=2h to prometheus. This is already done if using Thanos
3543 disableCompaction: false
3544 ## AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in the pod,
3545 ## If the field isn't set, the operator mounts the service account token by default.
3546 ## Warning: be aware that by default, Prometheus requires the service account token for Kubernetes service discovery,
3547 ## It is possible to use strategic merge patch to project the service account token into the 'prometheus' container.
3548 automountServiceAccountToken: true
3550 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#apiserverconfig
3553 ## Allows setting additional arguments for the Prometheus container
3554 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.Prometheus
3556 ## Convert all classic histograms to native histograms with custom buckets.
3557 ## This corresponds to the 'convert_classic_histograms_to_nhcb' field in Prometheus configuration.
3559 convertClassicHistogramsToNHCB: false
3560 ## Enable scraping of classic histograms that are also exposed as native histograms.
3561 ## This corresponds to the 'always_scrape_classic_histograms' field in Prometheus configuration.
3563 scrapeClassicHistograms: false
3564 ## Enable scraping of native histograms.
3565 ## This corresponds to the 'scrape_native_histograms' field in Prometheus configuration.
3567 scrapeNativeHistograms: false
3568 ## File to which scrape failures are logged.
3569 ## Reloading the configuration will reopen the file.
3570 ## Defaults to empty (disabled)
3571 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.Prometheus
3573 scrapeFailureLogFile: ""
3574 ## Interval between consecutive scrapes.
3576 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/release-0.44/pkg/prometheus/promcfg.go#L180-L183
3579 ## Number of seconds to wait for target to respond before erroring
3582 ## List of scrape classes to expose to scraping objects such as
3583 ## PodMonitors, ServiceMonitors, Probes and ScrapeConfigs.
3586 # - name: istio-mtls
3589 # caFile: /etc/prometheus/secrets/istio.default/root-cert.pem
3590 # certFile: /etc/prometheus/secrets/istio.default/cert-chain.pem
3592 ## PodTargetLabels are appended to the `spec.podTargetLabels` field of all PodMonitor and ServiceMonitor objects.
3597 ## Interval between consecutive evaluations.
3599 evaluationInterval: ""
3600 ## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP.
3603 ## enableOTLPReceiver enables the OTLP receiver for Prometheus.
3604 enableOTLPReceiver: false
3605 ## EnableAdminAPI enables Prometheus the administrative HTTP API which includes functionality such as deleting time series.
3606 ## This is disabled by default.
3607 ## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis
3609 enableAdminAPI: false
3610 ## Sets version of Prometheus overriding the Prometheus version as derived
3611 ## from the image tag. Useful in cases where the tag does not follow semver v2.
3613 ## WebTLSConfig defines the TLS parameters for HTTPS
3614 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#webtlsconfig
3616 ## Exemplars related settings that are runtime reloadable.
3617 ## It requires to enable the exemplar storage feature to be effective.
3619 ## Maximum number of exemplars stored in memory for all series.
3620 ## If not set, Prometheus uses its default value.
3621 ## A value of zero or less than zero disables the storage.
3624 # EnableFeatures API enables access to Prometheus disabled features.
3625 # ref: https://prometheus.io/docs/prometheus/latest/feature_flags/
3627 # - exemplar-storage
3629 ## https://prometheus.io/docs/guides/opentelemetry
3632 # promoteResourceAttributes: []
3633 # keepIdentifyingResourceAttributes: false
3634 # translationStrategy: NoUTF8EscapingWithSuffixes
3635 # convertHistogramsToNHCB: false
3639 ## Image of Prometheus.
3643 repository: chainguard-private/prometheus
3645 sha: sha256:fb0dcc4117889b88a2a432f2398140287c2de92752251cd3ae61fe6bdb68ebd0
3646 pullPolicy: IfNotPresent
3647 ## Tolerations for use with node taints
3648 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
3654 # effect: "NoSchedule"
3656 ## If specified, the pod's topology spread constraints.
3657 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
3659 topologySpreadConstraints: []
3661 # topologyKey: topology.kubernetes.io/zone
3662 # whenUnsatisfiable: DoNotSchedule
3669 disableAlerting: false
3670 ## Alertmanagers to which alerts will be sent
3671 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#alertmanagerendpoints
3673 ## Default configuration will connect to the alertmanager deployed as part of this release
3675 alertingEndpoints: []
3682 # bearerTokenFile: ""
3685 ## External labels to add to any time series or alerts when communicating with external systems
3688 ## enable --web.enable-remote-write-receiver flag on prometheus-server
3690 enableRemoteWriteReceiver: false
3691 ## Name of the external label used to denote replica name
3693 replicaExternalLabelName: ""
3694 ## If true, the Operator won't add the external label used to denote replica name
3696 replicaExternalLabelNameClear: false
3697 ## Name of the external label used to denote Prometheus instance name
3699 prometheusExternalLabelName: ""
3700 ## If true, the Operator won't add the external label used to denote Prometheus instance name
3702 prometheusExternalLabelNameClear: false
3703 ## External URL at which Prometheus will be reachable.
3706 ## Define which Nodes the Pods are scheduled on.
3707 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
3710 ## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
3711 ## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not
3712 ## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated
3713 ## with the new list of secrets.
3716 ## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
3717 ## The ConfigMaps are mounted into /etc/prometheus/configmaps/.
3720 ## QuerySpec defines the query command line flags when starting Prometheus.
3721 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#queryspec
3724 ## If nil, select own namespace. Namespaces to be selected for PrometheusRules discovery.
3725 ruleNamespaceSelector: {}
3726 ## Example which selects PrometheusRules in namespaces with label "prometheus" set to "somelabel"
3727 # ruleNamespaceSelector:
3729 # prometheus: somelabel
3731 ## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the
3732 ## prometheus resource to be created with selectors based on values in the helm deployment,
3733 ## which will also match the PrometheusRule resources created
3735 ruleSelectorNilUsesHelmValues: true
3736 ## PrometheusRules to be selected for target discovery.
3737 ## If {}, select all PrometheusRules
3740 ## Example which select all PrometheusRules resources
3741 ## with label "prometheus" with values any of "example-rules" or "example-rules-2"
3750 ## Example which select all PrometheusRules resources with label "role" set to "example-rules"
3753 # role: example-rules
3755 ## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the
3756 ## prometheus resource to be created with selectors based on values in the helm deployment,
3757 ## which will also match the servicemonitors created
3759 serviceMonitorSelectorNilUsesHelmValues: true
3760 ## ServiceMonitors to be selected for target discovery.
3761 ## If {}, select all ServiceMonitors
3763 serviceMonitorSelector: {}
3764 ## Example which selects ServiceMonitors with label "prometheus" set to "somelabel"
3765 # serviceMonitorSelector:
3767 # prometheus: somelabel
3769 ## Namespaces to be selected for ServiceMonitor discovery.
3771 serviceMonitorNamespaceSelector: {}
3772 ## Example which selects ServiceMonitors in namespaces with label "prometheus" set to "somelabel"
3773 # serviceMonitorNamespaceSelector:
3775 # prometheus: somelabel
3777 ## If true, a nil or {} value for prometheus.prometheusSpec.podMonitorSelector will cause the
3778 ## prometheus resource to be created with selectors based on values in the helm deployment,
3779 ## which will also match the podmonitors created
3781 podMonitorSelectorNilUsesHelmValues: true
3782 ## PodMonitors to be selected for target discovery.
3783 ## If {}, select all PodMonitors
3785 podMonitorSelector: {}
3786 ## Example which selects PodMonitors with label "prometheus" set to "somelabel"
3787 # podMonitorSelector:
3789 # prometheus: somelabel
3791 ## If nil, select own namespace. Namespaces to be selected for PodMonitor discovery.
3792 podMonitorNamespaceSelector: {}
3793 ## Example which selects PodMonitor in namespaces with label "prometheus" set to "somelabel"
3794 # podMonitorNamespaceSelector:
3796 # prometheus: somelabel
3798 ## If true, a nil or {} value for prometheus.prometheusSpec.probeSelector will cause the
3799 ## prometheus resource to be created with selectors based on values in the helm deployment,
3800 ## which will also match the probes created
3802 probeSelectorNilUsesHelmValues: true
3803 ## Probes to be selected for target discovery.
3804 ## If {}, select all Probes
3807 ## Example which selects Probes with label "prometheus" set to "somelabel"
3810 # prometheus: somelabel
3812 ## If nil, select own namespace. Namespaces to be selected for Probe discovery.
3813 probeNamespaceSelector: {}
3814 ## Example which selects Probe in namespaces with label "prometheus" set to "somelabel"
3815 # probeNamespaceSelector:
3817 # prometheus: somelabel
3819 ## If true, a nil or {} value for prometheus.prometheusSpec.scrapeConfigSelector will cause the
3820 ## prometheus resource to be created with selectors based on values in the helm deployment,
3821 ## which will also match the scrapeConfigs created
3823 ## If null and scrapeConfigSelector is also null, exclude field from the prometheusSpec
3824 ## (keeping downward compatibility with older versions of CRD)
3826 scrapeConfigSelectorNilUsesHelmValues: true
3827 ## scrapeConfigs to be selected for target discovery.
3828 ## If {}, select all scrapeConfigs
3830 scrapeConfigSelector: {}
3831 ## Example which selects scrapeConfigs with label "prometheus" set to "somelabel"
3832 # scrapeConfigSelector:
3834 # prometheus: somelabel
3836 ## If nil, select own namespace. Namespaces to be selected for scrapeConfig discovery.
3837 ## If null, exclude the field from the prometheusSpec (keeping downward compatibility with older versions of CRD)
3838 scrapeConfigNamespaceSelector: {}
3839 ## Example which selects scrapeConfig in namespaces with label "prometheus" set to "somelabel"
3840 # scrapeConfigNamespaceSelector:
3842 # prometheus: somelabel
3844 ## How long to retain metrics
3847 ## Maximum size of metrics
3848 ## Unit format should be in the form of "50GiB"
3850 ## Allow out-of-order/out-of-bounds samples ingested into Prometheus for a specified duration
3851 ## See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tsdb
3853 outOfOrderTimeWindow: 0s
3854 ## Enable compression of the write-ahead log using Snappy.
3856 walCompression: true
3857 ## If true, the Operator won't process any Prometheus configuration changes
3860 ## Number of replicas of each shard to deploy for a Prometheus deployment.
3861 ## Number of replicas multiplied by shards is the total number of Pods created.
3864 ## EXPERIMENTAL: Number of shards to distribute targets onto.
3865 ## Number of replicas multiplied by shards is the total number of Pods created.
3866 ## Note that scaling down shards will not reshard data onto remaining instances, it must be manually moved.
3867 ## Increasing shards will not reshard data either but it will continue to be available from the same instances.
3868 ## To query globally use Thanos sidecar and Thanos querier or remote write data to a central location.
3869 ## Sharding is done on the content of the `__address__` target meta-label.
3872 ## Log level for Prometheus to be configured in
3875 ## Log format for Prometheus to be configured in
3878 ## Prefix used to register routes, overriding externalUrl route.
3879 ## Useful for proxies that rewrite URLs.
3882 ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
3883 ## Metadata Labels and Annotations gets propagated to the prometheus pods.
3888 # k8s-app: prometheus
3890 ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
3891 ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
3892 ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
3893 ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
3894 podAntiAffinity: "soft"
3895 ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
3896 ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
3898 podAntiAffinityTopologyKey: kubernetes.io/hostname
3899 ## Assign custom affinity rules to the prometheus instance
3900 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
3904 # requiredDuringSchedulingIgnoredDuringExecution:
3905 # nodeSelectorTerms:
3906 # - matchExpressions:
3907 # - key: kubernetes.io/e2e-az-name
3913 ## The remote_read spec configuration for Prometheus.
3914 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#remotereadspec
3916 # - url: http://remote1/read
3917 ## additionalRemoteRead is appended to remoteRead
3918 additionalRemoteRead: []
3919 ## The remote_write spec configuration for Prometheus.
3920 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#remotewritespec
3922 # - url: http://remote1/push
3923 ## additionalRemoteWrite is appended to remoteWrite
3924 additionalRemoteWrite: []
3925 ## Enable/Disable Grafana dashboards provisioning for prometheus remote write feature
3926 remoteWriteDashboards: false
3927 ## Resource limits & requests
3933 ## Prometheus StorageSpec for persistent data
3934 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/platform/storage.md
3937 ## Using PersistentVolumeClaim
3939 # volumeClaimTemplate:
3941 # storageClassName: gluster
3942 # accessModes: ["ReadWriteOnce"]
3948 ## Using tmpfs volume
3953 # Additional volumes on the output StatefulSet definition.
3955 # Additional VolumeMounts on the output StatefulSet definition.
3957 ## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations
3958 ## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form
3959 ## as specified in the official Prometheus documentation:
3960 ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are
3961 ## appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility
3962 ## to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible
3963 ## scrape configs are going to break Prometheus after the upgrade.
3964 ## AdditionalScrapeConfigs can be defined as a list or as a templated string.
3966 ## The scrape configuration example below will find master nodes, provided they have the name .*mst.*, relabel the
3967 ## port to 2379 and allow etcd scraping provided it is running on all Kubernetes master nodes
3969 additionalScrapeConfigs: []
3970 # - job_name: kube-etcd
3971 # kubernetes_sd_configs:
3975 # ca_file: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
3976 # cert_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client
3977 # key_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
3979 # - action: labelmap
3980 # regex: __meta_kubernetes_node_label_(.+)
3981 # - source_labels: [__address__]
3983 # target_label: __address__
3984 # regex: ([^:;]+):(\d+)
3985 # replacement: ${1}:2379
3986 # - source_labels: [__meta_kubernetes_node_name]
3989 # - source_labels: [__meta_kubernetes_node_name]
3991 # target_label: node
3994 # metric_relabel_configs:
3995 # - regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone)
3998 ## If scrape config contains a repetitive section, you may want to use a template.
3999 ## In the following example, you can see how to define `gce_sd_configs` for multiple zones
4000 # additionalScrapeConfigs: |
4001 # - job_name: "node-exporter"
4003 # {{range $zone := .Values.gcp_zones}}
4004 # - project: "project1"
4011 ## If additional scrape configurations are already deployed in a single secret file you can use this section.
4012 ## Expected values are the secret name and key
4013 ## Cannot be used with additionalScrapeConfigs
4014 additionalScrapeConfigsSecret: {}
4019 ## additionalPrometheusSecretsAnnotations allows to add annotations to the kubernetes secret. This can be useful
4020 ## when deploying via spinnaker to disable versioning on the secret, strategy.spinnaker.io/versioned: 'false'
4021 additionalPrometheusSecretsAnnotations: {}
4022 ## AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified
4023 ## in the official Prometheus documentation https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config.
4024 ## AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator.
4025 ## As AlertManager configs are appended, the user is responsible to make sure it is valid. Note that using this
4026 ## feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release
4027 ## notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade.
4029 additionalAlertManagerConfigs: []
4030 # - consul_sd_configs:
4031 # - server: consul.dev.test:8500
4034 # tag_separator: ','
4036 # - metrics-prometheus-alertmanager
4038 ## If additional alertmanager configurations are already deployed in a single secret, or you want to manage
4039 ## them separately from the helm deployment, you can use this section.
4040 ## Expected values are the secret name and key
4041 ## Cannot be used with additionalAlertManagerConfigs
4042 additionalAlertManagerConfigsSecret: {}
4047 ## AdditionalAlertRelabelConfigs allows specifying Prometheus alert relabel configurations. Alert relabel configurations specified are appended
4048 ## to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the
4049 ## official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs.
4050 ## As alert relabel configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the
4051 ## possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible alert relabel
4052 ## configs are going to break Prometheus after the upgrade.
4054 additionalAlertRelabelConfigs: []
4056 # regex: prometheus_replica
4060 ## If additional alert relabel configurations are already deployed in a single secret, or you want to manage
4061 ## them separately from the helm deployment, you can use this section.
4062 ## Expected values are the secret name and key
4063 ## Cannot be used with additionalAlertRelabelConfigs
4064 additionalAlertRelabelConfigsSecret: {}
4068 ## SecurityContext holds pod-level security attributes and common container settings.
4069 ## This defaults to non root user with uid 1000 and gid 2000.
4070 ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md
4078 type: RuntimeDefault
4079 ## DNS configuration for Prometheus.
4080 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.PodDNSConfig
4082 ## DNS policy for Prometheus.
4083 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#dnspolicystring-alias
4085 ## Priority class assigned to the Pods
4087 priorityClassName: ""
4088 ## Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment.
4089 ## This section is experimental, it may change significantly without deprecation notice in any release.
4090 ## This is experimental and may change significantly without backward compatibility in any release.
4091 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#thanosspec
4094 # image: quay.io/thanos/thanos
4095 # secretProviderClass:
4099 # - resourceName: "projects/$PROJECT_ID/secrets/testsecret/versions/latest"
4100 # fileName: "objstore.yaml"
4101 ## ObjectStorageConfig configures object storage in Thanos.
4102 # objectStorageConfig:
4103 # # use existing secret, if configured, objectStorageConfig.secret will not be used
4104 # existingSecret: {}
4107 # # will render objectStorageConfig secret data and configure it to be used by Thanos custom resource,
4108 # # ignored when prometheusspec.thanos.objectStorageConfig.existingSecret is set
4109 # # https://thanos.io/tip/thanos/storage.md/#s3
4119 ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod.
4120 ## if using proxy extraContainer update targetPort with proxy container port
4123 # - name: oauth-proxy
4124 # image: quay.io/oauth2-proxy/oauth2-proxy:v7.15.2
4126 # - --upstream=http://127.0.0.1:9090
4127 # - --http-address=0.0.0.0:8081
4128 # - --metrics-address=0.0.0.0:8082
4131 # - containerPort: 8081
4134 # - containerPort: 8082
4135 # name: oauth-metrics
4139 ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
4140 ## (permissions, dir tree) on mounted volumes before starting prometheus
4142 ## PortName to use for Prometheus.
4144 portName: "http-web"
4145 ## ArbitraryFSAccessThroughSMs configures whether configuration based on a service monitor can access arbitrary files
4146 ## on the file system of the Prometheus container e.g. bearer token files.
4147 arbitraryFSAccessThroughSMs: false
4148 ## OverrideHonorLabels if set to true overrides all user configured honor_labels. If HonorLabels is set in ServiceMonitor
4149 ## or PodMonitor to true, this overrides honor_labels to false.
4150 overrideHonorLabels: false
4151 ## OverrideHonorTimestamps allows to globally enforce honoring timestamps in all scrape configs.
4152 overrideHonorTimestamps: false
4153 ## When ignoreNamespaceSelectors is set to true, namespaceSelector from all PodMonitor, ServiceMonitor and Probe objects will be ignored,
4154 ## they will only discover targets within the namespace of the PodMonitor, ServiceMonitor and Probe object,
4155 ## and servicemonitors will be installed in the default service namespace.
4156 ## Defaults to false.
4157 ignoreNamespaceSelectors: false
4158 ## EnforcedNamespaceLabel enforces adding a namespace label of origin for each alert and metric that is user created.
4159 ## The label value will always be the namespace of the object that is being created.
4160 ## Disabled by default
4161 enforcedNamespaceLabel: ""
4162 ## PrometheusRulesExcludedFromEnforce - list of prometheus rules to be excluded from enforcing of adding namespace labels.
4163 ## Works only if enforcedNamespaceLabel set to true. Make sure both ruleNamespace and ruleName are set for each pair
4164 ## Deprecated, use `excludedFromEnforcement` instead
4165 prometheusRulesExcludedFromEnforce: []
4166 ## ExcludedFromEnforcement - list of object references to PodMonitor, ServiceMonitor, Probe and PrometheusRule objects
4167 ## to be excluded from enforcing a namespace label of origin.
4168 ## Works only if enforcedNamespaceLabel set to true.
4169 ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#objectreference
4170 excludedFromEnforcement: []
4171 ## QueryLogFile specifies the file to which PromQL queries are logged. Note that this location must be writable,
4172 ## and can be persisted using an attached volume. Alternatively, the location can be set to a stdout location such
4173 ## as /dev/stdout to log query information to the default Prometheus log stream. This is only available in versions
4174 ## of Prometheus >= 2.16.0. For more details, see the Prometheus docs (https://prometheus.io/docs/guides/query-log/)
4176 # Use to set global sample_limit for Prometheus. This act as default SampleLimit for ServiceMonitor or/and PodMonitor.
4177 # Set to 'false' to disable global sample_limit. or set to a number to override the default value.
4179 # EnforcedKeepDroppedTargetsLimit defines on the number of targets dropped by relabeling that will be kept in memory.
4180 # The value overrides any spec.keepDroppedTargets set by ServiceMonitor, PodMonitor, Probe objects unless spec.keepDroppedTargets
4181 # is greater than zero and less than spec.enforcedKeepDroppedTargets. 0 means no limit.
4182 enforcedKeepDroppedTargets: 0
4183 ## EnforcedSampleLimit defines global limit on number of scraped samples that will be accepted. This overrides any SampleLimit
4184 ## set per ServiceMonitor or/and PodMonitor. It is meant to be used by admins to enforce the SampleLimit to keep overall
4185 ## number of samples/series under the desired limit. Note that if SampleLimit is lower, that value will be taken instead.
4186 enforcedSampleLimit: false
4187 ## EnforcedTargetLimit defines a global limit on the number of scraped targets. This overrides any TargetLimit set
4188 ## per ServiceMonitor or/and PodMonitor. It is meant to be used by admins to enforce the TargetLimit to keep the overall
4189 ## number of targets under the desired limit. Note that if TargetLimit is lower, that value will be taken instead, except
4190 ## if either value is zero, in which case the non-zero value will be used. If both values are zero, no limit is enforced.
4191 enforcedTargetLimit: false
4192 ## Per-scrape limit on number of labels that will be accepted for a sample. If more than this number of labels are present
4193 ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
4194 ## 2.27.0 and newer.
4195 enforcedLabelLimit: false
4196 ## Per-scrape limit on length of labels name that will be accepted for a sample. If a label name is longer than this number
4197 ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
4198 ## 2.27.0 and newer.
4199 enforcedLabelNameLengthLimit: false
4200 ## Per-scrape limit on length of labels value that will be accepted for a sample. If a label value is longer than this
4201 ## number post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus
4202 ## versions 2.27.0 and newer.
4203 enforcedLabelValueLengthLimit: false
4204 ## AllowOverlappingBlocks enables vertical compaction and vertical query merge in Prometheus. This is still experimental
4205 ## in Prometheus so it may change in any upcoming release.
4206 allowOverlappingBlocks: false
4207 ## Specifies the validation scheme for metric and label names.
4208 ## Supported values are: Legacy, UTF8
4209 nameValidationScheme: ""
4210 ## Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to
4211 ## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).
4213 ## Duration in seconds the pod needs to terminate gracefully.
4214 ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination
4215 terminationGracePeriodSeconds: ~
4216 # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
4217 # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working
4218 # Use the host's network namespace if true. Make sure to understand the security implications if you want to enable it.
4219 # When hostNetwork is enabled, this will set dnsPolicy to ClusterFirstWithHostNet automatically.
4221 ## Use the host's user namespace for Prometheus pods.
4222 ## ref: https://kubernetes.io/docs/concepts/workloads/pods/user-namespaces/
4224 # HostAlias holds the mapping between IP and hostnames that will be injected
4225 # as an entry in the pod's hosts file.
4232 ## TracingConfig configures tracing in Prometheus.
4233 ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#prometheustracingconfig
4235 ## Defines the service discovery role used to discover targets from ServiceMonitor objects and Alertmanager endpoints.
4236 ## If set, the value should be either "Endpoints" or "EndpointSlice". If unset, the operator assumes the "Endpoints" role.
4237 serviceDiscoveryRole: ""
4238 ## Pod management policy. Kubernetes default is OrderedReady but prometheus-operator default is Parallel.
4239 ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
4240 podManagementPolicy: ""
4241 ## Update strategy for the StatefulSet.
4242 ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
4244 # type: RollingUpdate
4248 ## Additional configuration which is not covered by the properties above. (passed through tpl)
4249 additionalConfig: {}
4250 ## Additional configuration which is not covered by the properties above.
4251 ## Useful, if you need advanced templating inside alertmanagerSpec.
4252 ## Otherwise, use prometheus.prometheusSpec.additionalConfig (passed through tpl)
4253 additionalConfigString: ""
4254 ## Defines the maximum time that the `prometheus` container's startup probe
4255 ## will wait before being considered failed. The startup probe will return
4256 ## success after the WAL replay is complete. If set, the value should be
4257 ## greater than 60 (seconds). Otherwise it will be equal to 900 seconds (15
4259 maximumStartupDurationSeconds: 0
4260 ## Set default scrapeProtocols for Prometheus instances
4261 ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#scrapeprotocolstring-alias
4263 additionalRulesForClusterRole: []
4264 # - apiGroups: [ "" ]
4267 # verbs: [ "get", "list", "watch" ]
4269 additionalServiceMonitors: []
4270 ## Name of the ServiceMonitor to create
4274 ## Additional labels to set used for the ServiceMonitorSelector. Together with standard labels from
4277 # additionalLabels: {}
4279 ## Service label for use in assembling a job name of the form <label value>-<port>
4280 ## If no label is specified, the service name is used.
4284 ## labels to transfer from the kubernetes service to the target
4288 ## labels to transfer from the kubernetes pods to the target
4290 # podTargetLabels: []
4292 ## Label selector for services to which this ServiceMonitor applies
4295 ## Example which selects all services to be monitored
4296 ## with label "monitoredby" with values any of "example-service-1" or "example-service-2"
4298 # - key: "monitoredby"
4301 # - example-service-1
4302 # - example-service-2
4304 ## label selector for services
4308 ## Namespaces from which services are selected
4310 # namespaceSelector:
4311 ## Match any namespace
4315 ## Explicit list of namespace names to select
4319 ## Endpoints of the selected service to be monitored
4322 ## Name of the endpoint's service port
4323 ## Mutually exclusive with targetPort
4326 ## Name or number of the endpoint's target port
4327 ## Mutually exclusive with port
4330 ## File containing bearer token to be used when scraping targets
4332 # bearerTokenFile: ""
4334 ## Interval at which metrics should be scraped
4338 ## HTTP path to scrape for metrics
4342 ## HTTP scheme to use for scraping
4346 ## TLS configuration to use when scraping the endpoint
4350 ## Path to the CA file
4354 ## Path to client certificate file
4358 ## Skip certificate verification
4360 # insecureSkipVerify: false
4362 ## Path to client key file
4366 ## Server name used to verify host name
4370 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
4371 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
4373 # metricRelabelings: []
4375 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
4376 # sourceLabels: [__name__]
4378 ## RelabelConfigs to apply to samples before scraping
4379 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
4382 # - sourceLabels: [__meta_kubernetes_pod_node_name]
4385 # targetLabel: nodename
4389 ## Fallback scrape protocol used by Prometheus for scraping metrics
4390 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.ScrapeProtocol
4392 # fallbackScrapeProtocol: ""
4394 ## Attaches node metadata to the discovered targets
4395 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.AttachMetadata
4399 additionalPodMonitors: []
4400 ## Name of the PodMonitor to create
4403## Additional labels to set used for the PodMonitorSelector. Together with standard labels from
4406# additionalLabels: {}
4408## Pod label for use in assembling a job name of the form <label value>-<port>
4409## If no label is specified, the pod endpoint name is used.
4413## Label selector for pods to which this PodMonitor applies
4416## Example which selects all Pods to be monitored
4417## with label "monitoredby" with values any of "example-pod-1" or "example-pod-2"
4419# - key: "monitoredby"
4425## label selector for pods
4429## PodTargetLabels transfers labels on the Kubernetes Pod onto the target.
4431# podTargetLabels: {}
4433## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
4437## Namespaces from which pods are selected
4440## Match any namespace
4444## Explicit list of namespace names to select
4448## Endpoints of the selected pods to be monitored
4449## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#podmetricsendpoint
4451# podMetricsEndpoints: []
4453## Fallback scrape protocol used by Prometheus for scraping metrics
4454## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.ScrapeProtocol
4456# fallbackScrapeProtocol: ""
4458## Attaches node metadata to the discovered targets
4459## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.AttachMetadata
4464## Configuration for thanosRuler
4465## ref: https://thanos.io/tip/components/rule.md/
4468 ## Deploy thanosRuler
4471 ## Annotations for ThanosRuler
4474 ## Service account for ThanosRuler to use.
4475 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
4481 ## Configure pod disruption budgets for ThanosRuler
4482 ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
4484 podDisruptionBudget:
4487 # maxUnavailable: ""
4488 unhealthyPodEvictionPolicy: AlwaysAllow
4491 ingressClassName: ""
4494 ## Hosts must be provided if Ingress is enabled.
4497 # - thanosruler.domain.com
4499 ## Paths to use for ingress rules - one path should match the thanosruler.routePrefix
4504 ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
4505 ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
4506 # pathType: ImplementationSpecific
4508 ## TLS configuration for ThanosRuler Ingress
4509 ## Secret must be manually created in the namespace
4512 # - secretName: thanosruler-general-tls
4514 # - thanosruler.example.com
4515 # -- BETA: Configure the gateway routes for the chart here.
4516 # More routes can be added by adding a dictionary key like the 'main' route.
4517 # Be aware that this is an early beta of this feature,
4518 # kube-prometheus-stack does not guarantee this works and is subject to change.
4519 # Being BETA this can/will change in the future without notice, do not use unless you want to take that risk
4520 # [[ref]](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1alpha2)
4523 # -- Enables or disables the route
4525 # -- Set the route apiVersion, e.g. gateway.networking.k8s.io/v1 or gateway.networking.k8s.io/v1alpha2
4526 apiVersion: gateway.networking.k8s.io/v1
4527 # -- Set the route kind
4528 # Valid options are GRPCRoute, HTTPRoute, TCPRoute, TLSRoute, UDPRoute
4533 # - my-filter.example.com
4537 # -- create http route for redirect (https://gateway-api.sigs.k8s.io/guides/http-redirect-rewrite/#http-to-https-redirects)
4538 ## Take care that you only enable this on the http listener of the gateway to avoid an infinite redirect.
4539 ## matches, filters and additionalRules will be ignored if this is set to true. Be aware.
4540 httpsRedirect: false
4545 ## Filters define the filters that are applied to requests that match this rule.
4547 ## Session persistence configuration for the route rule.
4548 sessionPersistence: {}
4549 # sessionName: route
4551 # absoluteTimeout: 12h
4553 # lifetimeType: Permanent
4555 ## Additional custom rules that can be added to the route
4557 ## Configuration for ThanosRuler service
4566 ipFamilies: ["IPv6", "IPv4"]
4567 ipFamilyPolicy: "PreferDualStack"
4568 ## Port for ThanosRuler Service to listen on
4571 ## To be used with a proxy extraContainer port
4574 ## Port to expose on each node
4575 ## Only used if service.type is 'NodePort'
4578 ## List of IP addresses at which the Prometheus server service is available
4579 ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
4582 ## Additional ports to open for ThanosRuler service
4586 loadBalancerSourceRanges: []
4587 ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
4589 externalTrafficPolicy: Cluster
4593 ## Configuration for creating a ServiceMonitor for the ThanosRuler service
4596 ## If true, create a serviceMonitor for thanosRuler
4599 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
4602 ## Additional labels
4604 additionalLabels: {}
4605 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
4608 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
4611 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
4614 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
4616 labelNameLengthLimit: 0
4617 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
4619 labelValueLengthLimit: 0
4620 ## proxyUrl: URL of a proxy that should be used for scraping.
4623 ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
4625 ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
4626 ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#tlsconfig
4629 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
4630 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
4632 metricRelabelings: []
4634 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
4635 # sourceLabels: [__name__]
4637 ## RelabelConfigs to apply to samples before scraping
4638 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
4641 # - sourceLabels: [__meta_kubernetes_pod_node_name]
4644 # targetLabel: nodename
4648 ## Additional Endpoints
4650 additionalEndpoints: []
4651 # - port: oauth-metrics
4653 ## Settings affecting thanosRulerSpec
4654 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#thanosrulerspec
4657 ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
4658 ## Metadata Labels and Annotations gets propagated to the ThanosRuler pods.
4663 ## Image of ThanosRuler
4667 repository: chainguard-private/thanos
4669 sha: sha256:d4c37033c9f29424057f65ebe77a557e43ee0c1269c2f3b87368c0abbc1f74bd
4670 ## Namespaces to be selected for PrometheusRules discovery.
4671 ## If nil, select own namespace. Namespaces to be selected for ServiceMonitor discovery.
4672 ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#namespaceselector for usage
4674 ruleNamespaceSelector: {}
4675 ## If true, a nil or {} value for thanosRuler.thanosRulerSpec.ruleSelector will cause the
4676 ## prometheus resource to be created with selectors based on values in the helm deployment,
4677 ## which will also match the PrometheusRule resources created
4679 ruleSelectorNilUsesHelmValues: true
4680 ## PrometheusRules to be selected for target discovery.
4681 ## If {}, select all PrometheusRules
4684 ## Example which select all PrometheusRules resources
4685 ## with label "prometheus" with values any of "example-rules" or "example-rules-2"
4694 ## Example which select all PrometheusRules resources with label "role" set to "example-rules"
4697 # role: example-rules
4699 ## Define Log Format
4700 # Use logfmt (default) or json logging
4702 ## Log level for ThanosRuler to be configured with.
4705 ## Size is the expected size of the thanosRuler cluster. The controller will eventually make the size of the
4706 ## running cluster equal to the expected size.
4708 ## Time duration ThanosRuler shall retain data for. Default is '24h', and must match the regular expression
4709 ## [0-9]+(ms|s|m|h) (milliseconds, seconds, minutes, hours).
4712 ## Interval between consecutive evaluations.
4714 evaluationInterval: ""
4715 ## Storage is the definition of how storage will be used by the ThanosRuler instances.
4716 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/platform/storage.md
4719 # volumeClaimTemplate:
4721 # storageClassName: gluster
4722 # accessModes: ["ReadWriteOnce"]
4728 ## AlertmanagerConfig define configuration for connecting to alertmanager.
4729 ## Only available with Thanos v0.10.0 and higher. Maps to the alertmanagers.config Thanos Ruler arg.
4730 alertmanagersConfig:
4731 # use existing secret, if configured, alertmanagersConfig.secret will not be used
4735 # will render alertmanagersConfig secret data and configure it to be used by Thanos Ruler custom resource, ignored when alertmanagersConfig.existingSecret is set
4736 # https://thanos.io/tip/components/rule.md/#alertmanager
4742 # username: some_user
4743 # password: some_pass
4745 # - alertmanager.thanos.io
4748 ## DEPRECATED. Define URLs to send alerts to Alertmanager. For Thanos v0.10.0 and higher, alertmanagersConfig should be used instead.
4749 ## Note: this field will be ignored if alertmanagersConfig is specified. Maps to the alertmanagers.url Thanos Ruler arg.
4752 ## The external URL the Thanos Ruler instances will be available under. This is necessary to generate correct URLs. This is necessary if Thanos Ruler is not served from root of a DNS name. string false
4755 ## If true, http://{{ template "kube-prometheus-stack.thanosRuler.name" . }}.{{ template "kube-prometheus-stack.namespace" . }}:{{ .Values.thanosRuler.service.port }}
4756 ## will be used as value for externalPrefix
4757 externalPrefixNilUsesHelmValues: true
4758 ## The route prefix ThanosRuler registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true,
4759 ## but the server serves requests under a different route prefix. For example for use with kubectl proxy.
4762 ## ObjectStorageConfig configures object storage in Thanos
4763 objectStorageConfig:
4764 # use existing secret, if configured, objectStorageConfig.secret will not be used
4768 # will render objectStorageConfig secret data and configure it to be used by Thanos Ruler custom resource, ignored when objectStorageConfig.existingSecret is set
4769 # https://thanos.io/tip/thanos/storage.md/#s3
4778 ## Labels by name to drop before sending to alertmanager
4779 ## Maps to the --alert.label-drop flag of thanos ruler.
4781 ## QueryEndpoints defines Thanos querier endpoints from which to query metrics.
4782 ## Maps to the --query flag of thanos ruler.
4784 ## Define configuration for connecting to thanos query instances. If this is defined, the queryEndpoints field will be ignored.
4785 ## Maps to the query.config CLI argument. Only available with thanos v0.11.0 and higher.
4787 # use existing secret, if configured, queryConfig.secret will not be used
4791 # render queryConfig secret data and configure it to be used by Thanos Ruler custom resource, ignored when queryConfig.existingSecret is set
4792 # https://thanos.io/tip/components/rule.md/#query-api
4796 # username: some_user
4797 # password: some_pass
4802 ## Labels configure the external label pairs to ThanosRuler. A default replica
4803 ## label `thanos_ruler_replica` will be always added as a label with the value
4804 ## of the pod's name and it will be dropped in the alerts.
4806 ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions.
4809 ## Allows setting additional arguments for the ThanosRuler container
4810 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#thanosruler
4813 # - name: remote-write.config
4816 # - "name": "receiver-0"
4817 # "remote_timeout": "30s"
4818 # "url": "http://thanos-receiver-0.thanos-receiver:8081/api/v1/receive"
4820 ## Define which Nodes the Pods are scheduled on.
4821 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
4824 ## Define resources requests and limits for single Pods.
4825 ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
4831 ## Pod anti-affinity can prevent the scheduler from placing ThanosRuler replicas on the same node.
4832 ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
4833 ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
4834 ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
4836 podAntiAffinity: "soft"
4837 ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
4838 ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
4840 podAntiAffinityTopologyKey: kubernetes.io/hostname
4841 ## Assign custom affinity rules to the thanosRuler instance
4842 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
4846 # requiredDuringSchedulingIgnoredDuringExecution:
4847 # nodeSelectorTerms:
4848 # - matchExpressions:
4849 # - key: kubernetes.io/e2e-az-name
4855 ## If specified, the pod's tolerations.
4856 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
4862 # effect: "NoSchedule"
4864 ## If specified, the pod's topology spread constraints.
4865 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
4867 topologySpreadConstraints: []
4869 # topologyKey: topology.kubernetes.io/zone
4870 # whenUnsatisfiable: DoNotSchedule
4875 ## SecurityContext holds pod-level security attributes and common container settings.
4876 ## This defaults to non-root user with uid 1000 and gid 2000. *v1.PodSecurityContext false
4877 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
4885 type: RuntimeDefault
4886 ## Use the host's user namespace for ThanosRuler pods.
4887 ## ref: https://kubernetes.io/docs/concepts/workloads/pods/user-namespaces/
4889 ## ListenLocal makes the ThanosRuler server listen on loopback, so that it does not bind against the Pod IP.
4890 ## Note this is only for the ThanosRuler UI, not the gossip communication.
4893 ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an ThanosRuler pod.
4896 # Additional volumes on the output StatefulSet definition.
4898 # Additional VolumeMounts on the output StatefulSet definition.
4900 ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
4901 ## (permissions, dir tree) on mounted volumes before starting ThanosRuler
4903 ## Priority class assigned to the Pods
4905 priorityClassName: ""
4906 ## PortName to use for ThanosRuler.
4909 ## Duration in seconds the pod needs to terminate gracefully.
4910 ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination
4911 terminationGracePeriodSeconds: ~
4912 ## WebTLSConfig defines the TLS parameters for HTTPS
4913 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#thanosrulerwebspec
4915 ## Pod management policy. Kubernetes default is OrderedReady but prometheus-operator default is Parallel.
4916 ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
4917 podManagementPolicy: ""
4918 ## Update strategy for the StatefulSet.
4919 ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
4921 # type: RollingUpdate
4925 ## Additional configuration which is not covered by the properties above. (passed through tpl)
4926 additionalConfig: {}
4927 ## Additional configuration which is not covered by the properties above.
4928 ## Useful, if you need advanced templating
4929 additionalConfigString: ""
4930 ## ExtraSecret can be used to store various data in an extra secret
4931 ## (use it for example to store hashed basic auth credentials)
4933 ## if not set, name will be auto generated
4938 # foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
4939 # someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.
4940## Setting to true produces cleaner resource names, but requires a data migration because the name of the persistent volume changes. Therefore this should only be set once on initial installation.
4942cleanPrometheusOperatorObjectNames: false
4943## Extra manifests to deploy. Can be of type dict or list.
4944## If dict, keys are ignored and only values are used.
4945## Items contained within extraObjects can be defined as dict or string and are passed through tpl.
4951# name: prometheus-extra
4953# extra-data: "value"
4955# can also be defined as a string, useful for templating field names
4963# {{- range $key, $value := .Values.commonLabels }}
4964# {{ $key }}: {{ $value }}
4967# plaintext: Zm9vYmFy
4968# templated: '{{ print "foobar" | upper | b64enc }}'