@@ -68,18 +68,18 @@
"subdir": "gen/grafonnet-latest"
}
},
"version": "733beadbc8dab55c5fe1bcdcf0d8a2d215759a55",
"sum": "eyuJ0jOXeA4MrobbNgU4/v5a7ASDHslHZ0eS6hDdWoI="
"version": "1ce5aec95ce32336fe47c8881361847c475b5254",
"sum": "64fMUPI3frXGj4X1FqFd1t7r04w3CUSmXaDcJ23EYbQ="
},
{
"source": {
"git": {
"remote": "https://github.com/grafana/grafonnet.git",
"subdir": "gen/grafonnet-v11.0.0"
"subdir": "gen/grafonnet-v11.1.0"
}
},
"version": "733beadbc8dab55c5fe1bcdcf0d8a2d215759a55",
"sum": "0BvzR0i4bS4hc2O3xDv6i9m52z7mPrjvqxtcPrGhynA="
"version": "1ce5aec95ce32336fe47c8881361847c475b5254",
"sum": "41w7p/rwrNsITqNHMXtGSJAfAyKmnflg6rFhKBduUxM="
},
{
"source": {
@@ -88,7 +88,7 @@
"subdir": "grafana-builder"
}
},
"version": "a41bfeae97007b9ed047823e44974ce1a0817ca9",
"version": "2c38760394b41de9b7477e8ab26e9a24ed85b589",
"sum": "yxqWcq/N3E/a/XreeU6EuE6X7kYPnG0AspAQFKOjASo="
},
{
......
-local domain = 'tdude.co';
-local ingress(name, namespace, hosts, rules, authenticated) = {
+local ingress(name, namespace, hosts, rules, authenticated, domain) = {
apiVersion: 'networking.k8s.io/v1',
kind: 'Ingress',
metadata: {
@@ -195,7 +193,7 @@ local fromTraefik() = {
]
};
-local kp =
+local kp = function(domain)
(import 'kube-prometheus/main.libsonnet') +
// Uncomment the following imports to enable its patches
// (import 'kube-prometheus/addons/anti-affinity.libsonnet') +
@@ -412,7 +410,7 @@ local kp =
},
}],
},
-}], false),
+}], false, domain),
backend: ingress('backend', $.values.common.namespace, ['pyrra.monitoring.' + domain, 'prometheus.monitoring.' + domain, 'alertmanager.monitoring.' + domain, 'monitoring.' + domain], [
{
host: 'pyrra.monitoring.' + domain,
@@ -465,15 +463,15 @@ local kp =
}],
},
},
-], true),
+], true, domain),
},
};
// We need to inject some secrets as environment variables
// We can't use a configMap because there's already a generated config
// We also want temporary stateful storage with a PVC
-local modifiedGrafana = kp.grafana {
-local g = kp.grafana,
+local modifiedGrafana = function(kpd) kpd.grafana {
+local g = kpd.grafana,
deployment+: {
spec+: {
strategy: { type: 'Recreate' },
@@ -491,40 +489,41 @@ local modifiedGrafana = kp.grafana {
},
};
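The body of modifiedGrafana is elided between the two hunks above. Purely as a hedged illustration of what the comments describe (secret-backed environment variables because a configMap is already generated, plus PVC-backed storage instead of the default emptyDir), a patch along these lines would fit; the 'grafana-secrets' and 'grafana-storage' names are assumptions, not taken from this repository:

// Hedged sketch only -- not the elided body from the diff. It shows the
// pattern the comments describe: env vars from a Secret and a PVC volume.
// The names 'grafana-secrets' and 'grafana-storage' are assumptions.
local modifiedGrafanaSketch = function(kpd) kpd.grafana {
  local g = kpd.grafana,
  deployment+: {
    spec+: {
      // Recreate avoids two pods contending for the same RWO volume during a rollout
      strategy: { type: 'Recreate' },
      template+: {
        spec+: {
          // inject secret-backed environment variables into every container
          containers: [
            c { envFrom+: [{ secretRef: { name: 'grafana-secrets' } }] }
            for c in g.deployment.spec.template.spec.containers
          ],
          // swap the default grafana-storage emptyDir for a PersistentVolumeClaim
          volumes: std.map(
            function(v)
              if v.name == 'grafana-storage'
              then { name: 'grafana-storage', persistentVolumeClaim: { claimName: 'grafana-storage' } }
              else v,
            g.deployment.spec.template.spec.volumes
          ),
        },
      },
    },
  },
};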
-local manifests =
+local manifests = function(kpd)
// Uncomment line below to enable vertical auto scaling of kube-state-metrics
-//{ ['ksm-autoscaler-' + name]: kp.ksmAutoscaler[name] for name in std.objectFields(kp.ksmAutoscaler) } +
-{ ['setup/0namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
+//{ ['ksm-autoscaler-' + name]: kpd.ksmAutoscaler[name] for name in std.objectFields(kpd.ksmAutoscaler) } +
+{ ['setup/0namespace-' + name]: kpd.kubePrometheus[name] for name in std.objectFields(kpd.kubePrometheus) } +
{
-['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
-for name in std.filter((function(name) name != 'serviceMonitor'), std.objectFields(kp.prometheusOperator))
+['setup/prometheus-operator-' + name]: kpd.prometheusOperator[name]
+for name in std.filter((function(name) name != 'serviceMonitor'), std.objectFields(kpd.prometheusOperator))
} +
-{ 'setup/pyrra-slo-CustomResourceDefinition': kp.pyrra.crd } +
+{ 'setup/pyrra-slo-CustomResourceDefinition': kpd.pyrra.crd } +
// serviceMonitor is separated so that it can be created after the CRDs are ready
-{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
-{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
-{ ['grafana-' + name]: modifiedGrafana[name] for name in std.objectFields(modifiedGrafana) } +
-{ ['pyrra-' + name]: kp.pyrra[name] for name in std.objectFields(kp.pyrra) if name != 'crd' } +
-{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
-{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
-{ ['kubernetes-' + name]: kp.kubernetesControlPlane[name] for name in std.objectFields(kp.kubernetesControlPlane) } +
-{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
-{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
-{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) } +
+{ 'prometheus-operator-serviceMonitor': kpd.prometheusOperator.serviceMonitor } +
+{ ['alertmanager-' + name]: kpd.alertmanager[name] for name in std.objectFields(kpd.alertmanager) } +
+{ ['grafana-' + name]: modifiedGrafana(kpd)[name] for name in std.objectFields(modifiedGrafana(kpd)) } +
+{ ['pyrra-' + name]: kpd.pyrra[name] for name in std.objectFields(kpd.pyrra) if name != 'crd' } +
+{ ['blackbox-exporter-' + name]: kpd.blackboxExporter[name] for name in std.objectFields(kpd.blackboxExporter) } +
+{ ['kube-state-metrics-' + name]: kpd.kubeStateMetrics[name] for name in std.objectFields(kpd.kubeStateMetrics) } +
+{ ['kubernetes-' + name]: kpd.kubernetesControlPlane[name] for name in std.objectFields(kpd.kubernetesControlPlane) } +
+{ ['node-exporter-' + name]: kpd.nodeExporter[name] for name in std.objectFields(kpd.nodeExporter) } +
+{ ['prometheus-' + name]: kpd.prometheus[name] for name in std.objectFields(kpd.prometheus) } +
+{ ['prometheus-adapter-' + name]: kpd.prometheusAdapter[name] for name in std.objectFields(kpd.prometheusAdapter) } +
{ ['alertmanager-discord-' + name]: alertmanagerDiscord[name] for name in std.objectFields(alertmanagerDiscord) } +
-{ [name + '-ingress']: kp.ingress[name] for name in std.objectFields(kp.ingress) } +
+{ [name + '-ingress']: kpd.ingress[name] for name in std.objectFields(kpd.ingress) } +
//{ 'external-mixins/mysqld-mixin-prometheus-rules': mysqldMixin.prometheusRules }
//{ 'external-mixins/postgres-mixin-prometheus-rules': postgresMixin.prometheusRules }
{ 'elasticsearch-mixin-prometheus-rules': elasticsearchMixin.prometheusRules }
{ 'etcd-mixin-prometheus-rules': etcdMixin.prometheusRules };
local kustomizationResourceFile(name) = './manifests/' + name + '.yaml';
-local kustomization = {
+local kustomization = function(kpd) {
apiVersion: 'kustomize.config.k8s.io/v1beta1',
kind: 'Kustomization',
-resources: std.map(kustomizationResourceFile, std.objectFields(manifests)),
+resources: std.map(kustomizationResourceFile, std.objectFields(manifests(kpd))),
};
-manifests {
-'../kustomization': kustomization,
+function(domain)
+manifests(kp(domain)) {
+'../kustomization': kustomization(kp(domain)),
}
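With this change the file's top-level value is a function of domain rather than a plain object, so a caller has to supply the domain when evaluating it. A minimal sketch, assuming the entrypoint file is called main.jsonnet (the file name is not shown in the diff):

// render.jsonnet -- hypothetical wrapper; 'main.jsonnet' is an assumed file name.
// Passes the domain that used to be hard-coded as 'local domain'.
local build = import 'main.jsonnet';
build('tdude.co')

Equivalently, the argument can be supplied as a top-level argument on the command line, e.g. jsonnet -J vendor --tla-str domain=tdude.co main.jsonnet, since --tla-str is how the jsonnet CLI binds parameters of a top-level function; the exact paths and build scripts in this repository may differ.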
{
"apiVersion": "monitoring.coreos.com/v1",
"kind": "Alertmanager",
"metadata": {
"labels": {
"app.kubernetes.io/component": "alert-router",
"app.kubernetes.io/instance": "main",
"app.kubernetes.io/name": "alertmanager",
"app.kubernetes.io/part-of": "kube-prometheus",
"app.kubernetes.io/version": "0.27.0"
},
"name": "main",
"namespace": "monitoring"
},
"spec": {
"image": "quay.io/prometheus/alertmanager:v0.27.0",
"nodeSelector": {
"kubernetes.io/os": "linux"
},
"podMetadata": {
"labels": {
"app.kubernetes.io/component": "alert-router",
"app.kubernetes.io/instance": "main",
"app.kubernetes.io/name": "alertmanager",
"app.kubernetes.io/part-of": "kube-prometheus",
"app.kubernetes.io/version": "0.27.0"
}
},
"replicas": 3,
"resources": {
"limits": {
"cpu": "100m",
"memory": "100Mi"
},
"requests": {
"cpu": "4m",
"memory": "100Mi"
}
},
"secrets": [ ],
"securityContext": {
"fsGroup": 2000,
"runAsNonRoot": true,
"runAsUser": 1000
},
"serviceAccountName": "alertmanager-main",
"version": "0.27.0"
}
}
{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"name": "alertmanager-discord",
"namespace": "monitoring"
},
"spec": {
"replicas": 2,
"selector": {
"matchLabels": {
"app.kubernetes.io/name": "alertmanager-discord"
}
},
"template": {
"metadata": {
"labels": {
"app.kubernetes.io/name": "alertmanager-discord"
}
},
"spec": {
"containers": [
{
"env": [
{
"name": "DISCORD_WEBHOOK",
"valueFrom": {
"secretKeyRef": {
"key": "DISCORD_WEBHOOK",
"name": "discord-webhook"
}
}
}
],
"image": "benjojo/alertmanager-discord:latest",
"name": "alertmanager-discord",
"ports": [
{
"containerPort": 9094,
"name": "web"
}
],
"resources": {
"limits": {
"cpu": "100m"
},
"requests": {
"cpu": "50m"
}
}
}
]
}
}
}
}
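The container above reads DISCORD_WEBHOOK from a Secret named discord-webhook, which does not appear among the generated manifests, so it presumably has to be created out of band. A minimal sketch of the Secret this Deployment expects, with the webhook URL as a placeholder:

{
  "apiVersion": "v1",
  "kind": "Secret",
  "metadata": {
    "name": "discord-webhook",
    "namespace": "monitoring"
  },
  "stringData": {
    "DISCORD_WEBHOOK": "https://discord.com/api/webhooks/<id>/<token>"
  },
  "type": "Opaque"
}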
{
"apiVersion": "networking.k8s.io/v1",
"kind": "NetworkPolicy",
"metadata": {
"name": "alertmanager-discord",
"namespace": "monitoring"
},
"spec": {
"ingress": [
{
"from": [
{
"podSelector": {
"matchLabels": {
"app.kubernetes.io/name": "alertmanager"
}
}
}
],
"ports": [
{
"port": 9094,
"protocol": "TCP"
}
]
}
],
"podSelector": {
"matchLabels": {
"app.kubernetes.io/name": "alertmanager-discord"
}
},
"policyTypes": [
"Ingress"
]
}
}
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"name": "alertmanager-discord",
"namespace": "monitoring"
},
"spec": {
"ports": [
{
"name": "web",
"port": 9094,
"targetPort": "web"
}
],
"selector": {
"app.kubernetes.io/name": "alertmanager-discord"
}
}
}
{
"apiVersion": "networking.k8s.io/v1",
"kind": "NetworkPolicy",
"metadata": {
"labels": {
"app.kubernetes.io/component": "alert-router",
"app.kubernetes.io/instance": "main",
"app.kubernetes.io/name": "alertmanager",
"app.kubernetes.io/part-of": "kube-prometheus",
"app.kubernetes.io/version": "0.27.0"
},
"name": "alertmanager-main",
"namespace": "monitoring"
},
"spec": {
"egress": [
{ }
],
"ingress": [
{
"from": [
{
"podSelector": {
"matchLabels": {
"app.kubernetes.io/name": "prometheus"
}
}
}
],
"ports": [
{
"port": 9093,
"protocol": "TCP"
},
{
"port": 8080,
"protocol": "TCP"
}
]
},
{
"from": [
{
"podSelector": {
"matchLabels": {
"app.kubernetes.io/name": "alertmanager"
}
}
}
],
"ports": [
{
"port": 9094,
"protocol": "TCP"
},
{
"port": 9094,
"protocol": "UDP"
}
]
},
{
"from": [
{
"namespaceSelector": {
"matchLabels": {
"kubernetes.io/metadata.name": "traefik"
}
},
"podSelector": {
"matchLabels": {
"app.kubernetes.io/name": "traefik"
}
}
}
],
"ports": [
{
"port": 9093,
"protocol": "TCP"
}
]
}
],
"podSelector": {
"matchLabels": {
"app.kubernetes.io/component": "alert-router",
"app.kubernetes.io/instance": "main",
"app.kubernetes.io/name": "alertmanager",
"app.kubernetes.io/part-of": "kube-prometheus"
}
},
"policyTypes": [
"Egress",
"Ingress"
]
}
}
{
"apiVersion": "policy/v1",
"kind": "PodDisruptionBudget",
"metadata": {
"labels": {
"app.kubernetes.io/component": "alert-router",
"app.kubernetes.io/instance": "main",
"app.kubernetes.io/name": "alertmanager",
"app.kubernetes.io/part-of": "kube-prometheus",
"app.kubernetes.io/version": "0.27.0"
},
"name": "alertmanager-main",
"namespace": "monitoring"
},
"spec": {
"maxUnavailable": 1,
"selector": {
"matchLabels": {
"app.kubernetes.io/component": "alert-router",
"app.kubernetes.io/instance": "main",
"app.kubernetes.io/name": "alertmanager",
"app.kubernetes.io/part-of": "kube-prometheus"
}
}
}
}
{
"apiVersion": "monitoring.coreos.com/v1",
"kind": "PrometheusRule",
"metadata": {
"labels": {
"app.kubernetes.io/component": "alert-router",
"app.kubernetes.io/instance": "main",
"app.kubernetes.io/name": "alertmanager",
"app.kubernetes.io/part-of": "kube-prometheus",
"app.kubernetes.io/version": "0.27.0",
"prometheus": "k8s",
"role": "alert-rules"
},
"name": "alertmanager-main-rules",
"namespace": "monitoring"
},
"spec": {
"groups": [
{
"name": "alertmanager.rules",
"rules": [
{
"alert": "AlertmanagerFailedReload",
"annotations": {
"description": "Configuration has failed to load for {{ $labels.namespace }}/{{ $labels.pod}}.",
"runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerfailedreload",
"summary": "Reloading an Alertmanager configuration has failed."
},
"expr": "# Without max_over_time, failed scrapes could create false negatives, see\n# https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.\nmax_over_time(alertmanager_config_last_reload_successful{job=\"alertmanager-main\",namespace=\"monitoring\"}[5m]) == 0\n",
"for": "10m",
"labels": {
"severity": "critical"
}
},
{
"alert": "AlertmanagerMembersInconsistent",
"annotations": {
"description": "Alertmanager {{ $labels.namespace }}/{{ $labels.pod}} has only found {{ $value }} members of the {{$labels.job}} cluster.",
"runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagermembersinconsistent",
"summary": "A member of an Alertmanager cluster has not found all other cluster members."
},
"expr": "# Without max_over_time, failed scrapes could create false negatives, see\n# https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.\n max_over_time(alertmanager_cluster_members{job=\"alertmanager-main\",namespace=\"monitoring\"}[5m])\n< on (namespace,service) group_left\n count by (namespace,service) (max_over_time(alertmanager_cluster_members{job=\"alertmanager-main\",namespace=\"monitoring\"}[5m]))\n",
"for": "15m",
"labels": {
"severity": "critical"
}
},
{
"alert": "AlertmanagerFailedToSendAlerts",
"annotations": {
"description": "Alertmanager {{ $labels.namespace }}/{{ $labels.pod}} failed to send {{ $value | humanizePercentage }} of notifications to {{ $labels.integration }}.",
"runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerfailedtosendalerts",
"summary": "An Alertmanager instance failed to send notifications."
},
"expr": "(\n rate(alertmanager_notifications_failed_total{job=\"alertmanager-main\",namespace=\"monitoring\"}[5m])\n/\n ignoring (reason) group_left rate(alertmanager_notifications_total{job=\"alertmanager-main\",namespace=\"monitoring\"}[5m])\n)\n> 0.01\n",
"for": "5m",
"labels": {
"severity": "warning"
}
},
{
"alert": "AlertmanagerClusterFailedToSendAlerts",
"annotations": {
"description": "The minimum notification failure rate to {{ $labels.integration }} sent from any instance in the {{$labels.job}} cluster is {{ $value | humanizePercentage }}.",
"runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerclusterfailedtosendalerts",
"summary": "All Alertmanager instances in a cluster failed to send notifications to a critical integration."
},
"expr": "min by (namespace,service, integration) (\n rate(alertmanager_notifications_failed_total{job=\"alertmanager-main\",namespace=\"monitoring\", integration=~`.*`}[5m])\n/\n ignoring (reason) group_left rate(alertmanager_notifications_total{job=\"alertmanager-main\",namespace=\"monitoring\", integration=~`.*`}[5m])\n)\n> 0.01\n",
"for": "5m",
"labels": {
"severity": "critical"
}
},
{
"alert": "AlertmanagerClusterFailedToSendAlerts",
"annotations": {
"description": "The minimum notification failure rate to {{ $labels.integration }} sent from any instance in the {{$labels.job}} cluster is {{ $value | humanizePercentage }}.",
"runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerclusterfailedtosendalerts",
"summary": "All Alertmanager instances in a cluster failed to send notifications to a non-critical integration."
},
"expr": "min by (namespace,service, integration) (\n rate(alertmanager_notifications_failed_total{job=\"alertmanager-main\",namespace=\"monitoring\", integration!~`.*`}[5m])\n/\n ignoring (reason) group_left rate(alertmanager_notifications_total{job=\"alertmanager-main\",namespace=\"monitoring\", integration!~`.*`}[5m])\n)\n> 0.01\n",
"for": "5m",
"labels": {
"severity": "warning"
}
},
{
"alert": "AlertmanagerConfigInconsistent",
"annotations": {
"description": "Alertmanager instances within the {{$labels.job}} cluster have different configurations.",
"runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerconfiginconsistent",
"summary": "Alertmanager instances within the same cluster have different configurations."
},
"expr": "count by (namespace,service) (\n count_values by (namespace,service) (\"config_hash\", alertmanager_config_hash{job=\"alertmanager-main\",namespace=\"monitoring\"})\n)\n!= 1\n",
"for": "20m",
"labels": {
"severity": "critical"
}
},
{
"alert": "AlertmanagerClusterDown",
"annotations": {
"description": "{{ $value | humanizePercentage }} of Alertmanager instances within the {{$labels.job}} cluster have been up for less than half of the last 5m.",
"runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerclusterdown",
"summary": "Half or more of the Alertmanager instances within the same cluster are down."
},
"expr": "(\n count by (namespace,service) (\n avg_over_time(up{job=\"alertmanager-main\",namespace=\"monitoring\"}[5m]) < 0.5\n )\n/\n count by (namespace,service) (\n up{job=\"alertmanager-main\",namespace=\"monitoring\"}\n )\n)\n>= 0.5\n",
"for": "5m",
"labels": {
"severity": "critical"
}
},
{
"alert": "AlertmanagerClusterCrashlooping",
"annotations": {
"description": "{{ $value | humanizePercentage }} of Alertmanager instances within the {{$labels.job}} cluster have restarted at least 5 times in the last 10m.",
"runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerclustercrashlooping",
"summary": "Half or more of the Alertmanager instances within the same cluster are crashlooping."
},
"expr": "(\n count by (namespace,service) (\n changes(process_start_time_seconds{job=\"alertmanager-main\",namespace=\"monitoring\"}[10m]) > 4\n )\n/\n count by (namespace,service) (\n up{job=\"alertmanager-main\",namespace=\"monitoring\"}\n )\n)\n>= 0.5\n",
"for": "5m",
"labels": {
"severity": "critical"
}
}
]
}
]
}
}
{
"apiVersion": "v1",
"kind": "Secret",
"metadata": {
"labels": {
"app.kubernetes.io/component": "alert-router",
"app.kubernetes.io/instance": "main",
"app.kubernetes.io/name": "alertmanager",
"app.kubernetes.io/part-of": "kube-prometheus",
"app.kubernetes.io/version": "0.27.0"
},
"name": "alertmanager-main",
"namespace": "monitoring"
},
"stringData": {
"alertmanager.yaml": "global:\n resolve_timeout: 1m\n\nroute:\n group_by: ['job']\n group_wait: 30s\n group_interval: 5m\n repeat_interval: 4h\n\n # Default receiver.\n receiver: 'null'\n\n # Different routes\n routes:\n - match:\n alertname: Watchdog\n receiver: 'null'\n - receiver: 'discord-notifications'\n \nreceivers:\n - name: 'null'\n - name: 'discord-notifications'\n webhook_configs:\n - url: 'http://alertmanager-discord:9094'\n"
},
"type": "Opaque"
}
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"labels": {
"app.kubernetes.io/component": "alert-router",
"app.kubernetes.io/instance": "main",
"app.kubernetes.io/name": "alertmanager",
"app.kubernetes.io/part-of": "kube-prometheus",
"app.kubernetes.io/version": "0.27.0"
},
"name": "alertmanager-main",
"namespace": "monitoring"
},
"spec": {
"ports": [
{
"name": "web",
"port": 9093,
"targetPort": "web"
},
{
"name": "reloader-web",
"port": 8080,
"targetPort": "reloader-web"
}
],
"selector": {
"app.kubernetes.io/component": "alert-router",
"app.kubernetes.io/instance": "main",
"app.kubernetes.io/name": "alertmanager",
"app.kubernetes.io/part-of": "kube-prometheus"
},
"sessionAffinity": "ClientIP"
}
}