chore(All Markdown Files): auto wrap text, fix typos

Ryan Yin
2024-03-16 19:45:36 +08:00
parent 1e38f7bb09
commit 0eb83b22f0
79 changed files with 2477 additions and 2896 deletions

View File

@@ -11,4 +11,3 @@ Host running operation and maintenance related services:
1. prometheus + alertmanager + grafana + loki: Monitor the metrics/logs of my homelab.
1. restic: Back up my personal data to the cloud or NAS.
1. syncthing: Sync files between Android/MacBook/PC and NAS.

View File

@@ -2,7 +2,7 @@ apiVersion: 1
providers:
# <string> a unique provider name. Required
- name: 'Dashboards'
- name: "Dashboards"
# <int> Org id. Default to 1
orgId: 1
# <string> provider type. Default to 'file'
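(For reference: a complete provider entry in this dashboard-provisioning format would look roughly like the sketch below; the folder and path values are placeholders, not taken from this repo.)

apiVersion: 1
providers:
  - name: "Dashboards" # unique provider name, required
    orgId: 1 # org id, defaults to 1
    type: file # provider type, defaults to 'file'
    folder: "" # Grafana folder to import into (placeholder)
    options:
      # directory Grafana scans for dashboard JSON files (placeholder path)
      path: /etc/grafana/provisioning/dashboards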

View File

@@ -1,4 +1,4 @@
# Grafana Dashbaords
# Grafana Dashboards
## Homelab
@@ -8,4 +8,3 @@
## Kubernetes
1. https://github.com/dotdc/grafana-dashboards-kubernetes/

View File

@@ -128,11 +128,7 @@
"cacheTimeout": null,
"colorBackground": false,
"colorValue": false,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"colors": ["#299c46", "rgba(237, 129, 40, 0.89)", "#d44a3a"],
"datasource": "$datasource",
"decimals": 0,
"format": "short",
@@ -297,7 +293,7 @@
"unit": "s"
},
{
"alias": "Last reload sucessfull",
"alias": "Last reload successful",
"colorMode": "cell",
"colors": [
"rgba(245, 54, 54, 0.9)",
@@ -308,10 +304,7 @@
"decimals": 2,
"mappingType": 1,
"pattern": "Value #C",
"thresholds": [
"0",
"1"
],
"thresholds": ["0", "1"],
"type": "number",
"unit": "short"
},
@@ -363,11 +356,7 @@
"cacheTimeout": null,
"colorBackground": false,
"colorValue": false,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"colors": ["#299c46", "rgba(237, 129, 40, 0.89)", "#d44a3a"],
"datasource": "$datasource",
"decimals": 0,
"description": "Number of peers in the Alertmanager cluster.",
@@ -445,11 +434,7 @@
"cacheTimeout": null,
"colorBackground": false,
"colorValue": false,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"colors": ["#299c46", "rgba(237, 129, 40, 0.89)", "#d44a3a"],
"datasource": "$datasource",
"description": "Current number of active alerts.",
"format": "none",
@@ -526,11 +511,7 @@
"cacheTimeout": null,
"colorBackground": false,
"colorValue": false,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"colors": ["#299c46", "rgba(237, 129, 40, 0.89)", "#d44a3a"],
"datasource": "$datasource",
"description": "Current number of suppressed alerts.",
"format": "none",
@@ -607,11 +588,7 @@
"cacheTimeout": null,
"colorBackground": false,
"colorValue": false,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"colors": ["#299c46", "rgba(237, 129, 40, 0.89)", "#d44a3a"],
"datasource": "$datasource",
"description": "Current number of active silences.",
"format": "none",
@@ -2469,7 +2446,7 @@
"expr": "increase(alertmanager_cluster_reconnections_total{instance=~\"$instance\"}[$__interval])",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Sucessful reconnections",
"legendFormat": "Successful reconnections",
"refId": "A"
},
{
@@ -2578,7 +2555,7 @@
"expr": "increase(alertmanager_cluster_reconnections_total{instance=~\"$instance\"}[$__interval])",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Sucessful reconnections",
"legendFormat": "Successful reconnections",
"refId": "A"
},
{
@@ -2687,7 +2664,7 @@
"expr": "increase(alertmanager_cluster_reconnections_total{instance=~\"$instance\"}[$__interval])",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Sucessful reconnections",
"legendFormat": "Successful reconnections",
"refId": "A"
},
{
@@ -2796,7 +2773,7 @@
"expr": "increase(alertmanager_cluster_reconnections_total{instance=~\"$instance\"}[$__interval])",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Sucessful reconnections",
"legendFormat": "Successful reconnections",
"refId": "A"
},
{
@@ -10161,15 +10138,11 @@
"format": "time_series",
"groupBy": [
{
"params": [
"$__interval"
],
"params": ["$__interval"],
"type": "time"
},
{
"params": [
"null"
],
"params": ["null"],
"type": "fill"
}
],
@@ -10183,9 +10156,7 @@
"select": [
[
{
"params": [
"value"
],
"params": ["value"],
"type": "field"
},
{
@@ -10305,15 +10276,11 @@
"format": "time_series",
"groupBy": [
{
"params": [
"$__interval"
],
"params": ["$__interval"],
"type": "time"
},
{
"params": [
"null"
],
"params": ["null"],
"type": "fill"
}
],
@@ -10327,9 +10294,7 @@
"select": [
[
{
"params": [
"value"
],
"params": ["value"],
"type": "field"
},
{
@@ -10449,15 +10414,11 @@
"format": "time_series",
"groupBy": [
{
"params": [
"$__interval"
],
"params": ["$__interval"],
"type": "time"
},
{
"params": [
"null"
],
"params": ["null"],
"type": "fill"
}
],
@@ -10471,9 +10432,7 @@
"select": [
[
{
"params": [
"value"
],
"params": ["value"],
"type": "field"
},
{
@@ -10593,15 +10552,11 @@
"format": "time_series",
"groupBy": [
{
"params": [
"$__interval"
],
"params": ["$__interval"],
"type": "time"
},
{
"params": [
"null"
],
"params": ["null"],
"type": "fill"
}
],
@@ -10615,9 +10570,7 @@
"select": [
[
{
"params": [
"value"
],
"params": ["value"],
"type": "field"
},
{
@@ -11131,11 +11084,7 @@
"refresh": "5m",
"schemaVersion": 18,
"style": "dark",
"tags": [
"alertmanager",
"prometheus",
"alerting"
],
"tags": ["alertmanager", "prometheus", "alerting"],
"templating": {
"list": [
{
@@ -11185,32 +11134,11 @@
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
"refresh_intervals": ["5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d"],
"time_options": ["5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d"]
},
"timezone": "",
"title": "Alertmanager",
"uid": "eea-9_sik",
"version": 27
}
}

View File

@@ -131,9 +131,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -198,9 +196,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -320,9 +316,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -441,11 +435,7 @@
"id": 93,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "list",
"placement": "right",
"showLegend": false
@@ -540,11 +530,7 @@
"id": 96,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "list",
"placement": "right",
"showLegend": false
@@ -643,11 +629,7 @@
"id": 74,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "list",
"placement": "right",
"showLegend": false
@@ -794,11 +776,7 @@
"id": 84,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "list",
"placement": "right",
"showLegend": false
@@ -897,11 +875,7 @@
"id": 75,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "list",
"placement": "right",
"showLegend": false
@@ -1000,11 +974,7 @@
"id": 85,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "list",
"placement": "right",
"showLegend": false
@@ -1129,11 +1099,7 @@
"id": 59,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "list",
"placement": "right",
"showLegend": false
@@ -1244,11 +1210,7 @@
"id": 60,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "list",
"placement": "right",
"showLegend": false
@@ -1347,11 +1309,7 @@
"id": 101,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "list",
"placement": "right",
"showLegend": false
@@ -1450,11 +1408,7 @@
"id": 102,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "list",
"placement": "right",
"showLegend": false
@@ -1589,11 +1543,7 @@
"id": 90,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "list",
"placement": "right",
"showLegend": false
@@ -1692,11 +1642,7 @@
"id": 95,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "list",
"placement": "right",
"showLegend": false
@@ -1807,11 +1753,7 @@
"id": 73,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "list",
"placement": "right",
"showLegend": false
@@ -1910,11 +1852,7 @@
"id": 86,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "list",
"placement": "right",
"showLegend": false
@@ -2057,11 +1995,7 @@
"id": 29,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": false
@@ -2161,11 +2095,7 @@
"id": 51,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": false
@@ -2290,11 +2220,7 @@
"id": 62,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true
@@ -2393,11 +2319,7 @@
"id": 87,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true
@@ -3007,10 +2929,7 @@
"revision": 1,
"schemaVersion": 38,
"style": "dark",
"tags": [
"Kubernetes",
"Prometheus"
],
"tags": ["Kubernetes", "Prometheus"],
"templating": {
"list": [
{
@@ -3147,4 +3066,4 @@
"uid": "k8s_addons_prometheus",
"version": 3,
"weekStart": ""
}
}

View File

@@ -1,47 +1,47 @@
{
"__inputs": [
{
"name": "DS_PROMETHEUS",
"label": "Prometheus",
"description": "",
"type": "datasource",
"pluginId": "prometheus",
"pluginName": "Prometheus"
}
],
"__elements": [],
"__requires": [
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "8.5.0"
},
{
"type": "datasource",
"id": "prometheus",
"name": "Prometheus",
"version": "5.0.0"
},
{
"type": "panel",
"id": "timeseries",
"name": "Time series",
"version": ""
},
{
"type": "panel",
"id": "stat",
"name": "Stat",
"version": ""
},
{
"type": "panel",
"id": "table",
"name": "Table",
"version": ""
}
],
"__inputs": [
{
"name": "DS_PROMETHEUS",
"label": "Prometheus",
"description": "",
"type": "datasource",
"pluginId": "prometheus",
"pluginName": "Prometheus"
}
],
"__elements": [],
"__requires": [
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "8.5.0"
},
{
"type": "datasource",
"id": "prometheus",
"name": "Prometheus",
"version": "5.0.0"
},
{
"type": "panel",
"id": "timeseries",
"name": "Time series",
"version": ""
},
{
"type": "panel",
"id": "stat",
"name": "Stat",
"version": ""
},
{
"type": "panel",
"id": "table",
"name": "Table",
"version": ""
}
],
"annotations": {
"list": [
{
@@ -129,9 +129,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -197,9 +195,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -265,9 +261,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -333,9 +327,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -401,9 +393,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -469,9 +459,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -863,12 +851,8 @@
"options": {
"footer": {
"enablePagination": true,
"fields": [
"Value"
],
"reducer": [
"sum"
],
"fields": ["Value"],
"reducer": ["sum"],
"show": false
},
"showHeader": true,
@@ -1093,9 +1077,7 @@
"footer": {
"enablePagination": true,
"fields": "",
"reducer": [
"sum"
],
"reducer": ["sum"],
"show": false
},
"showHeader": true,
@@ -1182,9 +1164,7 @@
"operation": "groupby"
},
"Value": {
"aggregations": [
"lastNotNull"
]
"aggregations": ["lastNotNull"]
},
"Vulnerability": {
"aggregations": [],
@@ -1274,9 +1254,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -1342,9 +1320,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -1410,9 +1386,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -1478,9 +1452,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -1546,9 +1518,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -1882,9 +1852,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -1950,9 +1918,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -2018,9 +1984,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -2086,9 +2050,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -2154,9 +2116,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -2550,12 +2510,7 @@
"refresh": "30s",
"schemaVersion": 37,
"style": "dark",
"tags": [
"Prometheus",
"Addons",
"Trivy",
"Trivy-operator"
],
"tags": ["Prometheus", "Addons", "Trivy", "Trivy-operator"],
"templating": {
"list": [
{
@@ -2607,12 +2562,8 @@
{
"current": {
"selected": true,
"text": [
"All"
],
"value": [
"$__all"
]
"text": ["All"],
"value": ["$__all"]
},
"datasource": {
"type": "prometheus",

View File

@@ -114,9 +114,7 @@
"justifyMode": "auto",
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -195,9 +193,7 @@
"footer": {
"countRows": false,
"fields": "",
"reducer": [
"sum"
],
"reducer": ["sum"],
"show": false
},
"showHeader": true,
@@ -227,14 +223,7 @@
{
"id": "labelsToFields",
"options": {
"keepLabels": [
"group",
"job",
"removed_release",
"resource",
"version",
"name"
],
"keepLabels": ["group", "job", "removed_release", "resource", "version", "name"],
"mode": "columns"
}
},
@@ -268,9 +257,7 @@
"options": {
"fields": {
"group": {
"aggregations": [
"lastNotNull"
],
"aggregations": ["lastNotNull"],
"operation": "groupby"
},
"job": {
@@ -278,9 +265,7 @@
"operation": "groupby"
},
"namespace": {
"aggregations": [
"lastNotNull"
],
"aggregations": ["lastNotNull"],
"operation": "groupby"
},
"removed_release": {
@@ -288,9 +273,7 @@
"operation": "groupby"
},
"resource": {
"aggregations": [
"lastNotNull"
],
"aggregations": ["lastNotNull"],
"operation": "groupby"
},
"version": {
@@ -1248,10 +1231,7 @@
"refresh": "30s",
"schemaVersion": 38,
"style": "dark",
"tags": [
"Kubernetes",
"Prometheus"
],
"tags": ["Kubernetes", "Prometheus"],
"templating": {
"list": [
{

View File

@@ -114,9 +114,7 @@
"justifyMode": "auto",
"orientation": "vertical",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -1431,10 +1429,7 @@
],
"refresh": "30s",
"schemaVersion": 38,
"tags": [
"Kubernetes",
"Prometheus"
],
"tags": ["Kubernetes", "Prometheus"],
"templating": {
"list": [
{
@@ -1595,12 +1590,8 @@
{
"current": {
"selected": true,
"text": [
"coredns"
],
"value": [
"coredns"
]
"text": ["coredns"],
"value": ["coredns"]
},
"definition": "label_values(coredns_build_info{cluster=\"$cluster\"},job)",
"hide": 0,

View File

@@ -134,9 +134,7 @@
"namePlacement": "auto",
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -234,9 +232,7 @@
"namePlacement": "auto",
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -323,9 +319,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -421,11 +415,7 @@
"id": 52,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true,
@@ -631,9 +621,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -694,9 +682,7 @@
"justifyMode": "center",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -796,9 +782,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -897,9 +881,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -1121,12 +1103,7 @@
"id": 55,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull",
"max",
"min"
],
"calcs": ["mean", "lastNotNull", "max", "min"],
"displayMode": "hidden",
"placement": "right",
"showLegend": false
@@ -1229,11 +1206,7 @@
"id": 46,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true,
@@ -1332,11 +1305,7 @@
"id": 50,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true,
@@ -1432,11 +1401,7 @@
"id": 54,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true,
@@ -1531,11 +1496,7 @@
"id": 73,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true,
@@ -1637,11 +1598,7 @@
"id": 82,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true,
@@ -1743,11 +1700,7 @@
"id": 83,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true,
@@ -1857,11 +1810,7 @@
"id": 84,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true,
@@ -1970,11 +1919,7 @@
"id": 85,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true,
@@ -2072,11 +2017,7 @@
"id": 87,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true,
@@ -2174,11 +2115,7 @@
"id": 88,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true,
@@ -2880,10 +2817,7 @@
],
"refresh": "30s",
"schemaVersion": 39,
"tags": [
"Kubernetes",
"Prometheus"
],
"tags": ["Kubernetes", "Prometheus"],
"templating": {
"list": [
{

View File

@@ -133,9 +133,7 @@
"minVizWidth": 75,
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -209,9 +207,7 @@
"minVizWidth": 75,
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -307,11 +303,7 @@
"id": 32,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true,
@@ -475,9 +467,7 @@
"justifyMode": "center",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -577,9 +567,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -931,11 +919,7 @@
"id": 68,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true,
@@ -1047,11 +1031,7 @@
"id": 70,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true,
@@ -1162,11 +1142,7 @@
"id": 72,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true,
@@ -1266,11 +1242,7 @@
"id": 74,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true,
@@ -1370,11 +1342,7 @@
"id": 75,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true,
@@ -2154,10 +2122,7 @@
],
"refresh": "30s",
"schemaVersion": 39,
"tags": [
"Kubernetes",
"Prometheus"
],
"tags": ["Kubernetes", "Prometheus"],
"templating": {
"list": [
{

View File

@@ -139,9 +139,7 @@
"minVizWidth": 75,
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -213,9 +211,7 @@
"minVizWidth": 75,
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -277,9 +273,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -420,9 +414,7 @@
"footer": {
"countRows": false,
"fields": "",
"reducer": [
"sum"
],
"reducer": ["sum"],
"show": false
},
"showHeader": true,
@@ -506,9 +498,7 @@
"operation": "groupby"
},
"namespace": {
"aggregations": [
"last"
],
"aggregations": ["last"],
"operation": "groupby"
},
"pod": {
@@ -566,9 +556,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -631,9 +619,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -695,9 +681,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -760,9 +744,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -833,9 +815,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -1416,11 +1396,7 @@
"id": 66,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true,
@@ -2795,9 +2771,7 @@
"footer": {
"countRows": false,
"fields": "",
"reducer": [
"sum"
],
"reducer": ["sum"],
"show": false
},
"showHeader": true,
@@ -2839,21 +2813,15 @@
"options": {
"fields": {
"Value": {
"aggregations": [
"lastNotNull"
],
"aggregations": ["lastNotNull"],
"operation": "aggregate"
},
"Value #A": {
"aggregations": [
"lastNotNull"
],
"aggregations": ["lastNotNull"],
"operation": "aggregate"
},
"Value #B": {
"aggregations": [
"lastNotNull"
],
"aggregations": ["lastNotNull"],
"operation": "aggregate"
},
"persistentvolumeclaim": {
@@ -3790,10 +3758,7 @@
],
"refresh": "30s",
"schemaVersion": 39,
"tags": [
"Kubernetes",
"Prometheus"
],
"tags": ["Kubernetes", "Prometheus"],
"templating": {
"list": [
{

View File

@@ -137,9 +137,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"mean"
],
"calcs": ["mean"],
"fields": "",
"values": false
},
@@ -205,9 +203,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"mean"
],
"calcs": ["mean"],
"fields": "",
"values": false
},
@@ -267,9 +263,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"mean"
],
"calcs": ["mean"],
"fields": "",
"values": false
},
@@ -329,9 +323,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"mean"
],
"calcs": ["mean"],
"fields": "",
"values": false
},
@@ -660,9 +652,7 @@
"minVizWidth": 75,
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -732,9 +722,7 @@
"minVizWidth": 75,
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -808,9 +796,7 @@
"minVizWidth": 75,
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -880,9 +866,7 @@
"minVizWidth": 75,
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"calcs": ["last"],
"fields": "",
"values": false
},
@@ -1000,9 +984,7 @@
"footer": {
"countRows": false,
"fields": "",
"reducer": [
"sum"
],
"reducer": ["sum"],
"show": false
},
"showHeader": true,
@@ -1532,11 +1514,7 @@
"id": 29,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true
@@ -1638,11 +1616,7 @@
"id": 51,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true
@@ -1744,11 +1718,7 @@
"id": 59,
"options": {
"legend": {
"calcs": [
"min",
"max",
"mean"
],
"calcs": ["min", "max", "mean"],
"displayMode": "table",
"placement": "right",
"showLegend": true,
@@ -2475,10 +2445,7 @@
],
"refresh": "30s",
"schemaVersion": 38,
"tags": [
"Kubernetes",
"Prometheus"
],
"tags": ["Kubernetes", "Prometheus"],
"templating": {
"list": [
{

View File

@@ -12,10 +12,10 @@ datasources:
manageAlerts: true
prometheusType: Prometheus
prometheusVersion: 2.49.0
cacheLevel: 'High'
cacheLevel: "High"
disableRecordingRules: false
# As of Grafana 10, the Prometheus data source can be configured to query live dashboards
# As of Grafana 10, the Prometheus data source can be configured to query live dashboards
# incrementally, instead of re-querying the entire duration on each dashboard refresh.
# Increasing the duration of the incrementalQueryOverlapWindow will increase the size of every incremental query,
# Increasing the duration of the incrementalQueryOverlapWindow will increase the size of every incremental query,
# but might be helpful for instances that have inconsistent results for recent data.
incrementalQueryOverlapWindow: 10m
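(For reference: the settings in this hunk sit under the data source's jsonData in a provisioning file; a minimal sketch assuming Grafana 10+ and a placeholder Prometheus URL. The incrementalQuerying flag name follows the Grafana Prometheus data source docs and is an assumption, not shown in this diff.)

apiVersion: 1
datasources:
  - name: Prometheus
    type: prometheus
    access: proxy
    url: http://prometheus:9090 # placeholder URL
    jsonData:
      manageAlerts: true
      prometheusType: Prometheus
      prometheusVersion: 2.49.0
      cacheLevel: "High"
      disableRecordingRules: false
      # enable incremental dashboard queries (Grafana 10+, assumed flag name)
      incrementalQuerying: true
      incrementalQueryOverlapWindow: 10m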

View File

@@ -2,6 +2,5 @@
## Alert Rules
- [awesome-prometheus-alerts](https://github.com/samber/awesome-prometheus-alerts): Collection of Prometheus alerting rules
- [awesome-prometheus-alerts](https://github.com/samber/awesome-prometheus-alerts): Collection of
Prometheus alerting rules
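(For reference: the rule files in the following hunks all use the standard Prometheus rule-file layout; a minimal skeleton with a purely illustrative alert name and expression.)

groups:
  - name: ExampleGroup
    rules:
      - alert: ExampleAlert
        expr: "up == 0" # illustrative expression
        for: 5m
        labels:
          severity: critical
        annotations:
          summary: Instance down (instance {{ $labels.instance }})
          description: "{{ $labels.instance }} has been unreachable for 5 minutes."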

View File

@@ -1,14 +1,13 @@
groups:
- name: EmbeddedExporter
- name: EmbeddedExporter
rules:
- alert: CorednsPanicCount
expr: 'increase(coredns_panics_total[1m]) > 0'
for: 0m
labels:
severity: critical
annotations:
summary: CoreDNS Panic Count (instance {{ $labels.instance }})
description: "Number of CoreDNS panics encountered\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
rules:
- alert: CorednsPanicCount
expr: "increase(coredns_panics_total[1m]) > 0"
for: 0m
labels:
severity: critical
annotations:
summary: CoreDNS Panic Count (instance {{ $labels.instance }})
description:
"Number of CoreDNS panics encountered\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

View File

@@ -1,122 +1,162 @@
groups:
- name: EmbeddedExporter
- name: EmbeddedExporter
rules:
- alert: EtcdInsufficientMembers
expr: "count(etcd_server_id) % 2 == 0"
for: 0m
labels:
severity: critical
annotations:
summary: Etcd insufficient Members (instance {{ $labels.instance }})
description:
"Etcd cluster should have an odd number of members\n VALUE = {{ $value }}\n LABELS =
{{ $labels }}"
rules:
- alert: EtcdNoLeader
expr: "etcd_server_has_leader == 0"
for: 0m
labels:
severity: critical
annotations:
summary: Etcd no Leader (instance {{ $labels.instance }})
description:
"Etcd cluster have no leader\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: EtcdInsufficientMembers
expr: 'count(etcd_server_id) % 2 == 0'
for: 0m
labels:
severity: critical
annotations:
summary: Etcd insufficient Members (instance {{ $labels.instance }})
description: "Etcd cluster should have an odd number of members\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: EtcdHighNumberOfLeaderChanges
expr: "increase(etcd_server_leader_changes_seen_total[10m]) > 2"
for: 0m
labels:
severity: warning
annotations:
summary: Etcd high number of leader changes (instance {{ $labels.instance }})
description:
"Etcd leader changed more than 2 times during 10 minutes\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
- alert: EtcdNoLeader
expr: 'etcd_server_has_leader == 0'
for: 0m
labels:
severity: critical
annotations:
summary: Etcd no Leader (instance {{ $labels.instance }})
description: "Etcd cluster have no leader\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: EtcdHighNumberOfFailedGrpcRequests
expr:
'sum(rate(grpc_server_handled_total{grpc_code!="OK"}[1m])) BY (grpc_service, grpc_method)
/ sum(rate(grpc_server_handled_total[1m])) BY (grpc_service, grpc_method) > 0.01'
for: 2m
labels:
severity: warning
annotations:
summary: Etcd high number of failed GRPC requests (instance {{ $labels.instance }})
description:
"More than 1% GRPC request failure detected in Etcd\n VALUE = {{ $value }}\n LABELS =
{{ $labels }}"
- alert: EtcdHighNumberOfLeaderChanges
expr: 'increase(etcd_server_leader_changes_seen_total[10m]) > 2'
for: 0m
labels:
severity: warning
annotations:
summary: Etcd high number of leader changes (instance {{ $labels.instance }})
description: "Etcd leader changed more than 2 times during 10 minutes\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: EtcdHighNumberOfFailedGrpcRequests
expr:
'sum(rate(grpc_server_handled_total{grpc_code!="OK"}[1m])) BY (grpc_service, grpc_method)
/ sum(rate(grpc_server_handled_total[1m])) BY (grpc_service, grpc_method) > 0.05'
for: 2m
labels:
severity: critical
annotations:
summary: Etcd high number of failed GRPC requests (instance {{ $labels.instance }})
description:
"More than 5% GRPC request failure detected in Etcd\n VALUE = {{ $value }}\n LABELS =
{{ $labels }}"
- alert: EtcdHighNumberOfFailedGrpcRequests
expr: 'sum(rate(grpc_server_handled_total{grpc_code!="OK"}[1m])) BY (grpc_service, grpc_method) / sum(rate(grpc_server_handled_total[1m])) BY (grpc_service, grpc_method) > 0.01'
for: 2m
labels:
severity: warning
annotations:
summary: Etcd high number of failed GRPC requests (instance {{ $labels.instance }})
description: "More than 1% GRPC request failure detected in Etcd\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: EtcdGrpcRequestsSlow
expr:
'histogram_quantile(0.99,
sum(rate(grpc_server_handling_seconds_bucket{grpc_type="unary"}[1m])) by (grpc_service,
grpc_method, le)) > 0.15'
for: 2m
labels:
severity: warning
annotations:
summary: Etcd GRPC requests slow (instance {{ $labels.instance }})
description:
"GRPC requests slowing down, 99th percentile is over 0.15s\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
- alert: EtcdHighNumberOfFailedGrpcRequests
expr: 'sum(rate(grpc_server_handled_total{grpc_code!="OK"}[1m])) BY (grpc_service, grpc_method) / sum(rate(grpc_server_handled_total[1m])) BY (grpc_service, grpc_method) > 0.05'
for: 2m
labels:
severity: critical
annotations:
summary: Etcd high number of failed GRPC requests (instance {{ $labels.instance }})
description: "More than 5% GRPC request failure detected in Etcd\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: EtcdHighNumberOfFailedHttpRequests
expr:
"sum(rate(etcd_http_failed_total[1m])) BY (method) /
sum(rate(etcd_http_received_total[1m])) BY (method) > 0.01"
for: 2m
labels:
severity: warning
annotations:
summary: Etcd high number of failed HTTP requests (instance {{ $labels.instance }})
description:
"More than 1% HTTP failure detected in Etcd\n VALUE = {{ $value }}\n LABELS = {{
$labels }}"
- alert: EtcdGrpcRequestsSlow
expr: 'histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{grpc_type="unary"}[1m])) by (grpc_service, grpc_method, le)) > 0.15'
for: 2m
labels:
severity: warning
annotations:
summary: Etcd GRPC requests slow (instance {{ $labels.instance }})
description: "GRPC requests slowing down, 99th percentile is over 0.15s\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: EtcdHighNumberOfFailedHttpRequests
expr:
"sum(rate(etcd_http_failed_total[1m])) BY (method) /
sum(rate(etcd_http_received_total[1m])) BY (method) > 0.05"
for: 2m
labels:
severity: critical
annotations:
summary: Etcd high number of failed HTTP requests (instance {{ $labels.instance }})
description:
"More than 5% HTTP failure detected in Etcd\n VALUE = {{ $value }}\n LABELS = {{
$labels }}"
- alert: EtcdHighNumberOfFailedHttpRequests
expr: 'sum(rate(etcd_http_failed_total[1m])) BY (method) / sum(rate(etcd_http_received_total[1m])) BY (method) > 0.01'
for: 2m
labels:
severity: warning
annotations:
summary: Etcd high number of failed HTTP requests (instance {{ $labels.instance }})
description: "More than 1% HTTP failure detected in Etcd\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: EtcdHttpRequestsSlow
expr:
"histogram_quantile(0.99, rate(etcd_http_successful_duration_seconds_bucket[1m])) > 0.15"
for: 2m
labels:
severity: warning
annotations:
summary: Etcd HTTP requests slow (instance {{ $labels.instance }})
description:
"HTTP requests slowing down, 99th percentile is over 0.15s\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
- alert: EtcdHighNumberOfFailedHttpRequests
expr: 'sum(rate(etcd_http_failed_total[1m])) BY (method) / sum(rate(etcd_http_received_total[1m])) BY (method) > 0.05'
for: 2m
labels:
severity: critical
annotations:
summary: Etcd high number of failed HTTP requests (instance {{ $labels.instance }})
description: "More than 5% HTTP failure detected in Etcd\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: EtcdMemberCommunicationSlow
expr:
"histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket[1m])) >
0.15"
for: 2m
labels:
severity: warning
annotations:
summary: Etcd member communication slow (instance {{ $labels.instance }})
description:
"Etcd member communication slowing down, 99th percentile is over 0.15s\n VALUE = {{
$value }}\n LABELS = {{ $labels }}"
- alert: EtcdHttpRequestsSlow
expr: 'histogram_quantile(0.99, rate(etcd_http_successful_duration_seconds_bucket[1m])) > 0.15'
for: 2m
labels:
severity: warning
annotations:
summary: Etcd HTTP requests slow (instance {{ $labels.instance }})
description: "HTTP requests slowing down, 99th percentile is over 0.15s\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: EtcdHighNumberOfFailedProposals
expr: "increase(etcd_server_proposals_failed_total[1h]) > 5"
for: 2m
labels:
severity: warning
annotations:
summary: Etcd high number of failed proposals (instance {{ $labels.instance }})
description:
"Etcd server got more than 5 failed proposals past hour\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
- alert: EtcdMemberCommunicationSlow
expr: 'histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket[1m])) > 0.15'
for: 2m
labels:
severity: warning
annotations:
summary: Etcd member communication slow (instance {{ $labels.instance }})
description: "Etcd member communication slowing down, 99th percentile is over 0.15s\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: EtcdHighFsyncDurations
expr:
"histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[1m])) > 0.5"
for: 2m
labels:
severity: warning
annotations:
summary: Etcd high fsync durations (instance {{ $labels.instance }})
description:
"Etcd WAL fsync duration increasing, 99th percentile is over 0.5s\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
- alert: EtcdHighNumberOfFailedProposals
expr: 'increase(etcd_server_proposals_failed_total[1h]) > 5'
for: 2m
labels:
severity: warning
annotations:
summary: Etcd high number of failed proposals (instance {{ $labels.instance }})
description: "Etcd server got more than 5 failed proposals past hour\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: EtcdHighFsyncDurations
expr: 'histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[1m])) > 0.5'
for: 2m
labels:
severity: warning
annotations:
summary: Etcd high fsync durations (instance {{ $labels.instance }})
description: "Etcd WAL fsync duration increasing, 99th percentile is over 0.5s\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: EtcdHighCommitDurations
expr: 'histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[1m])) > 0.25'
for: 2m
labels:
severity: warning
annotations:
summary: Etcd high commit durations (instance {{ $labels.instance }})
description: "Etcd commit duration increasing, 99th percentile is over 0.25s\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: EtcdHighCommitDurations
expr:
"histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[1m])) >
0.25"
for: 2m
labels:
severity: warning
annotations:
summary: Etcd high commit durations (instance {{ $labels.instance }})
description:
"Etcd commit duration increasing, 99th percentile is over 0.25s\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"

View File

@@ -1,95 +1,123 @@
groups:
- name: EmbeddedExporter
- name: EmbeddedExporter
rules:
- alert: IstioKubernetesGatewayAvailabilityDrop
expr:
'min(kube_deployment_status_replicas_available{deployment="istio-ingressgateway",
namespace="istio-system"}) without (instance, pod) < 2'
for: 1m
labels:
severity: warning
annotations:
summary: Istio Kubernetes gateway availability drop (instance {{ $labels.instance }})
description:
"Gateway pods have dropped. Inbound traffic will likely be affected.\n VALUE = {{
$value }}\n LABELS = {{ $labels }}"
rules:
- alert: IstioPilotHighTotalRequestRate
expr: "sum(rate(pilot_xds_push_errors[1m])) / sum(rate(pilot_xds_pushes[1m])) * 100 > 5"
for: 1m
labels:
severity: warning
annotations:
summary: Istio Pilot high total request rate (instance {{ $labels.instance }})
description:
"Number of Istio Pilot push errors is too high (> 5%). Envoy sidecars might have
outdated configuration.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: IstioKubernetesGatewayAvailabilityDrop
expr: 'min(kube_deployment_status_replicas_available{deployment="istio-ingressgateway", namespace="istio-system"}) without (instance, pod) < 2'
for: 1m
labels:
severity: warning
annotations:
summary: Istio Kubernetes gateway availability drop (instance {{ $labels.instance }})
description: "Gateway pods have dropped. Inbound traffic will likely be affected.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: IstioMixerPrometheusDispatchesLow
expr: 'sum(rate(mixer_runtime_dispatches_total{adapter=~"prometheus"}[1m])) < 180'
for: 1m
labels:
severity: warning
annotations:
summary: Istio Mixer Prometheus dispatches low (instance {{ $labels.instance }})
description:
"Number of Mixer dispatches to Prometheus is too low. Istio metrics might not be being
exported properly.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: IstioPilotHighTotalRequestRate
expr: 'sum(rate(pilot_xds_push_errors[1m])) / sum(rate(pilot_xds_pushes[1m])) * 100 > 5'
for: 1m
labels:
severity: warning
annotations:
summary: Istio Pilot high total request rate (instance {{ $labels.instance }})
description: "Number of Istio Pilot push errors is too high (> 5%). Envoy sidecars might have outdated configuration.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: IstioHighTotalRequestRate
expr: 'sum(rate(istio_requests_total{reporter="destination"}[5m])) > 1000'
for: 2m
labels:
severity: warning
annotations:
summary: Istio high total request rate (instance {{ $labels.instance }})
description:
"Global request rate in the service mesh is unusually high.\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
- alert: IstioMixerPrometheusDispatchesLow
expr: 'sum(rate(mixer_runtime_dispatches_total{adapter=~"prometheus"}[1m])) < 180'
for: 1m
labels:
severity: warning
annotations:
summary: Istio Mixer Prometheus dispatches low (instance {{ $labels.instance }})
description: "Number of Mixer dispatches to Prometheus is too low. Istio metrics might not be being exported properly.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: IstioLowTotalRequestRate
expr: 'sum(rate(istio_requests_total{reporter="destination"}[5m])) < 100'
for: 2m
labels:
severity: warning
annotations:
summary: Istio low total request rate (instance {{ $labels.instance }})
description:
"Global request rate in the service mesh is unusually low.\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
- alert: IstioHighTotalRequestRate
expr: 'sum(rate(istio_requests_total{reporter="destination"}[5m])) > 1000'
for: 2m
labels:
severity: warning
annotations:
summary: Istio high total request rate (instance {{ $labels.instance }})
description: "Global request rate in the service mesh is unusually high.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: IstioHigh4xxErrorRate
expr:
'sum(rate(istio_requests_total{reporter="destination", response_code=~"4.*"}[5m])) /
sum(rate(istio_requests_total{reporter="destination"}[5m])) * 100 > 5'
for: 1m
labels:
severity: warning
annotations:
summary: Istio high 4xx error rate (instance {{ $labels.instance }})
description:
"High percentage of HTTP 5xx responses in Istio (> 5%).\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
- alert: IstioLowTotalRequestRate
expr: 'sum(rate(istio_requests_total{reporter="destination"}[5m])) < 100'
for: 2m
labels:
severity: warning
annotations:
summary: Istio low total request rate (instance {{ $labels.instance }})
description: "Global request rate in the service mesh is unusually low.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: IstioHigh5xxErrorRate
expr:
'sum(rate(istio_requests_total{reporter="destination", response_code=~"5.*"}[5m])) /
sum(rate(istio_requests_total{reporter="destination"}[5m])) * 100 > 5'
for: 1m
labels:
severity: warning
annotations:
summary: Istio high 5xx error rate (instance {{ $labels.instance }})
description:
"High percentage of HTTP 5xx responses in Istio (> 5%).\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
- alert: IstioHigh4xxErrorRate
expr: 'sum(rate(istio_requests_total{reporter="destination", response_code=~"4.*"}[5m])) / sum(rate(istio_requests_total{reporter="destination"}[5m])) * 100 > 5'
for: 1m
labels:
severity: warning
annotations:
summary: Istio high 4xx error rate (instance {{ $labels.instance }})
description: "High percentage of HTTP 5xx responses in Istio (> 5%).\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: IstioHighRequestLatency
expr:
'rate(istio_request_duration_milliseconds_sum{reporter="destination"}[1m]) /
rate(istio_request_duration_milliseconds_count{reporter="destination"}[1m]) > 100'
for: 1m
labels:
severity: warning
annotations:
summary: Istio high request latency (instance {{ $labels.instance }})
description:
"Istio average requests execution is longer than 100ms.\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
- alert: IstioHigh5xxErrorRate
expr: 'sum(rate(istio_requests_total{reporter="destination", response_code=~"5.*"}[5m])) / sum(rate(istio_requests_total{reporter="destination"}[5m])) * 100 > 5'
for: 1m
labels:
severity: warning
annotations:
summary: Istio high 5xx error rate (instance {{ $labels.instance }})
description: "High percentage of HTTP 5xx responses in Istio (> 5%).\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: IstioLatency99Percentile
expr:
"histogram_quantile(0.99, sum(rate(istio_request_duration_milliseconds_bucket[1m])) by
(destination_canonical_service, destination_workload_namespace, source_canonical_service,
source_workload_namespace, le)) > 1000"
for: 1m
labels:
severity: warning
annotations:
summary: Istio latency 99 percentile (instance {{ $labels.instance }})
description:
"Istio 1% slowest requests are longer than 1000ms.\n VALUE = {{ $value }}\n LABELS =
{{ $labels }}"
- alert: IstioHighRequestLatency
expr: 'rate(istio_request_duration_milliseconds_sum{reporter="destination"}[1m]) / rate(istio_request_duration_milliseconds_count{reporter="destination"}[1m]) > 100'
for: 1m
labels:
severity: warning
annotations:
summary: Istio high request latency (instance {{ $labels.instance }})
description: "Istio average requests execution is longer than 100ms.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: IstioLatency99Percentile
expr: 'histogram_quantile(0.99, sum(rate(istio_request_duration_milliseconds_bucket[1m])) by (destination_canonical_service, destination_workload_namespace, source_canonical_service, source_workload_namespace, le)) > 1000'
for: 1m
labels:
severity: warning
annotations:
summary: Istio latency 99 percentile (instance {{ $labels.instance }})
description: "Istio 1% slowest requests are longer than 1000ms.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: IstioPilotDuplicateEntry
expr: 'sum(rate(pilot_duplicate_envoy_clusters{}[5m])) > 0'
for: 0m
labels:
severity: critical
annotations:
summary: Istio Pilot Duplicate Entry (instance {{ $labels.instance }})
description: "Istio pilot duplicate entry error.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: IstioPilotDuplicateEntry
expr: "sum(rate(pilot_duplicate_envoy_clusters{}[5m])) > 0"
for: 0m
labels:
severity: critical
annotations:
summary: Istio Pilot Duplicate Entry (instance {{ $labels.instance }})
description:
"Istio pilot duplicate entry error.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

View File

@@ -1,311 +1,435 @@
groups:
- name: KubestateExporter
- name: KubestateExporter
rules:
- alert: KubernetesNodeNotReady
expr: 'kube_node_status_condition{condition="Ready",status="true"} == 0'
for: 10m
labels:
severity: critical
annotations:
summary: Kubernetes Node not ready (node {{ $labels.node }})
description:
"Node {{ $labels.node }} has been unready for a long time\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
rules:
- alert: KubernetesNodeMemoryPressure
expr: 'kube_node_status_condition{condition="MemoryPressure",status="true"} == 1'
for: 2m
labels:
severity: critical
annotations:
summary: Kubernetes memory pressure (node {{ $labels.node }})
description:
"Node {{ $labels.node }} has MemoryPressure condition\n VALUE = {{ $value }}\n LABELS
= {{ $labels }}"
- alert: KubernetesNodeNotReady
expr: 'kube_node_status_condition{condition="Ready",status="true"} == 0'
for: 10m
labels:
severity: critical
annotations:
summary: Kubernetes Node not ready (node {{ $labels.node }})
description: "Node {{ $labels.node }} has been unready for a long time\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesNodeDiskPressure
expr: 'kube_node_status_condition{condition="DiskPressure",status="true"} == 1'
for: 2m
labels:
severity: critical
annotations:
summary: Kubernetes disk pressure (node {{ $labels.node }})
description:
"Node {{ $labels.node }} has DiskPressure condition\n VALUE = {{ $value }}\n LABELS =
{{ $labels }}"
- alert: KubernetesNodeMemoryPressure
expr: 'kube_node_status_condition{condition="MemoryPressure",status="true"} == 1'
for: 2m
labels:
severity: critical
annotations:
summary: Kubernetes memory pressure (node {{ $labels.node }})
description: "Node {{ $labels.node }} has MemoryPressure condition\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesNodeNetworkUnavailable
expr: 'kube_node_status_condition{condition="NetworkUnavailable",status="true"} == 1'
for: 2m
labels:
severity: critical
annotations:
summary: Kubernetes Node network unavailable (instance {{ $labels.instance }})
description:
"Node {{ $labels.node }} has NetworkUnavailable condition\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
- alert: KubernetesNodeDiskPressure
expr: 'kube_node_status_condition{condition="DiskPressure",status="true"} == 1'
for: 2m
labels:
severity: critical
annotations:
summary: Kubernetes disk pressure (node {{ $labels.node }})
description: "Node {{ $labels.node }} has DiskPressure condition\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesNodeOutOfPodCapacity
expr:
'sum by (node) ((kube_pod_status_phase{phase="Running"} == 1) + on(uid) group_left(node)
(0 * kube_pod_info{pod_template_hash=""})) / sum by (node)
(kube_node_status_allocatable{resource="pods"}) * 100 > 90'
for: 2m
labels:
severity: warning
annotations:
summary: Kubernetes Node out of pod capacity (instance {{ $labels.instance }})
description:
"Node {{ $labels.node }} is out of pod capacity\n VALUE = {{ $value }}\n LABELS = {{
$labels }}"
- alert: KubernetesNodeNetworkUnavailable
expr: 'kube_node_status_condition{condition="NetworkUnavailable",status="true"} == 1'
for: 2m
labels:
severity: critical
annotations:
summary: Kubernetes Node network unavailable (instance {{ $labels.instance }})
description: "Node {{ $labels.node }} has NetworkUnavailable condition\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesContainerOomKiller
expr:
'(kube_pod_container_status_restarts_total - kube_pod_container_status_restarts_total
offset 10m >= 1) and ignoring (reason)
min_over_time(kube_pod_container_status_last_terminated_reason{reason="OOMKilled"}[10m])
== 1'
for: 0m
labels:
severity: warning
annotations:
summary:
Kubernetes container oom killer ({{ $labels.namespace }}/{{ $labels.pod }}:{{
$labels.container }})
description:
"Container {{ $labels.container }} in pod {{ $labels.namespace }}/{{ $labels.pod }} has
been OOMKilled {{ $value }} times in the last 10 minutes.\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
- alert: KubernetesNodeOutOfPodCapacity
expr: 'sum by (node) ((kube_pod_status_phase{phase="Running"} == 1) + on(uid) group_left(node) (0 * kube_pod_info{pod_template_hash=""})) / sum by (node) (kube_node_status_allocatable{resource="pods"}) * 100 > 90'
for: 2m
labels:
severity: warning
annotations:
summary: Kubernetes Node out of pod capacity (instance {{ $labels.instance }})
description: "Node {{ $labels.node }} is out of pod capacity\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesJobFailed
expr: "kube_job_status_failed > 0"
for: 0m
labels:
severity: warning
annotations:
summary: Kubernetes Job failed ({{ $labels.namespace }}/{{ $labels.job_name }})
description:
"Job {{ $labels.namespace }}/{{ $labels.job_name }} failed to complete\n VALUE = {{
$value }}\n LABELS = {{ $labels }}"
- alert: KubernetesContainerOomKiller
expr: '(kube_pod_container_status_restarts_total - kube_pod_container_status_restarts_total offset 10m >= 1) and ignoring (reason) min_over_time(kube_pod_container_status_last_terminated_reason{reason="OOMKilled"}[10m]) == 1'
for: 0m
labels:
severity: warning
annotations:
summary: Kubernetes container oom killer ({{ $labels.namespace }}/{{ $labels.pod }}:{{ $labels.container }})
description: "Container {{ $labels.container }} in pod {{ $labels.namespace }}/{{ $labels.pod }} has been OOMKilled {{ $value }} times in the last 10 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesCronjobSuspended
expr: "kube_cronjob_spec_suspend != 0"
for: 0m
labels:
severity: warning
annotations:
summary: Kubernetes CronJob suspended ({{ $labels.namespace }}/{{ $labels.cronjob }})
description:
"CronJob {{ $labels.namespace }}/{{ $labels.cronjob }} is suspended\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
- alert: KubernetesJobFailed
expr: 'kube_job_status_failed > 0'
for: 0m
labels:
severity: warning
annotations:
summary: Kubernetes Job failed ({{ $labels.namespace }}/{{ $labels.job_name }})
description: "Job {{ $labels.namespace }}/{{ $labels.job_name }} failed to complete\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesPersistentvolumeclaimPending
expr: 'kube_persistentvolumeclaim_status_phase{phase="Pending"} == 1'
for: 2m
labels:
severity: warning
annotations:
summary:
Kubernetes PersistentVolumeClaim pending ({{ $labels.namespace }}/{{
$labels.persistentvolumeclaim }})
description:
"PersistentVolumeClaim {{ $labels.namespace }}/{{ $labels.persistentvolumeclaim }} is
pending\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesCronjobSuspended
expr: 'kube_cronjob_spec_suspend != 0'
for: 0m
labels:
severity: warning
annotations:
summary: Kubernetes CronJob suspended ({{ $labels.namespace }}/{{ $labels.cronjob }})
description: "CronJob {{ $labels.namespace }}/{{ $labels.cronjob }} is suspended\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesVolumeOutOfDiskSpace
expr:
"kubelet_volume_stats_available_bytes / kubelet_volume_stats_capacity_bytes * 100 < 10"
for: 2m
labels:
severity: warning
annotations:
summary: Kubernetes Volume out of disk space (instance {{ $labels.instance }})
description:
"Volume is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesPersistentvolumeclaimPending
expr: 'kube_persistentvolumeclaim_status_phase{phase="Pending"} == 1'
for: 2m
labels:
severity: warning
annotations:
summary: Kubernetes PersistentVolumeClaim pending ({{ $labels.namespace }}/{{ $labels.persistentvolumeclaim }})
description: "PersistentVolumeClaim {{ $labels.namespace }}/{{ $labels.persistentvolumeclaim }} is pending\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesVolumeFullInFourDays
expr: "predict_linear(kubelet_volume_stats_available_bytes[6h:5m], 4 * 24 * 3600) < 0"
for: 0m
labels:
severity: critical
annotations:
summary: Kubernetes Volume full in four days (instance {{ $labels.instance }})
description:
"Volume under {{ $labels.namespace }}/{{ $labels.persistentvolumeclaim }} is expected to
fill up within four days. Currently {{ $value | humanize }}% is available.\n VALUE = {{
$value }}\n LABELS = {{ $labels }}"
- alert: KubernetesVolumeOutOfDiskSpace
expr: 'kubelet_volume_stats_available_bytes / kubelet_volume_stats_capacity_bytes * 100 < 10'
for: 2m
labels:
severity: warning
annotations:
summary: Kubernetes Volume out of disk space (instance {{ $labels.instance }})
description: "Volume is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesPersistentvolumeError
expr:
'kube_persistentvolume_status_phase{phase=~"Failed|Pending", job="kube-state-metrics"} > 0'
for: 0m
labels:
severity: critical
annotations:
summary: Kubernetes PersistentVolume error ({{ $labels.persistentvolume }})
description:
"Persistent volume {{ $labels.persistentvolume }} is in bad state\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
- alert: KubernetesVolumeFullInFourDays
expr: 'predict_linear(kubelet_volume_stats_available_bytes[6h:5m], 4 * 24 * 3600) < 0'
for: 0m
labels:
severity: critical
annotations:
summary: Kubernetes Volume full in four days (instance {{ $labels.instance }})
description: "Volume under {{ $labels.namespace }}/{{ $labels.persistentvolumeclaim }} is expected to fill up within four days. Currently {{ $value | humanize }}% is available.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesStatefulsetDown
expr: "kube_statefulset_replicas != kube_statefulset_status_replicas_ready > 0"
for: 1m
labels:
severity: critical
annotations:
summary: Kubernetes StatefulSet down ({{ $labels.namespace }}/{{ $labels.statefulset }})
description:
"StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} went down\n VALUE = {{
$value }}\n LABELS = {{ $labels }}"
- alert: KubernetesHpaScaleInability
expr:
'kube_horizontalpodautoscaler_status_condition{status="false", condition="AbleToScale"} ==
1'
for: 2m
labels:
severity: warning
annotations:
summary: Kubernetes HPA scale inability (instance {{ $labels.instance }})
description:
"HPA {{ $labels.namespace }}/{{ $labels.horizontalpodautoscaler }} is unable to
scale\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
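      # kube-state-metrics exports each HPA condition (AbleToScale, ScalingActive,
      # ScalingLimited) as kube_horizontalpodautoscaler_status_condition with a
      # `status` label, so matching status="false" == 1 selects conditions that are
      # explicitly failing rather than merely unknown.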
- alert: KubernetesHpaMetricsUnavailability
expr:
'kube_horizontalpodautoscaler_status_condition{status="false", condition="ScalingActive"}
== 1'
for: 0m
labels:
severity: warning
annotations:
summary: Kubernetes HPA metrics unavailability (instance {{ $labels.instance }})
description:
"HPA {{ $labels.namespace }}/{{ $labels.horizontalpodautoscaler }} is unable to collect
metrics\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesHpaScaleMaximum
expr:
"kube_horizontalpodautoscaler_status_desired_replicas >=
kube_horizontalpodautoscaler_spec_max_replicas"
for: 2m
labels:
severity: info
annotations:
summary: Kubernetes HPA scale maximum (instance {{ $labels.instance }})
description:
"HPA {{ $labels.namespace }}/{{ $labels.horizontalpodautoscaler }} has hit maximum
number of desired pods\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesHpaUnderutilized
expr:
"max(quantile_over_time(0.5, kube_horizontalpodautoscaler_status_desired_replicas[1d]) ==
kube_horizontalpodautoscaler_spec_min_replicas) by (horizontalpodautoscaler) > 3"
for: 0m
labels:
severity: info
annotations:
summary: Kubernetes HPA underutilized (instance {{ $labels.instance }})
        description:
          "HPA {{ $labels.namespace }}/{{ $labels.horizontalpodautoscaler }} has been at its
          minimum replica count for at least 50% of the time over the last day. Potential cost
          saving here.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesPodNotHealthy
expr: 'sum by (namespace, pod) (kube_pod_status_phase{phase=~"Pending|Unknown|Failed"}) > 0'
for: 15m
labels:
severity: critical
annotations:
summary: Kubernetes Pod not healthy ({{ $labels.namespace }}/{{ $labels.pod }})
description:
"Pod {{ $labels.namespace }}/{{ $labels.pod }} has been in a non-running state for
longer than 15 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
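      # kube_pod_status_phase emits one 0/1 series per pod and phase; summing the
      # matched phases by (namespace, pod) folds Pending, Unknown and Failed into a
      # single series, so one rule covers all three non-running states.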
- alert: KubernetesPodCrashLooping
expr: "increase(kube_pod_container_status_restarts_total[1m]) > 3"
for: 2m
labels:
severity: warning
annotations:
summary: Kubernetes pod crash looping ({{ $labels.namespace }}/{{ $labels.pod }})
description:
"Pod {{ $labels.namespace }}/{{ $labels.pod }} is crash looping\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
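      # increase() estimates the restart-counter delta over a 1m window; together
      # with `for: 2m`, the restarts must keep coming for two consecutive minutes
      # before the alert fires, which filters out a single isolated restart.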
- alert: KubernetesReplicasetReplicasMismatch
expr: "kube_replicaset_spec_replicas != kube_replicaset_status_ready_replicas"
for: 10m
labels:
severity: warning
annotations:
        summary: Kubernetes ReplicaSet mismatch ({{ $labels.namespace }}/{{ $labels.replicaset }})
description:
"ReplicaSet {{ $labels.namespace }}/{{ $labels.replicaset }} replicas mismatch\n VALUE
= {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesDeploymentReplicasMismatch
expr: "kube_deployment_spec_replicas != kube_deployment_status_replicas_available"
for: 10m
labels:
severity: warning
annotations:
summary:
Kubernetes Deployment replicas mismatch ({{ $labels.namespace }}/{{ $labels.deployment
}})
description:
"Deployment {{ $labels.namespace }}/{{ $labels.deployment }} replicas mismatch\n VALUE
= {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesStatefulsetReplicasMismatch
expr: "kube_statefulset_status_replicas_ready != kube_statefulset_status_replicas"
for: 10m
labels:
severity: warning
annotations:
summary: Kubernetes StatefulSet replicas mismatch (instance {{ $labels.instance }})
description:
"StatefulSet does not match the expected number of replicas.\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
- alert: KubernetesDeploymentGenerationMismatch
expr: "kube_deployment_status_observed_generation != kube_deployment_metadata_generation"
for: 10m
labels:
severity: critical
annotations:
summary:
Kubernetes Deployment generation mismatch ({{ $labels.namespace }}/{{ $labels.deployment
}})
description:
"Deployment {{ $labels.namespace }}/{{ $labels.deployment }} has failed but has not been
rolled back.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
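      # metadata.generation increments on every spec change, while
      # status.observedGeneration trails it until the controller acts on the change;
      # a mismatch persisting for 10m means the Deployment controller is not
      # processing the object (e.g. a failed rollout that was never rolled back).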
- alert: KubernetesStatefulsetGenerationMismatch
expr: "kube_statefulset_status_observed_generation != kube_statefulset_metadata_generation"
for: 10m
labels:
severity: critical
annotations:
summary:
Kubernetes StatefulSet generation mismatch ({{ $labels.namespace }}/{{
$labels.statefulset }})
description:
"StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} has failed but has not
been rolled back.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesStatefulsetUpdateNotRolledOut
expr:
"max without (revision) (kube_statefulset_status_current_revision unless
kube_statefulset_status_update_revision) * (kube_statefulset_replicas !=
kube_statefulset_status_replicas_updated)"
for: 10m
labels:
severity: warning
annotations:
summary:
Kubernetes StatefulSet update not rolled out ({{ $labels.namespace }}/{{
$labels.statefulset }})
description:
"StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} update has not been
rolled out.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesDaemonsetRolloutStuck
expr:
"kube_daemonset_status_number_ready / kube_daemonset_status_desired_number_scheduled * 100
< 100 or kube_daemonset_status_desired_number_scheduled -
kube_daemonset_status_current_number_scheduled > 0"
for: 10m
labels:
severity: warning
annotations:
summary:
Kubernetes DaemonSet rollout stuck ({{ $labels.namespace }}/{{ $labels.daemonset }})
description:
"Some Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} are not
scheduled or not ready\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
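      # Two failure modes in one expression: ready pods below 100% of the desired
      # count, or nodes that should run the DaemonSet but have no pod scheduled on
      # them yet (desired minus currently scheduled > 0).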
- alert: KubernetesDaemonsetMisscheduled
expr: "kube_daemonset_status_number_misscheduled > 0"
for: 1m
labels:
severity: critical
annotations:
summary:
Kubernetes DaemonSet misscheduled ({{ $labels.namespace }}/{{ $labels.daemonset }})
description:
"Some Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} are running
where they are not supposed to run\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesCronjobTooLong
expr: "time() - kube_cronjob_next_schedule_time > 3600"
for: 0m
labels:
severity: warning
annotations:
summary: Kubernetes CronJob too long ({{ $labels.namespace }}/{{ $labels.cronjob }})
description:
"CronJob {{ $labels.namespace }}/{{ $labels.cronjob }} is taking more than 1h to
complete.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesJobSlowCompletion
expr: "kube_job_spec_completions - kube_job_status_succeeded - kube_job_status_failed > 0"
for: 12h
labels:
severity: critical
annotations:
summary: Kubernetes job slow completion ({{ $labels.namespace }}/{{ $labels.job_name }})
description:
"Kubernetes Job {{ $labels.namespace }}/{{ $labels.job_name }} did not complete in
time.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesApiServerErrors
expr:
'sum(rate(apiserver_request_total{job="apiserver",code=~"^(?:5..)$"}[1m])) /
sum(rate(apiserver_request_total{job="apiserver"}[1m])) * 100 > 3'
for: 2m
labels:
severity: critical
annotations:
summary: Kubernetes API server errors (instance {{ $labels.instance }})
description:
"Kubernetes API server is experiencing high error rate\n VALUE = {{ $value }}\n LABELS
= {{ $labels }}"
- alert: KubernetesApiClientErrors
expr:
'(sum(rate(rest_client_requests_total{code=~"(4|5).."}[1m])) by (instance, job) /
sum(rate(rest_client_requests_total[1m])) by (instance, job)) * 100 > 1'
for: 2m
labels:
severity: critical
annotations:
summary: Kubernetes API client errors (instance {{ $labels.instance }})
description:
"Kubernetes API client is experiencing high error rate\n VALUE = {{ $value }}\n LABELS
= {{ $labels }}"
- alert: KubernetesClientCertificateExpiresNextWeek
expr:
'apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and
histogram_quantile(0.01, sum by (job, le)
(rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) <
7*24*60*60'
for: 0m
labels:
severity: warning
annotations:
summary: Kubernetes client certificate expires next week (instance {{ $labels.instance }})
description:
"A client certificate used to authenticate to the apiserver is expiring next
week.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
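      # apiserver_client_certificate_expiration_seconds is a histogram of the
      # remaining lifetime of client certificates seen by the apiserver;
      # histogram_quantile(0.01, ...) approximates the 1st percentile, i.e. the
      # soonest-expiring certificates observed over the last 5m.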
- alert: KubernetesClientCertificateExpiresSoon
expr:
'apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and
histogram_quantile(0.01, sum by (job, le)
(rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) <
24*60*60'
for: 0m
labels:
severity: critical
annotations:
summary: Kubernetes client certificate expires soon (instance {{ $labels.instance }})
description:
"A client certificate used to authenticate to the apiserver is expiring in less than
          24 hours.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: KubernetesApiServerLatency
expr:
'histogram_quantile(0.99,
sum(rate(apiserver_request_duration_seconds_bucket{subresource!="log",verb!~"^(?:CONNECT|WATCHLIST|WATCH|PROXY)$"}
[10m])) WITHOUT (instance, resource)) > 1'
for: 2m
labels:
severity: warning
annotations:
summary: Kubernetes API server latency (instance {{ $labels.instance }})
description:
"Kubernetes API server has a 99th percentile latency of {{ $value }} seconds for {{
$labels.verb }} {{ $labels.resource }}.\n VALUE = {{ $value }}\n LABELS = {{ $labels
}}"

View File

@@ -1,347 +1,508 @@
groups:
- name: NodeExporter
rules:
- alert: HostOutOfMemory
expr:
'(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10) * on(instance)
group_left (nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host out of memory (instance {{ $labels.instance }})
description:
"Node memory is filling up (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels
}}"
- alert: HostMemoryUnderMemoryPressure
expr:
'(rate(node_vmstat_pgmajfault[1m]) > 1000) * on(instance) group_left (nodename)
node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host memory under memory pressure (instance {{ $labels.instance }})
description:
"The node is under heavy memory pressure. High rate of major page faults\n VALUE = {{
$value }}\n LABELS = {{ $labels }}"
- alert: HostMemoryIsUnderutilized
expr:
'(100 - (avg_over_time(node_memory_MemAvailable_bytes[30m]) / node_memory_MemTotal_bytes *
100) < 20) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 1w
labels:
severity: info
annotations:
summary: Host Memory is underutilized (instance {{ $labels.instance }})
        description:
          "Node memory usage is < 20% for 1 week. Consider reducing the memory allocation.
          (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualNetworkThroughputIn
expr:
'(sum by (instance) (rate(node_network_receive_bytes_total[2m])) / 1024 / 1024 > 100) *
on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 5m
labels:
severity: warning
annotations:
summary: Host unusual network throughput in (instance {{ $labels.instance }})
description:
"Host network interfaces are probably receiving too much data (> 100 MB/s)\n VALUE = {{
$value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualNetworkThroughputOut
expr:
'(sum by (instance) (rate(node_network_transmit_bytes_total[2m])) / 1024 / 1024 > 100) *
on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 5m
labels:
severity: warning
annotations:
summary: Host unusual network throughput out (instance {{ $labels.instance }})
description:
"Host network interfaces are probably sending too much data (> 100 MB/s)\n VALUE = {{
$value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualDiskReadRate
expr:
'(sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50) *
on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 5m
labels:
severity: warning
annotations:
summary: Host unusual disk read rate (instance {{ $labels.instance }})
description:
"Disk is probably reading too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS =
{{ $labels }}"
- alert: HostUnusualDiskWriteRate
expr:
'(sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50) *
on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host unusual disk write rate (instance {{ $labels.instance }})
description:
"Disk is probably writing too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS =
{{ $labels }}"
- alert: HostOutOfDiskSpace
expr:
'((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance,
device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename)
node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host out of disk space (instance {{ $labels.instance }})
description:
"Disk is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostDiskWillFillIn24Hours
expr:
'((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance,
device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 *
3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) *
on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host disk will fill in 24 hours (instance {{ $labels.instance }})
description:
"Filesystem is predicted to run out of space within the next 24 hours at current write
rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
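      # predict_linear over a 1h window extrapolates free bytes 24 hours ahead;
      # the `and ... node_filesystem_readonly == 0` clause drops read-only mounts
      # (which cannot fill up by writes), and tmpfs is excluded from the
      # prediction.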
- alert: HostOutOfInodes
expr:
'(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"}
* 100 < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) *
on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host out of inodes (instance {{ $labels.instance }})
description:
"Disk is almost running out of available inodes (< 10% left)\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
- alert: HostFilesystemDeviceError
expr: "node_filesystem_device_error == 1"
for: 0m
labels:
severity: critical
annotations:
summary: Host filesystem device error (instance {{ $labels.instance }})
description:
"{{ $labels.instance }}: Device error with the {{ $labels.mountpoint }}
filesystem\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostInodesWillFillIn24Hours
expr:
'(node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"}
* 100 < 10 and predict_linear(node_filesystem_files_free{fstype!="msdosfs"}[1h], 24 *
3600) < 0 and ON (instance, device, mountpoint)
node_filesystem_readonly{fstype!="msdosfs"} == 0) * on(instance) group_left (nodename)
node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host inodes will fill in 24 hours (instance {{ $labels.instance }})
description:
"Filesystem is predicted to run out of inodes within the next 24 hours at current write
rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualDiskReadLatency
expr:
'(rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m])
> 0.1 and rate(node_disk_reads_completed_total[1m]) > 0) * on(instance) group_left
(nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host unusual disk read latency (instance {{ $labels.instance }})
description:
"Disk latency is growing (read operations > 100ms)\n VALUE = {{ $value }}\n LABELS =
{{ $labels }}"
- alert: HostUnusualDiskWriteLatency
expr:
'(rate(node_disk_write_time_seconds_total[1m]) /
rate(node_disk_writes_completed_total[1m]) > 0.1 and
rate(node_disk_writes_completed_total[1m]) > 0) * on(instance) group_left (nodename)
node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host unusual disk write latency (instance {{ $labels.instance }})
description:
"Disk latency is growing (write operations > 100ms)\n VALUE = {{ $value }}\n LABELS =
{{ $labels }}"
- alert: HostHighCpuLoad
expr:
'(sum by (instance) (avg by (mode, instance)
(rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > 0.8) * on(instance) group_left
(nodename) node_uname_info{nodename=~".+"}'
for: 10m
labels:
severity: warning
annotations:
summary: Host high CPU load (instance {{ $labels.instance }})
description: "CPU load is > 80%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostCpuIsUnderutilized
expr:
'(100 - (rate(node_cpu_seconds_total{mode="idle"}[30m]) * 100) < 20) * on(instance)
group_left (nodename) node_uname_info{nodename=~".+"}'
for: 1w
labels:
severity: info
annotations:
summary: Host CPU is underutilized (instance {{ $labels.instance }})
description:
"CPU load is < 20% for 1 week. Consider reducing the number of CPUs.\n VALUE = {{
$value }}\n LABELS = {{ $labels }}"
- alert: HostCpuStealNoisyNeighbor
expr:
'(avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10) *
on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 0m
labels:
severity: warning
annotations:
summary: Host CPU steal noisy neighbor (instance {{ $labels.instance }})
        description:
          "CPU steal is > 10%. A noisy neighbor is degrading VM performance, or a spot instance
          may be out of credit.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostCpuHighIowait
expr:
'(avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) * 100 > 10) *
on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 0m
labels:
severity: warning
annotations:
summary: Host CPU high iowait (instance {{ $labels.instance }})
description:
"CPU iowait > 10%. A high iowait means that you are disk or network bound.\n VALUE = {{
$value }}\n LABELS = {{ $labels }}"
- alert: HostUnusualDiskIo
expr:
'(rate(node_disk_io_time_seconds_total[1m]) > 0.5) * on(instance) group_left (nodename)
node_uname_info{nodename=~".+"}'
for: 5m
labels:
severity: warning
annotations:
summary: Host unusual disk IO (instance {{ $labels.instance }})
description:
"Time spent in IO is too high on {{ $labels.instance }}. Check storage for
issues.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostContextSwitching
expr:
'((rate(node_context_switches_total[5m])) / (count without(cpu, mode)
(node_cpu_seconds_total{mode="idle"})) > 10000) * on(instance) group_left (nodename)
node_uname_info{nodename=~".+"}'
for: 0m
labels:
severity: warning
annotations:
summary: Host context switching (instance {{ $labels.instance }})
description:
"Context switching is growing on the node (> 10000 / CPU / s)\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
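      # count without(cpu, mode) over the idle-mode series yields the number of
      # CPUs per instance, so dividing the context-switch rate by it normalizes
      # the 10000/s threshold to a per-core figure that holds on machines of any
      # size.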
- alert: HostSwapIsFillingUp
expr:
'((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80) *
on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host swap is filling up (instance {{ $labels.instance }})
description: "Swap is filling up (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostSystemdServiceCrashed
expr:
'(node_systemd_unit_state{state="failed"} == 1) * on(instance) group_left (nodename)
node_uname_info{nodename=~".+"}'
for: 0m
labels:
severity: warning
annotations:
summary: Host systemd service crashed (instance {{ $labels.instance }})
description: "systemd service crashed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostPhysicalComponentTooHot
expr:
'((node_hwmon_temp_celsius * ignoring(label) group_left(instance, job, node, sensor)
node_hwmon_sensor_label{label!="tctl"} > 75)) * on(instance) group_left (nodename)
node_uname_info{nodename=~".+"}'
for: 5m
labels:
severity: warning
annotations:
summary: Host physical component too hot (instance {{ $labels.instance }})
description:
"Physical hardware component too hot\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostNodeOvertemperatureAlarm
expr:
'(node_hwmon_temp_crit_alarm_celsius == 1) * on(instance) group_left (nodename)
node_uname_info{nodename=~".+"}'
for: 0m
labels:
severity: critical
annotations:
summary: Host node overtemperature alarm (instance {{ $labels.instance }})
description:
"Physical node temperature alarm triggered\n VALUE = {{ $value }}\n LABELS = {{
$labels }}"
- alert: HostRaidArrayGotInactive
expr:
'(node_md_state{state="inactive"} > 0) * on(instance) group_left (nodename)
node_uname_info{nodename=~".+"}'
for: 0m
labels:
severity: critical
annotations:
summary: Host RAID array got inactive (instance {{ $labels.instance }})
description:
"RAID array {{ $labels.device }} is in a degraded state due to one or more disk
failures. The number of spare drives is insufficient to fix the issue
automatically.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostRaidDiskFailure
expr:
'(node_md_disks{state="failed"} > 0) * on(instance) group_left (nodename)
node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host RAID disk failure (instance {{ $labels.instance }})
description:
"At least one device in RAID array on {{ $labels.instance }} failed. Array {{
$labels.md_device }} needs attention and possibly a disk swap\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
- alert: HostKernelVersionDeviations
expr:
'(count(sum(label_replace(node_uname_info, "kernel", "$1", "release",
"([0-9]+.[0-9]+.[0-9]+).*")) by (kernel)) > 1) * on(instance) group_left (nodename)
node_uname_info{nodename=~".+"}'
for: 6h
labels:
severity: warning
annotations:
summary: Host kernel version deviations (instance {{ $labels.instance }})
description:
"Different kernel versions are running\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostOomKillDetected
expr:
'(increase(node_vmstat_oom_kill[1m]) > 0) * on(instance) group_left (nodename)
node_uname_info{nodename=~".+"}'
for: 0m
labels:
severity: warning
annotations:
summary: Host OOM kill detected (instance {{ $labels.instance }})
description: "OOM kill detected\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostEdacCorrectableErrorsDetected
expr:
'(increase(node_edac_correctable_errors_total[1m]) > 0) * on(instance) group_left
(nodename) node_uname_info{nodename=~".+"}'
for: 0m
labels:
severity: info
annotations:
summary: Host EDAC Correctable Errors detected (instance {{ $labels.instance }})
        description:
          "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} correctable memory
          errors reported by EDAC in the last minute.\n VALUE = {{ $value }}\n LABELS = {{
          $labels }}"
- alert: HostEdacUncorrectableErrorsDetected
expr:
'(node_edac_uncorrectable_errors_total > 0) * on(instance) group_left (nodename)
node_uname_info{nodename=~".+"}'
for: 0m
labels:
severity: warning
annotations:
summary: Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }})
        description:
          "Host {{ $labels.instance }} has {{ printf \"%.0f\" $value }} uncorrectable memory
          errors reported by EDAC.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostNetworkReceiveErrors
expr:
'(rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m])
> 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host Network Receive Errors (instance {{ $labels.instance }})
description:
"Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf
\"%.0f\" $value }} receive errors in the last two minutes.\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
- alert: HostNetworkTransmitErrors
expr:
'(rate(node_network_transmit_errs_total[2m]) /
rate(node_network_transmit_packets_total[2m]) > 0.01) * on(instance) group_left (nodename)
node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host Network Transmit Errors (instance {{ $labels.instance }})
description:
"Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf
\"%.0f\" $value }} transmit errors in the last two minutes.\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
- alert: HostNetworkInterfaceSaturated
expr:
'((rate(node_network_receive_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m]) +
rate(node_network_transmit_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m])) /
node_network_speed_bytes{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"} > 0.8 < 10000) *
on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 1m
labels:
severity: warning
annotations:
summary: Host Network Interface Saturated (instance {{ $labels.instance }})
description:
"The network interface \"{{ $labels.device }}\" on \"{{ $labels.instance }}\" is getting
overloaded.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
- alert: HostNetworkBondDegraded
expr:
'((node_bonding_active - node_bonding_slaves) != 0) * on(instance) group_left (nodename)
node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host Network Bond Degraded (instance {{ $labels.instance }})
description:
"Bond \"{{ $labels.device }}\" degraded on \"{{ $labels.instance }}\".\n VALUE = {{
$value }}\n LABELS = {{ $labels }}"
- alert: HostConntrackLimit
expr:
'(node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8) * on(instance)
group_left (nodename) node_uname_info{nodename=~".+"}'
for: 5m
labels:
severity: warning
annotations:
summary: Host conntrack limit (instance {{ $labels.instance }})
        description:
          "The number of conntrack entries is approaching the limit\n VALUE = {{ $value
          }}\n LABELS = {{ $labels }}"
- alert: HostClockSkew
expr:
'((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or
(node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)) *
on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 10m
labels:
severity: warning
annotations:
summary: Host clock skew (instance {{ $labels.instance }})
description:
"Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this
host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
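      # node_timex_offset_seconds is the kernel's estimated offset from its time
      # source; the deriv() guard makes the alert fire only while the >50ms offset
      # is holding steady or drifting further away, not while NTP is already
      # pulling the clock back toward zero.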
- alert: HostClockNotSynchronising
expr:
'(min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16) *
on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
for: 2m
labels:
severity: warning
annotations:
summary: Host clock not synchronising (instance {{ $labels.instance }})
description:
"Clock not synchronising. Ensure NTP is configured on this host.\n VALUE = {{ $value
}}\n LABELS = {{ $labels }}"
- alert: HostRequiresReboot
expr:
'(node_reboot_required > 0) * on(instance) group_left (nodename)
node_uname_info{nodename=~".+"}'
for: 4h
labels:
severity: info
annotations:
summary: Host requires reboot (instance {{ $labels.instance }})
description:
"{{ $labels.instance }} requires a reboot.\n VALUE = {{ $value }}\n LABELS = {{
$labels }}"