Rancher

Sample payloads sent by the webhook

These are the notifications Rancher's cluster alerting delivers to a webhook receiver (Alertmanager webhook format, version "4"): first the firing payload, then the corresponding resolved payload.

Firing

{
    "receiver":"c-twl6v:node-alert",
    "status":"firing",
    "alerts":[
        {
            "status":"firing",
            "labels":{
                "alert_name":"High node memory utilization",
                "alert_type":"metric",
                "alertname":"High node memory utilization",
                "cluster_name":"aliyun (ID: c-twl6v)",
                "comparison":"less or equal",
                "duration":"30s",
                "expression":"(1 - sum(node_memory_MemAvailable_bytes) by (instance) / sum(node_memory_MemTotal_bytes) by (instance)) * 100\\u003c=80",
                "group_id":"c-twl6v:node-alert",
                "instance":"192.168.8.81:9796",
                "prometheus":"cattle-prometheus/cluster-monitoring",
                "prometheus_from":"aliyun",
                "rule_id":"c-twl6v:node-alert_high-memmory",
                "severity":"warning",
                "threshold_value":"80"
            },
            "annotations":{
                "current_value":"17.380269598523935"
            },
            "startsAt":"2020-06-04T03:03:55.505652247Z",
            "endsAt":"0001-01-01T00:00:00Z",
            "generatorURL":"http://prometheus-cluster-monitoring-0:9090/graph?g0.expr=%281+-+sum+by%28instance%29+%28node_memory_MemAvailable_bytes%29+%2F+sum+by%28instance%29+%28node_memory_MemTotal_bytes%29%29+%2A+100+%3C%3D+80\\u0026g0.tab=1"
        },
        {
            "status":"firing",
            "labels":{
                "alert_name":"High node memory utilization",
                "alert_type":"metric",
                "alertname":"High node memory utilization",
                "cluster_name":"aliyun (ID: c-twl6v)",
                "comparison":"less or equal",
                "duration":"30s",
                "expression":"(1 - sum(node_memory_MemAvailable_bytes) by (instance) / sum(node_memory_MemTotal_bytes) by (instance)) * 100\\u003c=80",
                "group_id":"c-twl6v:node-alert",
                "instance":"192.168.9.94:9796",
                "prometheus":"cattle-prometheus/cluster-monitoring",
                "prometheus_from":"aliyun",
                "rule_id":"c-twl6v:node-alert_high-memmory",
                "severity":"warning",
                "threshold_value":"80"
            },
            "annotations":{
                "current_value":"12.721630318627353"
            },
            "startsAt":"2020-06-04T03:03:55.505652247Z",
            "endsAt":"0001-01-01T00:00:00Z",
            "generatorURL":"http://prometheus-cluster-monitoring-0:9090/graph?g0.expr=%281+-+sum+by%28instance%29+%28node_memory_MemAvailable_bytes%29+%2F+sum+by%28instance%29+%28node_memory_MemTotal_bytes%29%29+%2A+100+%3C%3D+80\\u0026g0.tab=1"
        }],
    "groupLabels":{
        "rule_id":"c-twl6v:node-alert_high-memmory"
    },
    "commonLabels":{
        "alert_name":"High node memory utilization",
        "alert_type":"metric",
        "alertname":"High node memory utilization",
        "cluster_name":"aliyun (ID: c-twl6v)",
        "comparison":"less or equal",
        "duration":"30s",
        "expression":"(1 - sum(node_memory_MemAvailable_bytes) by (instance) / sum(node_memory_MemTotal_bytes) by (instance)) * 100\\u003c=80",
        "group_id":"c-twl6v:node-alert",
        "prometheus":"cattle-prometheus/cluster-monitoring",
        "prometheus_from":"aliyun",
        "rule_id":"c-twl6v:node-alert_high-memmory",
        "severity":"warning",
        "threshold_value":"80"
    },
    "commonAnnotations":{
    },
    "externalURL":"http://alertmanager-cluster-alerting-0:9093",
    "version":"4",
    "groupKey":"{}/{group_id=\\"c-twl6v:node-alert\\"}/{rule_id=\\"c-twl6v:node-alert_high-memmory\\"}:{rule_id=\\"c-twl6v:node-alert_high-memmory\\"}"
}
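
Below is a minimal sketch of an HTTP receiver for this payload, written in Go. The struct fields simply mirror the JSON keys in the sample above; the /webhook path and the :8080 listen address are illustrative assumptions for this sketch, not anything Rancher requires.

package main

// Minimal sketch of a receiver for the Rancher/Alertmanager webhook payload
// shown above. Struct fields mirror the JSON keys in the sample; the listen
// address and the /webhook path are assumptions chosen for illustration.

import (
	"encoding/json"
	"log"
	"net/http"
)

// Alert mirrors one entry of the "alerts" array.
type Alert struct {
	Status       string            `json:"status"`
	Labels       map[string]string `json:"labels"`
	Annotations  map[string]string `json:"annotations"`
	StartsAt     string            `json:"startsAt"`
	EndsAt       string            `json:"endsAt"`
	GeneratorURL string            `json:"generatorURL"`
}

// Notification mirrors the top-level webhook body.
type Notification struct {
	Receiver          string            `json:"receiver"`
	Status            string            `json:"status"` // "firing" or "resolved"
	Alerts            []Alert           `json:"alerts"`
	GroupLabels       map[string]string `json:"groupLabels"`
	CommonLabels      map[string]string `json:"commonLabels"`
	CommonAnnotations map[string]string `json:"commonAnnotations"`
	ExternalURL       string            `json:"externalURL"`
	Version           string            `json:"version"`
	GroupKey          string            `json:"groupKey"`
}

func handleWebhook(w http.ResponseWriter, r *http.Request) {
	var n Notification
	if err := json.NewDecoder(r.Body).Decode(&n); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// Log each alert; a real receiver would forward this to a chat/IM channel.
	for _, a := range n.Alerts {
		log.Printf("[%s] %s on %s, current_value=%s",
			a.Status, a.Labels["alert_name"], a.Labels["instance"],
			a.Annotations["current_value"])
	}
	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/webhook", handleWebhook)
	log.Fatal(http.ListenAndServe(":8080", nil))
}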

Resolved

{
    "receiver":"c-twl6v:node-alert",
    "status":"resolved",
    "alerts":[
        {
            "status":"resolved",
            "labels":{
                "alert_name":"High node memory utilization",
                "alert_type":"metric",
                "alertname":"High node memory utilization",
                "cluster_name":"aliyun (ID: c-twl6v)",
                "comparison":"less or equal",
                "duration":"30s",
                "expression":"(1 - sum(node_memory_MemAvailable_bytes) by (instance) / sum(node_memory_MemTotal_bytes) by (instance)) * 100\\u003c=80",
                "group_id":"c-twl6v:node-alert",
                "instance":"192.168.8.81:9796",
                "prometheus":"cattle-prometheus/cluster-monitoring",
                "prometheus_from":"aliyun",
                "rule_id":"c-twl6v:node-alert_high-memmory",
                "severity":"warning",
                "threshold_value":"80"
            },
            "annotations":{
                "current_value":"17.33121575486386"
            },
            "startsAt":"2020-06-04T03:03:55.505652247Z",
            "endsAt":"2020-06-04T03:12:55.505652247Z",
            "generatorURL":"http://prometheus-cluster-monitoring-0:9090/graph?g0.expr=%281+-+sum+by%28instance%29+%28node_memory_MemAvailable_bytes%29+%2F+sum+by%28instance%29+%28node_memory_MemTotal_bytes%29%29+%2A+100+%3C%3D+80\\u0026g0.tab=1"
        },
        {
            "status":"resolved",
            "labels":{
                "alert_name":"High node memory utilization",
                "alert_type":"metric",
                "alertname":"High node memory utilization",
                "cluster_name":"aliyun (ID: c-twl6v)",
                "comparison":"less or equal",
                "duration":"30s",
                "expression":"(1 - sum(node_memory_MemAvailable_bytes) by (instance) / sum(node_memory_MemTotal_bytes) by (instance)) * 100\\u003c=80",
                "group_id":"c-twl6v:node-alert",
                "instance":"192.168.9.94:9796",
                "prometheus":"cattle-prometheus/cluster-monitoring",
                "prometheus_from":"aliyun",
                "rule_id":"c-twl6v:node-alert_high-memmory",
                "severity":"warning",
                "threshold_value":"80"
            },
            "annotations":{
                "current_value":"13.226702300952963"
            },
            "startsAt":"2020-06-04T03:03:55.505652247Z",
            "endsAt":"2020-06-04T03:12:55.505652247Z",
            "generatorURL":"http://prometheus-cluster-monitoring-0:9090/graph?g0.expr=%281+-+sum+by%28instance%29+%28node_memory_MemAvailable_bytes%29+%2F+sum+by%28instance%29+%28node_memory_MemTotal_bytes%29%29+%2A+100+%3C%3D+80\\u0026g0.tab=1"
        }],
    "groupLabels":{
        "rule_id":"c-twl6v:node-alert_high-memmory"
    },
    "commonLabels":{
        "alert_name":"High node memory utilization",
        "alert_type":"metric",
        "alertname":"High node memory utilization",
        "cluster_name":"aliyun (ID: c-twl6v)",
        "comparison":"less or equal",
        "duration":"30s",
        "expression":"(1 - sum(node_memory_MemAvailable_bytes) by (instance) / sum(node_memory_MemTotal_bytes) by (instance)) * 100\\u003c=80",
        "group_id":"c-twl6v:node-alert",
        "prometheus":"cattle-prometheus/cluster-monitoring",
        "prometheus_from":"aliyun",
        "rule_id":"c-twl6v:node-alert_high-memmory",
        "severity":"warning",
        "threshold_value":"80"
    },
    "commonAnnotations":{
    },
    "externalURL":"http://alertmanager-cluster-alerting-0:9093",
    "version":"4",
    "groupKey":"{}/{group_id=\\"c-twl6v:node-alert\\"}/{rule_id=\\"c-twl6v:node-alert_high-memmory\\"}:{rule_id=\\"c-twl6v:node-alert_high-memmory\\"}"
}
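
The resolved payload has the same shape as the firing one; only the top-level status and each alert's status flip to "resolved", endsAt carries the resolution time instead of the zero value 0001-01-01T00:00:00Z, and current_value reflects the metric at resolution time. A receiver like the sketch above therefore only needs to branch on the status field to treat these as recovery notifications; for local testing, either sample can be saved to a file and POSTed to the receiver.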
