# Prometheus main configuration file
---
# Global configuration section
global:
  scrape_interval: 15s       # default interval between scrapes
  evaluation_interval: 15s   # interval for evaluating alerting and recording rules
  scrape_timeout: 10s        # per-scrape timeout (must be <= scrape_interval)
  query_log_file: /opt/logs/prometheus_query_log  # query log, includes per-stage timing stats
  external_labels:           # labels attached to every sample scraped by this instance
    account: 'huawei-main'
# Alertmanager section
alerting:
  alertmanagers:
    - static_configs:
        # explicit empty list (the original bare `targets:` parses as null);
        # uncomment and fill in to enable alert delivery
        - targets: []
          # - alertmanager:9093
# Alerting / recording (pre-aggregation) rule files section
# explicit empty list (a bare `rule_files:` parses as null);
# uncomment entries to load rule files
rule_files: []
  # - "first_rules.yml"
  # - "second_rules.yml"
# Scrape configuration section
# Each job discovers its targets from files on disk (file_sd), so targets can
# be added/removed without reloading Prometheus.
scrape_configs:
  # Prometheus self-monitoring
  - job_name: "prometheus"
    honor_timestamps: true
    scrape_interval: 15s   # same as the global default, kept explicit per job
    scrape_timeout: 10s
    metrics_path: /metrics
    scheme: http
    file_sd_configs:
      - files:
          - 'targets/prometheus-*.yml'  # quoted so the glob reads unambiguously
        refresh_interval: 1m            # how often to re-read the target files

  # Host-level metrics (node_exporter)
  - job_name: "nodes"
    honor_timestamps: true
    scrape_interval: 15s
    scrape_timeout: 10s
    metrics_path: /metrics
    scheme: http
    file_sd_configs:
      - files:
          - 'targets/nodes-*.yml'
        refresh_interval: 1m

  # MySQL metrics (mysqld_exporter)
  - job_name: "mysqld"
    honor_timestamps: true
    scrape_interval: 15s
    scrape_timeout: 10s
    metrics_path: /metrics
    scheme: http
    file_sd_configs:
      - files:
          - 'targets/mysql-*.yml'
        refresh_interval: 1m

  # Per-process metrics (process exporter)
  - job_name: "process"
    honor_timestamps: true
    scrape_interval: 15s
    scrape_timeout: 10s
    metrics_path: /metrics
    scheme: http
    file_sd_configs:
      - files:
          - 'targets/process-*.yml'
        refresh_interval: 1m
# Remote read section
# explicit empty list (a bare `remote_read:` parses as null);
# uncomment one of the examples below to enable remote queries
remote_read: []
  # prometheus
  # - url: http://prometheus/v1/read
  #   read_recent: true
  # m3db
  # - url: "http://m3coordinator-read:7201/api/v1/prom/remote/read"
  #   read_recent: true

# Remote write section
remote_write: []
  # - url: "http://m3coordinator-write:7201/api/v1/prom/remote/write"
  #   queue_config:
  #     capacity: 10000
  #     max_samples_per_send: 60000
  #   write_relabel_configs:
  #     - source_labels: [__name__]
  #       separator: ;
  #       # drop series whose metric-name prefix matches the regex
  #       regex: '(kubelet_|apiserver_|container_fs_).*'
  #       replacement: $1
  #       action: drop