Binary Deployment of Prometheus and Grafana

Compiling and Installing the nginx_exporter Plugin

  • Compile and install the Nginx plugin modules
# mkdir /usr/local/nginx-vts-module
# ls /usr/local/nginx-vts-module ---- upload the Nginx extension modules to the server and extract them here
nginx-module-sts-0.1.1.tar.gz nginx-module-stream-sts-0.1.1.tar.gz nginx-module-vts-0.1.18.tar.gz nginx-1.21.5 nginx-module-stream-sts-0.1.1 nginx-module-sts-0.1.1 nginx-module-vts-0.1.18
# nginx -V ---- check the configure arguments of the currently installed Nginx
# yum -y install gcc gcc-c++ make automake autoconf pcre pcre-devel zlib zlib-devel openssl openssl-devel libtool ---- install the build dependencies
# cd /usr/local/nginx-vts-module/nginx-1.21.5
# ./configure --prefix=/usr/local/nginx --with-http_v2_module --with-http_ssl_module --with-http_gzip_static_module --with-poll_module --with-file-aio --with-http_realip_module --with-http_addition_module --with-http_random_index_module --http-uwsgi-temp-path=/usr/local/nginx/uwsgi_temp --http-scgi-temp-path=/usr/local/nginx/scgi_temp --with-pcre --with-http_stub_status_module --with-stream --add-module=/usr/local/nginx-vts-module/nginx-module-vts-0.1.18 --add-module=/usr/local/nginx-vts-module/nginx-module-sts-0.1.1 --add-module=/usr/local/nginx-vts-module/nginx-module-stream-sts-0.1.1 --add-module=/usr/local/src/nginx-goodies-nginx-sticky-module-ng-08a395c66e42/ --add-module=/usr/local/src/naxsi-0.55.3/naxsi_src/ --add-module=/usr/local/src/nginx_upstream_check_module-master --with-openssl=/usr/local/src/openssl-1.1.1g
# make
# make install
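Before replacing the running binary, it is worth confirming that the VTS/STS modules were actually compiled in. A minimal check (assuming the new binary was installed to /usr/local/nginx/sbin/nginx):
# /usr/local/nginx/sbin/nginx -V 2>&1 | tr ' ' '\n' | grep add-module ---- the vts/sts --add-module entries should be listed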
  • Modify the Nginx configuration files
# vim ../conf/nginx.conf
http {
    vhost_traffic_status_zone;          ---- new
    ...
    server {
    ...
        location /status {              ---- new location block
            vhost_traffic_status_display;
            vhost_traffic_status_display_format html;
        }
    }
}
# ./nginx -t
# killall nginx
# ./nginx
# curl http://10.130.41.10/status/format/prometheus ---- access the corresponding vhost to check whether metrics are returned; output like the following indicates it is working.
# HELP nginx_vts_info Nginx info
# TYPE nginx_vts_info gauge
nginx_vts_info{hostname="yidongqy.com",version="1.21.5"} 1
# HELP nginx_vts_start_time_seconds Nginx start time
# TYPE nginx_vts_start_time_seconds gauge
nginx_vts_start_time_seconds 1642158757.711
  • The attachments include a precompiled Nginx binary that can be used directly; it is suitable for CentOS 7 series systems.
[root@portal_node_1 BAK]# rz -be nginx-ok.tar.gz # extracting this provides the Nginx binary; then add the following directives to the corresponding local configuration files
[root@portal_node_1 vhosts]# vim nginx.conf # add the following directive inside the http block
vhost_traffic_status_zone;
[root@portal_node_1 vhosts]# vim local_location.conf
location /status {
    vhost_traffic_status_display;
    vhost_traffic_status_display_format html;
}
[root@portal_node_1 sbin]# killall nginx
[root@portal_node_1 sbin]# ./nginx
[root@portal_node_1 sbin]# curl http://10.130.41.10/status/format/prometheus # then check whether the following metrics are returned
# HELP nginx_vts_info Nginx info
# TYPE nginx_vts_info gauge
nginx_vts_info{hostname="portal_node_1",version="1.20.2"} 1
# HELP nginx_vts_start_time_seconds Nginx start time
# TYPE nginx_vts_start_time_seconds gauge
nginx_vts_start_time_seconds 1642660147.903
# HELP nginx_vts_main_connections Nginx connections
# TYPE nginx_vts_main_connections gauge
nginx_vts_main_connections{status="accepted"} 100
nginx_vts_main_connections{status="active"} 8
nginx_vts_main_connections{status="handled"} 100
nginx_vts_main_connections{status="reading"} 0
nginx_vts_main_connections{status="requests"} 97
nginx_vts_main_connections{status="waiting"} 7
nginx_vts_main_connections{status="writing"} 1
# HELP nginx_vts_main_shm_usage_bytes Shared memory [ngx_http_vhost_traffic_status] info
# TYPE nginx_vts_main_shm_usage_bytes gauge
nginx_vts_main_shm_usage_bytes{shared="max_size"} 1048575
nginx_vts_main_shm_usage_bytes{shared="used_size"} 3525
nginx_vts_main_shm_usage_bytes{shared="used_node"} 1

Deploying Node_exporter

  • This service needs to be deployed on every server that is to be monitored (a batch-rollout sketch follows the verification step below).
[root@portal_node_1 ~]# useradd -d /home/metric metric
[metric@portal_node_1 ~]# rz -be node_exporter-1.3.1.linux-amd64.tar.gz ---- upload the node_exporter package to the server
[metric@portal_node_1 ~]# tar xvf soft/node_exporter-1.3.1.linux-amd64.tar.gz
[metric@portal_node_1 ~]# mv node_exporter-1.3.1.linux-amd64 node_exporter-1.3.1
[metric@portal_node_1 ~]# cd node_exporter-1.3.1
[metric@portal_node_1 node_exporter-1.3.1]# vim start.sh # write the start script
#!/bin/bash
jarfilename=node_exporter
AppPath=/home/metric/node_exporter-1.3.1/
start()
{
nohup $AppPath$jarfilename > /dev/null &
}
stop()
{
kill -9 $(ps -ef|grep $jarfilename|grep -v grep |awk '{print $2}')
}
status()
{
ps -ef|grep $jarfilename|grep -v grep
}
restart() {
stop
start
}
case "$1" in
start)
start
;;
stop)
stop
;;
restart)
restart
;;
status)
status
;;
*)
echo $"Usage: $0 {start|stop|restart|status}"
esac
[metric@portal_node_1 node_exporter-1.3.1]$ ./start.sh start
[metric@portal_node_1 node_exporter-1.3.1]$ curl 127.0.0.1:9100/metrics
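Since node_exporter has to run on every monitored host, copying and starting it by hand quickly becomes tedious. A rough batch-rollout sketch (the host list and passwordless SSH access for the metric user are assumptions, not part of the original steps):
#!/bin/bash
# deploy_node_exporter.sh -- hypothetical helper; replace the host list with the real servers to monitor
for host in 10.130.41.10 10.130.41.120; do
    scp -r /home/metric/node_exporter-1.3.1 metric@$host:/home/metric/
    ssh metric@$host "cd /home/metric/node_exporter-1.3.1 && ./start.sh start"
done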

Deploying Mysql_exporter

  • Deploy Mysql_exporter (the MySQL account privileges it needs are sketched after the verification step below)
[root@localhost ~]# su - metric
[metric@localhost ~]$ rz -be mysqld_exporter-0.13.0.linux-amd64.tar.gz
[metric@localhost ~]$ tar xvf soft/mysqld_exporter-0.13.0.linux-amd64.tar.gz
[metric@localhost ~]$ mv mysqld_exporter-0.13.0.linux-amd64 mysqld_exporter-0.13.0
[metric@localhost ~]$ cd mysqld_exporter-0.13.0
[metric@localhost mysqld_exporter-0.13.0]$ vim mysqld_exporter.cnf
[client]
user=ucds
password=ucds
host=10.130.41.51
port=3306
[metric@localhost mysqld_exporter-0.13.0]$ vim start.sh
#!/bin/bash
jarfilename=mysqld_exporter
AppPath=/home/metric/mysqld_exporter-0.13.0/
configname=mysqld_exporter.cnf
start()
{
nohup $AppPath$jarfilename --config.my-cnf="$AppPath$configname" > /dev/null &
}
stop()
{
kill -9 $(ps -ef|grep $jarfilename|grep -v grep |awk '{print $2}')
}
status()
{
ps -ef|grep $jarfilename|grep -v grep
}
restart() {
stop
start
}
case "$1" in
start)
start
;;
stop)
stop
;;
restart)
restart
;;
status)
status
;;
*)
echo $"Usage: $0 {start|stop|restart|status}"
esac
[metric@localhost mysqld_exporter-0.13.0]$ ./start.sh start
[metric@localhost mysqld_exporter-0.13.0]$ curl http://10.130.41.120:9104/metrics ---- verify the result
# HELP mysql_up Whether the MySQL server is up.
# TYPE mysql_up gauge
mysql_up 1 ---- one MySQL instance was scraped successfully
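mysqld_exporter needs a MySQL account with at least the PROCESS, REPLICATION CLIENT and SELECT privileges. A minimal sketch for creating the ucds account referenced in mysqld_exporter.cnf above (the '%' host and the root login are placeholders; restrict them as appropriate for your environment):
# mysql -h 10.130.41.51 -P 3306 -uroot -p <<'SQL'
CREATE USER IF NOT EXISTS 'ucds'@'%' IDENTIFIED BY 'ucds';
GRANT PROCESS, REPLICATION CLIENT, SELECT ON *.* TO 'ucds'@'%';
FLUSH PRIVILEGES;
SQL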

Deploying docker_exporter

  • Deploy docker_exporter (cAdvisor)
Before starting the cadvisor container in Docker, run the following commands, otherwise the container will fail to start (a sketch for making these settings persistent across reboots follows below).
[root@my-dev ~]#mount -o remount,rw '/sys/fs/cgroup'
[root@my-dev ~]#ln -s /sys/fs/cgroup/cpu,cpuacct /sys/fs/cgroup/cpuacct,cpu
[root@my-dev ~]# docker run --volume=/:/rootfs:ro --volume=/var/run:/var/run:rw --volume=/sys:/sys:ro --volume=/var/lib/docker:/var/lib/docker:ro --publish=50715:8080 --detach=true --name=cadvisor google/cadvisor:latest
[root@my-dev ~]# curl 127.0.0.1:50715/metrics          ---- check whether metrics are returned
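The remount and symlink above do not survive a reboot. One way to make them persistent on CentOS 7 (a sketch, assuming /etc/rc.d/rc.local is used on this host):
[root@my-dev ~]# chmod +x /etc/rc.d/rc.local
[root@my-dev ~]# cat >> /etc/rc.d/rc.local <<'EOF'
mount -o remount,rw '/sys/fs/cgroup'
ln -sf /sys/fs/cgroup/cpu,cpuacct /sys/fs/cgroup/cpuacct,cpu
EOF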

Deploying redis_exporter

  • Deploy redis_exporter
[root@localhost ~]# su - metric
[metric@localhost ~]$ rz -be redis_exporter-v1.33.0.linux-amd64.tar.gz
[metric@localhost ~]$ tar xvf soft/redis_exporter-v1.33.0.linux-amd64.tar.gz
[metric@localhost ~]$mv redis_exporter-v1.33.0.linux-amd64 redis_exporter-v1.33.0
[metric@localhost ~]$ cd redis_exporter-v1.33.0
[metric@localhost redis_exporter-v1.33.0]$ vim start.sh
#!/bin/bash
jarfilename=redis_exporter
AppPath=/home/metric/redis_exporter-v1.33.0/
redisip=10.130.41.43:6380 # IP and port of the Redis master node
redispasswd=e4M8s2nyUC44MJXd
start()
{
nohup $AppPath$jarfilename -redis.addr redis://$redisip -redis.password $redispasswd > /dev/null & # Redis with a password set
# nohup $AppPath$jarfilename -redis.addr redis://$redisip > /dev/null & # Redis without a password
}
stop()
{
kill -9 $(ps -ef|grep $jarfilename|grep -v grep |awk '{print $2}')
}
status()
{
ps -ef|grep $jarfilename|grep -v grep
}
restart() {
stop
start
}
case "$1" in
start)
start
;;
stop)
stop
;;
restart)
restart
;;
status)
status
;;
*)
echo $"Usage: $0 {start|stop|restart|status}"
esac
[metric@localhost redis_exporter-v1.33.0]$ ./start.sh start
[metric@localhost redis_exporter-v1.33.0]$ curl http://10.130.41.120:9121/metrics
# HELP redis_up Information about the Redis instance
# TYPE redis_up gauge
redis_up 1 ---- one Redis instance was scraped successfully
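When one redis_exporter instance scrapes several Redis nodes (as configured in prometheus.yml below), a single node can also be checked directly through the exporter's /scrape endpoint; a sketch:
[metric@localhost redis_exporter-v1.33.0]$ curl "http://10.130.41.120:9121/scrape?target=redis://10.130.41.43:6380"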

Deploying Prometheus Server

  • Deploy Prometheus Server
[metric@localhost ~]$ rz -be prometheus-2.32.1.linux-amd64.tar.gz
[metric@localhost ~]$ tar xvf soft/prometheus-2.32.1.linux-amd64.tar.gz
[metric@localhost ~]$ mv prometheus-2.32.1.linux-amd64 prometheus-2.32.1
[metric@localhost ~]$ cd prometheus-2.32.1/
# the following step must be done as the root user
[root@localhost ~]# vim /usr/lib/systemd/system/prometheus.service
[Unit]
Description=Prometheus server daemon
After=network.target
[Service]
Type=simple
User=metric
Group=metric
# In --web.external-url and --web.listen-address, use this host's IP
ExecStart=/home/metric/prometheus-2.32.1/prometheus \
--config.file "/home/metric/prometheus-2.32.1/prometheus.yml" \
--storage.tsdb.path "/home/metric/prometheus-2.32.1/data" \
--storage.tsdb.retention.time=45d \
--web.console.templates "/home/metric/prometheus-2.32.1/consoles" \
--web.console.libraries "/home/metric/prometheus-2.32.1/console_libraries" \
--web.max-connections=1024 \
--web.external-url "http://10.130.41.120:9090" \
--web.listen-address "10.130.41.120:9090" \
--web.enable-lifecycle
Restart=no
[Install]
WantedBy=multi-user.target
 
[root@localhost ~]# systemctl daemon-reload
[root@localhost home]# su - metric
[metric@localhost ~]$ cd prometheus-2.32.1/
[metric@localhost prometheus-2.32.1]$ vim prometheus.yml
global:
  scrape_interval: 10s          ---- scrape every 10 seconds
  scrape_timeout: 5s        ---- set the scrape timeout to 5s
  evaluation_interval: 15s
 
scrape_configs:
  - job_name: "prometheus"
    static_configs:
      - targets: ["10.130.41.120:9090"]            ----修改localhost为本机IP
 
  - job_name: "nginx-metircs"         ----新增 Nginx vts 配置
    metrics_path: /status/format/prometheus
    static_configs:
      - targets: ['10.130.41.10:8091']     ----多个节点使用 , 分隔
 
  - job_name: "node-exporter"     ----新增 node-exporter 配置
    metrics_path: /metrics
    static_configs:
      - targets:
        - "10.130.41.10:9100"
        - "10.130.41.120:9100"
 
  - job_name: "mysql-exporter"        ----新增 mysql-exporter 配置
    metrics_path: /metrics
    static_configs:
      - targets: ['10.130.41.120:9104']
 
  - job_name: "redis-exporter-targets"            ----新增 Redis 节点设置
    static_configs:
      - targets:
        - redis://10.130.41.42:6379            ----存在多节点时,将所有节点都写上
        - redis://10.130.41.43:6380            ----存在多节点时,将所有节点都写上
        - redis://10.130.41.44:6381            ----存在多节点时,将所有节点都写上
    metrics_path: /scrape
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: 10.130.41.120:9121          ----填写 Redis-exporter IP:PORT
 
  - job_name: "redis-exporter"            ----新增 Redis-exporter 配置
    metrics_path: /metrics
    static_configs:
       - targets:
         - 10.130.41.120:9121
 
- job_name: "docker-exporter" ---新增 docker-exporter 配置
metrics_path: /metrics
static_configs:
- targets:
- 10.130.41.10:50715
[metric@localhost prometheus-2.32.1]$ ./promtool check config prometheus.yml # check the configuration for errors
Checking prometheus.yml
SUCCESS: 0 rule files found
[metric@localhost prometheus-2.32.1]$ exit
[root@localhost home]# systemctl daemon-reload
[root@localhost home]# systemctl restart prometheus
[root@localhost home]# systemctl status prometheus
[root@localhost home]# curl -s http://10.130.41.120:9090/metrics | grep targets           ---- check that the number of discovered targets is correct
  # HELP prometheus_sd_discovered_targets Current number of discovered targets.
  # TYPE prometheus_sd_discovered_targets gauge
  prometheus_sd_discovered_targets{config="config-0",name="notify"} 0
  prometheus_sd_discovered_targets{config="mysql-exporter",name="scrape"} 1
  prometheus_sd_discovered_targets{config="nginx-metircs",name="scrape"} 2
  prometheus_sd_discovered_targets{config="nginx-stub-status",name="scrape"} 2
  prometheus_sd_discovered_targets{config="node-exporter",name="scrape"} 2
  prometheus_sd_discovered_targets{config="prometheus",name="scrape"} 1
  prometheus_sd_discovered_targets{config="redis-exporter",name="scrape"} 1
  prometheus_sd_discovered_targets{config="redis-exporter-targets",name="scrape"} 1

Deploying Grafana Server

  • Deploy Grafana Server
[root@localhost home]# su - metric
[metric@localhost ~]$ rz -be grafana-enterprise-8.3.3.linux-amd64.tar.gz
[metric@localhost ~]$ tar xvf grafana-enterprise-8.3.3.linux-amd64.tar.gz
[metric@localhost ~]$ mv grafana-enterprise-8.3.3.linux-amd64 grafana-8.3.3
[metric@localhost ~]$ cd grafana-8.3.3
[root@localhost conf]# cd /home/metric/grafana-8.3.3/conf
[root@localhost conf]# cp sample.ini grafana.ini
[root@localhost conf]# cd ..
[root@localhost grafana-8.3.3]# vim start.sh
#!/bin/bash
jarfilename=grafana-server
AppPath=/home/metric/grafana-8.3.3/bin/
configpath=/home/metric/grafana-8.3.3/conf/
configname=grafana.ini
start()
{
nohup $AppPath$jarfilename -config $configpath$configname > /dev/null &
}
stop()
{
kill -9 $(ps -ef|grep $jarfilename|grep -v grep |awk '{print $2}')
}
status()
{
ps -ef|grep $jarfilename|grep -v grep
}
restart() {
stop
start
}
case "$1" in
start)
start
;;
stop)
stop
;;
restart)
restart
;;
status)
status
;;
*)
echo $"Usage: $0 {start|stop|restart|status}"
esac
[root@localhost grafana-8.3.3]# ./start.sh start
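As with the exporters, it is worth checking that Grafana is actually up before opening the web UI; a sketch using Grafana's health endpoint:
[root@localhost grafana-8.3.3]# curl -s http://10.130.41.120:3000/api/health          ---- "database": "ok" indicates Grafana is running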

Grafana Web UI Configuration

  • To configure Grafana, open http://10.130.41.120:3000; the default username and password are both admin.

  • Add a data source, scroll back down to the bottom, and click Save & test (the data source can also be created through the API, as sketched after this list).

  • Then import the corresponding dashboards by selecting the matching JSON files.
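If you prefer not to click through the UI, the Prometheus data source can also be created through Grafana's HTTP API. A minimal sketch, assuming the default admin/admin credentials and the Prometheus address used above:
[root@localhost ~]# curl -s -u admin:admin -H "Content-Type: application/json" -X POST http://10.130.41.120:3000/api/datasources -d '{"name":"Prometheus","type":"prometheus","url":"http://10.130.41.120:9090","access":"proxy","isDefault":true}'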

