Monitoring services that expose a metrics endpoint with Prometheus
Prometheus monitoring falls into two cases:
1. Services that expose a metrics endpoint
2. Services that do not expose a metrics endpoint
I. Workflow for monitoring a service that exposes a metrics endpoint with Prometheus:
A service that "exposes a metrics endpoint" is one whose monitored items and metric values can be pulled directly from that endpoint.
(Note: ETCD is used as the example here.)
1. Use an Endpoints object to record the address of the ETCD instance to be monitored
2. Create a Service for the in-cluster ServiceMonitor to use
3. Create a ServiceMonitor and deploy the certificates it needs for access
4. Restart the Prometheus monitoring Pod to load the new scrape targets
II. Monitoring ETCD with Prometheus
ETCD exposes a metrics endpoint, so the Prometheus workflow is:
1. Use an Endpoints object to record the address of the ETCD instance to be monitored
2. Create a Service for the in-cluster ServiceMonitor to use
3. Create a ServiceMonitor and deploy the certificates it needs, for prometheus-k8s-0 to use
4. Restart the Prometheus monitoring Pod (prometheus-k8s-0) to load the new scrape targets
1. Test the ETCD service's metrics endpoint
[root@k8s-master-01 ~]# netstat -lnutp | grep etcd
tcp 0 0 192.168.15.31:2379 0.0.0.0:* LISTEN 2692/etcd
tcp 0 0 127.0.0.1:2379 0.0.0.0:* LISTEN 2692/etcd
tcp 0 0 192.168.15.31:2380 0.0.0.0:* LISTEN 2692/etcd
tcp 0 0 127.0.0.1:2381 0.0.0.0:* LISTEN 2692/etcd
[root@k8s-master-01 ~]# curl -k --cert /etc/kubernetes/pki/apiserver-etcd-client.crt --key /etc/kubernetes/pki/apiserver-etcd-client.key https://127.0.0.1:2379/metrics
# ETCD ports:
2379: serves the HTTP API
2380: communicates with the other members of the cluster
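The full /metrics output is long. As a quick sanity check you can filter for a couple of well-known etcd metrics; the exact metric names available depend on the etcd version, so treat the ones below as examples rather than a fixed list:
[root@k8s-master-01 ~]# curl -sk --cert /etc/kubernetes/pki/apiserver-etcd-client.crt --key /etc/kubernetes/pki/apiserver-etcd-client.key https://127.0.0.1:2379/metrics | grep -E '^etcd_server_(has_leader|leader_changes_seen_total)'
# etcd_server_has_leader should report 1 when this member sees a leader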
2. Use an Endpoints object to record the address of the ETCD instance to be monitored
kind: Endpoints
apiVersion: v1
metadata:
  namespace: kube-system
  name: etcd-moniter
  labels:
    k8s: etcd
subsets:
  - addresses:
      - ip: "192.168.15.31"
    ports:
      - port: 2379
        protocol: TCP
        name: etcd
[root@k8s-master-01 ~]# kubectl apply -f endprints.yaml
endpoints/etcd-moniter created
[root@k8s-master-01 ~]# kubectl get endpoints -n kube-system
NAME ENDPOINTS AGE
etcd-moniter 192.168.15.31:2379 17s
3. Create a Service for the in-cluster ServiceMonitor to use
kind: Service
apiVersion: v1
metadata:
  name: etcd-moniter
  namespace: kube-system
  labels:
    k8s: etcd
spec:
  ports:
    - port: 2379
      targetPort: 2379
      name: etcd
      protocol: TCP
[root@k8s-master-01 ~]# kubectl apply -f service.yaml
service/etcd-moniter created
[root@k8s-master-01 ~]# kubectl get svc -n kube-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
etcd-moniter ClusterIP 10.103.237.209 <none> 2379/TCP 13s
kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP,9153/TCP 25d
kubelet ClusterIP None <none> 10250/TCP,10255/TCP,4194/TCP 44h
[root@k8s-master-01 ~]# kubectl describe -n kube-system svc etcd-moniter
Name: etcd-moniter
Namespace: kube-system
Labels: k8s=etcd
Annotations: <none>
Selector: <none>
Type: ClusterIP
IP Families: <none>
IP: 10.103.237.209
IPs: 10.103.237.209
Port: etcd 2379/TCP
TargetPort: 2379/TCP
Endpoints: 192.168.15.31:2379
Session Affinity: None
Events: <none>
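To confirm that the Service really forwards to the ETCD endpoint, one option is to call the ClusterIP from the node with the same client certificates used earlier. This assumes the ClusterIP (10.103.237.209 above, different in every cluster) is reachable from the master node:
[root@k8s-master-01 ~]# curl -sk --cert /etc/kubernetes/pki/apiserver-etcd-client.crt --key /etc/kubernetes/pki/apiserver-etcd-client.key https://10.103.237.209:2379/metrics | head -n 5
# the first few metric lines should be printed, same as when hitting 127.0.0.1:2379 directly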
4. Create a ServiceMonitor and deploy the certificates it needs for access
kind: ServiceMonitor
apiVersion: monitoring.coreos.com/v1
metadata:
  labels:
    k8s: etcd
  name: etcd-monitor
  namespace: monitoring
spec:
  endpoints:
    - interval: 3s
      port: etcd
      scheme: https
      tlsConfig:
        caFile: /etc/prometheus/secrets/etcd-certs/ca.crt
        certFile: /etc/prometheus/secrets/etcd-certs/peer.crt
        keyFile: /etc/prometheus/secrets/etcd-certs/peer.key
        insecureSkipVerify: true
  selector:
    matchLabels:
      k8s: etcd
  namespaceSelector:
    matchNames:
      - "kube-system"
[root@k8s-master-01 ~]# kubectl apply -f servicemoniter.yaml
servicemonitor.monitoring.coreos.com/etcd-monitor created
[root@k8s-master-01 ~]# kubectl get ServiceMonitor -n monitoring
NAME AGE
etcd-monitor 24s
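The ServiceMonitor finds its target purely through the label selector (k8s: etcd) and the namespace selector (kube-system), so it is worth double-checking that exactly the intended Service carries that label:
[root@k8s-master-01 ~]# kubectl get svc -n kube-system -l k8s=etcd
# only etcd-moniter should be listed here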
5. Restart the Prometheus monitoring Pod (prometheus-k8s-0) to load the new scrape targets
First create a Secret to hold the ETCD certificates that Prometheus will use when scraping ETCD.
[root@k8s-master-01 ~]# kubectl create secret generic etcd-certs -n monitoring --from-file=/etc/kubernetes/pki/etcd/ca.crt --from-file=/etc/kubernetes/pki/etcd/peer.crt --from-file=/etc/kubernetes/pki/etcd/peer.key
secret/etcd-certs created
[root@k8s-master-01 ~]# kubectl get secret -n monitoring
NAME TYPE DATA AGE
etcd-certs Opaque 3 14s
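Before touching the Prometheus object, it can help to confirm that the Secret really contains the three expected keys (ca.crt, peer.crt, peer.key), because the tlsConfig paths in the ServiceMonitor are built from these file names. Adding the Secret to spec.secrets of the Prometheus object (shown below) makes the Operator mount it into the Prometheus Pods under /etc/prometheus/secrets/etcd-certs/:
[root@k8s-master-01 ~]# kubectl describe secret etcd-certs -n monitoring
# the Data section should list ca.crt, peer.crt and peer.key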
kind: Prometheus
apiVersion: monitoring.coreos.com/v1
metadata:
  labels:
    prometheus: k8s
  name: k8s
  namespace: monitoring
spec:
  alerting:
    alertmanagers:
      - name: alertmanager-main
        namespace: monitoring
        port: web
      - name: alertmanager-main-etcd
        namespace: kube-system
        port: etcd
  image: quay.io/prometheus/prometheus:v2.15.2
  nodeSelector:
    kubernetes.io/os: linux
  podMonitorNamespaceSelector: {}
  podMonitorSelector: {}
  replicas: 2
  resources:
    requests:
      memory: 400Mi
  ruleSelector:
    matchLabels:
      prometheus: k8s
      role: alert-rules
  securityContext:
    fsGroup: 2000
    runAsNonRoot: true
    runAsUser: 1000
  serviceAccountName: prometheus-k8s
  serviceMonitorNamespaceSelector: {}
  serviceMonitorSelector: {}
  version: v2.15.2
  secrets:
    - etcd-certs
Result:
[root@k8s-master-01 ~]# kubectl apply -f prometheus-k8s-0.yaml
prometheus.monitoring.coreos.com/k8s configured
[root@k8s-master-01 ~]# kubectl get pod -n monitoring
NAME READY STATUS RESTARTS AGE
prometheus-k8s-0 3/3 Running 0 24s
prometheus-k8s-1 0/3 Pending 0 24s
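Once prometheus-k8s-0 is Running again, a quick way to confirm that ETCD metrics are actually being scraped is to port-forward the Prometheus Service and query the HTTP API with PromQL. This assumes the standard kube-prometheus Service name prometheus-k8s on port 9090; adjust it if your deployment differs:
[root@k8s-master-01 ~]# kubectl port-forward -n monitoring svc/prometheus-k8s 9090:9090 &
[root@k8s-master-01 ~]# curl -s 'http://127.0.0.1:9090/api/v1/query?query=etcd_server_has_leader'
# a non-empty "result" array means the etcd target is up and being scraped;
# the same target is also visible on the Status -> Targets page of the Prometheus web UI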