k8s-prometheus+grafana
I deployed this directly in the k8s cluster. One drawback: Grafana has no persistent storage. I'll add that later, once I've sorted out how to deploy the PV and PVC for it.
1. Create the namespace
[root@k8s-master01 prometheus]# cat prom-grafana-namespaces.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: prom-grafana
  labels:
    name: prom-grafana
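Apply it and check that the namespace exists (assuming the filename shown above):
kubectl apply -f prom-grafana-namespaces.yaml
kubectl get ns prom-grafana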
2. Create a ServiceAccount
Note: the manifest below creates an SA named prometheus, while the command further down creates one named drifter; the Prometheus Deployment in step 7 runs as drifter.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: prometheus
  namespace: prom-grafana
Command line: kubectl create serviceaccount drifter -n prom-grafana
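To confirm both accounts exist:
kubectl get sa -n prom-grafana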
3. Bind the ServiceAccount to a ClusterRole with a ClusterRoleBinding
The manifests below bind the prometheus SA to the prometheus ClusterRole; the one-liner afterwards binds drifter to cluster-admin instead.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: prometheus
rules:
- apiGroups: [""]
  resources:
  - nodes
  - nodes/metrics
  - services
  - endpoints
  - pods
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources:
  - configmaps
  verbs: ["get"]
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: prometheus
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: prometheus
subjects:
- kind: ServiceAccount
  name: prometheus
  namespace: prom-grafana
Command line: kubectl create clusterrolebinding drifter-clusterrolebinding -n prom-grafana --clusterrole=cluster-admin --serviceaccount=prom-grafana:drifter
(ClusterRoleBindings are cluster-scoped, so the -n flag has no effect here; also note that cluster-admin grants far more than the prometheus ClusterRole above.)
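A quick way to check the binding took effect is to impersonate the ServiceAccount with kubectl auth can-i; both of these should print yes:
kubectl auth can-i list nodes --as=system:serviceaccount:prom-grafana:drifter
kubectl auth can-i get configmaps --as=system:serviceaccount:prom-grafana:drifter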
4. Create the data directory
Run this on k8s-master01, where the Prometheus pod is pinned via nodeName in step 7 (the Deployment mounts /data as a hostPath):
mkdir /data
chmod 777 /data/
5. Install node-exporter
node-exporter collects host-level metrics (CPU, memory, disk, network, open file counts, etc.) from machines: physical servers, VMs, cloud hosts, and so on.
Apply the manifest from the k8s master node:
[root@k8s-master01 prometheus]# cat node-export.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: node-exporter
  namespace: prom-grafana
  labels:
    name: node-exporter
spec:
  selector:
    matchLabels:
      name: node-exporter
  template:
    metadata:
      labels:
        name: node-exporter
    spec:
      hostPID: true
      hostIPC: true
      hostNetwork: true
      containers:
      - name: node-exporter
        image: prom/node-exporter:v0.16.0
        ports:
        - containerPort: 9100
        resources:
          requests:
            cpu: 0.15
        securityContext:
          privileged: true
        args:
        - --path.procfs
        - /host/proc
        - --path.sysfs
        - /host/sys
        - --collector.filesystem.ignored-mount-points
        - '^/(sys|proc|dev|host|etc)($|/)'
        volumeMounts:
        - name: dev
          mountPath: /host/dev
        - name: proc
          mountPath: /host/proc
        - name: sys
          mountPath: /host/sys
        - name: rootfs
          mountPath: /rootfs
      tolerations:
      - key: "node-role.kubernetes.io/master"
        operator: "Exists"
        effect: "NoSchedule"
      volumes:
      - name: proc
        hostPath:
          path: /proc
      - name: dev
        hostPath:
          path: /dev
      - name: sys
        hostPath:
          path: /sys
      - name: rootfs
        hostPath:
          path: /
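Because the DaemonSet uses hostNetwork, every node exposes the metrics on port 9100 directly. A quick sanity check (substitute one of your node IPs; 192.168.55.101 here is an assumption based on the ingress IPs later in this post):
kubectl get pods -n prom-grafana -o wide | grep node-exporter
curl -s http://192.168.55.101:9100/metrics | head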
6. Create a ConfigMap to hold the Prometheus configuration file
[root@k8s-master01 prometheus]# cat prometheus-cfg.yaml
kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    app: prometheus
  name: prometheus-config
  namespace: prom-grafana
data:
  prometheus.yml: |
    global:
      scrape_interval: 15s
      scrape_timeout: 10s
      evaluation_interval: 1m
    scrape_configs:
    - job_name: 'kubernetes-node'
      kubernetes_sd_configs:
      - role: node
      relabel_configs:
      - source_labels: [__address__]
        regex: '(.*):10250'
        replacement: '${1}:9100'
        target_label: __address__
        action: replace
      - action: labelmap
        regex: __meta_kubernetes_node_label_(.+)
    - job_name: 'kubernetes-node-cadvisor'
      kubernetes_sd_configs:
      - role: node
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      relabel_configs:
      - action: labelmap
        regex: __meta_kubernetes_node_label_(.+)
      - target_label: __address__
        replacement: kubernetes.default.svc:443
      - source_labels: [__meta_kubernetes_node_name]
        regex: (.+)
        target_label: __metrics_path__
        replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
    - job_name: 'kubernetes-apiserver'
      kubernetes_sd_configs:
      - role: endpoints
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      relabel_configs:
      - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
        action: keep
        regex: default;kubernetes;https
    - job_name: 'kubernetes-service-endpoints'
      kubernetes_sd_configs:
      - role: endpoints
      relabel_configs:
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
        action: keep
        regex: true
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
        action: replace
        target_label: __scheme__
        regex: (https?)
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
        action: replace
        target_label: __metrics_path__
        regex: (.+)
      - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
        action: replace
        target_label: __address__
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
      - action: labelmap
        regex: __meta_kubernetes_service_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        action: replace
        target_label: kubernetes_namespace
      - source_labels: [__meta_kubernetes_service_name]
        action: replace
        target_label: kubernetes_name
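The kubernetes-service-endpoints job only scrapes Services that opt in through annotations, per the keep rule above. A minimal sketch of such a Service (my-app and port 8080 are hypothetical):
apiVersion: v1
kind: Service
metadata:
  name: my-app                       # hypothetical service
  namespace: default
  annotations:
    prometheus.io/scrape: "true"     # matched by the keep rule
    prometheus.io/port: "8080"       # rewrites __address__ to this port
    prometheus.io/path: "/metrics"   # optional; defaults to /metrics
spec:
  ports:
  - port: 8080
  selector:
    app: my-app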
7. Deploy Prometheus with a Deployment
[root@k8s-master01 prometheus]# cat prometheus-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus-server
  namespace: prom-grafana
  labels:
    app: prometheus
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus
      component: server
    #matchExpressions:
    #- {key: app, operator: In, values: [prometheus]}
    #- {key: component, operator: In, values: [server]}
  template:
    metadata:
      labels:
        app: prometheus
        component: server
      annotations:
        prometheus.io/scrape: 'false'
    spec:
      nodeName: k8s-master01
      serviceAccountName: drifter
      containers:
      - name: prometheus
        image: prom/prometheus:v2.2.1
        imagePullPolicy: IfNotPresent
        command:
        - prometheus
        - --config.file=/etc/prometheus/prometheus.yml
        - --storage.tsdb.path=/prometheus
        - --storage.tsdb.retention=720h
        ports:
        - containerPort: 9090
          protocol: TCP
        volumeMounts:
        - mountPath: /etc/prometheus/prometheus.yml
          name: prometheus-config
          subPath: prometheus.yml
        - mountPath: /prometheus/
          name: prometheus-storage-volume
      volumes:
      - name: prometheus-config
        configMap:
          name: prometheus-config
          items:
          - key: prometheus.yml
            path: prometheus.yml
            mode: 0644
      - name: prometheus-storage-volume
        hostPath:
          path: /data
          type: Directory
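Check that the pod landed on k8s-master01 and is Running:
kubectl get pods -n prom-grafana -l app=prometheus -o wide
Keep in mind that hostPath storage means the metrics history lives in /data on that single node; if the pod were ever rescheduled elsewhere, the data would not follow it.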
8. Create a Service to expose the Prometheus port
[root@k8s-master01 prometheus]# cat prometheus-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: prometheus-server
  namespace: prom-grafana
  labels:
    app: prometheus
spec:
  type: NodePort
  # type: ClusterIP
  ports:
  - port: 9090
    targetPort: 9090
    # protocol: TCP
  selector:
    app: prometheus
    component: server
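Look up the NodePort that was assigned, then open http://<any-node-ip>:<nodeport> in a browser:
kubectl get svc prometheus-server -n prom-grafana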
9. Create an Ingress for access via a domain name
[root@k8s-master01 prometheus]# cat prometheus-ing.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: prometheus
  namespace: prom-grafana
  annotations:
    nginx.ingress.kubernetes.io/proxy-body-size: "20M"
    # nginx.ingress.kubernetes.io/ssl-redirect: "false"
    # nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - host: prometheus.drifter.net
    http:
      paths:
      - backend:
          service:
            name: prometheus-server
            port:
              number: 9090
        path: /
        pathType: ImplementationSpecific
status:
  loadBalancer:
    ingress:
    - ip: 192.168.55.101
    - ip: 192.168.55.102
    - ip: 192.168.55.103
    - ip: 192.168.55.104
    - ip: 192.168.55.105
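prometheus.drifter.net has to resolve to a node running the ingress controller. Without a DNS entry, an /etc/hosts line on your workstation does the job (assuming 192.168.55.101 is one of the ingress node IPs listed above):
echo '192.168.55.101 prometheus.drifter.net graf.drifter.net' >> /etc/hosts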
10. Deploy Grafana with a Deployment
[root@k8s-master01 prometheus]# cat grafana-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: grafana-server
  namespace: prom-grafana
  labels:
    app: grafana
spec:
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: grafana-server
  template:
    metadata:
      labels:
        app: grafana-server
    spec:
      containers:
      - name: grafana
        image: grafana/grafana:7.4.5
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 3000
          name: grafana
        env:
        - name: GF_SECURITY_ADMIN_USER
          value: admin
        - name: GF_SECURITY_ADMIN_PASSWORD
          value: admin321
        readinessProbe:
          failureThreshold: 10
          httpGet:
            path: /api/health
            port: 3000
            scheme: HTTP
          initialDelaySeconds: 60
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 30
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /api/health
            port: 3000
            scheme: HTTP
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        resources:
          limits:
            cpu: 100m
            memory: 256Mi
          requests:
            cpu: 100m
            memory: 256Mi
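The readiness probe waits 60 seconds before its first check, so give the pod a minute and watch it come up:
kubectl get pods -n prom-grafana -l app=grafana-server -w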
11. Create the Grafana Service
[root@k8s-master01 prometheus]# cat grafana-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: grafana-server
  namespace: prom-grafana
  labels:
    app: grafana-server
spec:
  type: NodePort
  # type: ClusterIP
  ports:
  - port: 80
    targetPort: 3000
    # protocol: TCP
  selector:
    app: grafana-server
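As with Prometheus, grab the NodePort and log in with the admin/admin321 credentials set in the Deployment env:
kubectl get svc grafana-server -n prom-grafana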
12. Create the Grafana Ingress
[root@k8s-master01 prometheus]# cat grafana-ing.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: grafana-server
  namespace: prom-grafana
  annotations:
    nginx.ingress.kubernetes.io/proxy-body-size: "20M"
    # nginx.ingress.kubernetes.io/ssl-redirect: "false"
    # nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - host: graf.drifter.net
    http:
      paths:
      - backend:
          service:
            name: grafana-server
            port:
              number: 80
        path: /
        pathType: ImplementationSpecific
status:
  loadBalancer:
    ingress:
    - ip: 192.168.55.101
    - ip: 192.168.55.102
    - ip: 192.168.55.103
    - ip: 192.168.55.104
    - ip: 192.168.55.105
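To test this ingress without touching DNS or /etc/hosts, curl can pin the hostname to an ingress node IP (again assuming 192.168.55.101); /api/health is the same Grafana endpoint the probes use:
curl -s --resolve graf.drifter.net:80:192.168.55.101 http://graf.drifter.net/api/health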
Finally, apply all the files at once:
kubectl create -f .
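Once everything is up (kubectl get all -n prom-grafana to confirm), add Prometheus as a Grafana data source; from inside the cluster the Service DNS name should work:
http://prometheus-server.prom-grafana.svc:9090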