Shipping Prometheus data to remote VictoriaMetrics storage
View the Prometheus CR:
kubectl get prometheus k8s -n kubesphere-monitoring-system -o yaml
Add http://172.31.115.19:8480/insert/0/prometheus/api/v1/write as a remoteWrite URL so that Prometheus ships its data to the remote VictoriaMetrics storage.
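In the Prometheus CR this is the remoteWrite list under spec; a minimal sketch, assuming the VictoriaMetrics vminsert component is reachable at 172.31.115.19:8480 and writes go to tenant 0, as in the URL above:

spec:
  remoteWrite:
  - url: http://172.31.115.19:8480/insert/0/prometheus/api/v1/write

The field can be added with kubectl edit prometheus k8s -n kubesphere-monitoring-system; the Prometheus Operator then regenerates the Prometheus configuration automatically.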
There is no way to stop Prometheus from writing to its local TSDB. If you really want to keep as little as possible locally, you can set the local retention to 1 hour.
Prometheus only falls back to remote read when the requested data is not available locally.
In practice we keep the local retention at 1 day, because Prometheus remote reads are not as efficient as local reads. Monitoring is essentially about reading recent data frequently; the older the data, the less often it gets queried. Keeping one day locally is also easier on the remote storage backend.
Add the VictoriaMetrics read and write endpoints and set local retention to 1 day, as sketched below:
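The relevant fields in the Prometheus CR end up looking like this; the read endpoint points at the VictoriaMetrics vmselect component on port 8481, tenant 0, matching the full dump that follows:

spec:
  remoteRead:
  - url: http://172.31.115.19:8481/select/0/prometheus
  remoteWrite:
  - url: http://172.31.115.19:8480/insert/0/prometheus/api/v1/write
  retention: 1d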
Checking the retention before the change (still 7d), and then dumping the full CR after adding the remoteRead/remoteWrite endpoints and setting retention to 1d:

[root@master1 ~]# kubectl get prometheus k8s -n kubesphere-monitoring-system -o yaml | grep retention
      {"apiVersion":"monitoring.coreos.com/v1","kind":"Prometheus","metadata":{"annotations":{},"name":"k8s","namespace":"kubesphere-monitoring-system"},"spec":{"additionalScrapeConfigs":{"key":"prometheus-additional.yaml","name":"additional-scrape-configs"},"affinity":{"nodeAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"preference":{"matchExpressions":[{"key":"node-role.kubernetes.io/monitoring","operator":"Exists"}]},"weight":100}]},"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"prometheus","operator":"In","values":["k8s"]}]},"namespaces":["kubesphere-monitoring-system"],"topologyKey":"kubernetes.io/hostname"},"weight":100}]}},"alerting":{"alertmanagers":[{"name":"alertmanager-main","namespace":"kubesphere-monitoring-system","port":"web"}]},"image":"192.168.15.198/prom/prometheus:v2.20.1","nodeSelector":{"kubernetes.io/os":"linux"},"podMonitorNamespaceSelector":{},"podMonitorSelector":{},"query":{"maxConcurrency":1000},"remoteWrite":[{"url":"http://172.31.115.19:8480/insert/0/prometheus/api/v1/write"}],"replicas":2,"resources":{"limits":{"cpu":"4","memory":"16Gi"},"requests":{"cpu":"200m","memory":"400Mi"}},"retention":"7d","ruleSelector":{"matchLabels":{"prometheus":"k8s","role":"alert-rules"}},"scrapeInterval":"1m","securityContext":{"fsGroup":0,"runAsNonRoot":false,"runAsUser":0},"serviceAccountName":"prometheus-k8s","serviceMonitorNamespaceSelector":{},"serviceMonitorSelector":{},"storage":{"volumeClaimTemplate":{"spec":{"resources":{"requests":{"storage":"20Gi"}},"storageClassName":"nfs-storage"}}},"tolerations":[{"effect":"NoSchedule","key":"dedicated","operator":"Equal","value":"monitoring"}],"version":"v2.20.1"}}
            f:retention: {}
  retention: 7d

[root@master1 ~]# kubectl get prometheus k8s -n kubesphere-monitoring-system -o yaml
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  name: k8s
  namespace: kubesphere-monitoring-system
spec:
  additionalScrapeConfigs:
    key: prometheus-additional.yaml
    name: additional-scrape-configs
  affinity:
    nodeAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - preference:
          matchExpressions:
          - key: node-role.kubernetes.io/monitoring
            operator: Exists
        weight: 100
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - podAffinityTerm:
          labelSelector:
            matchExpressions:
            - key: prometheus
              operator: In
              values:
              - k8s
          namespaces:
          - kubesphere-monitoring-system
          topologyKey: kubernetes.io/hostname
        weight: 100
  alerting:
    alertmanagers:
    - name: alertmanager-main
      namespace: kubesphere-monitoring-system
      port: web
  image: 192.168.15.198/prom/prometheus:v2.20.1
  nodeSelector:
    kubernetes.io/os: linux
  podMonitorNamespaceSelector: {}
  podMonitorSelector: {}
  query:
    maxConcurrency: 1000
  remoteRead:
  - url: http://172.31.115.19:8481/select/0/prometheus
  remoteWrite:
  - url: http://172.31.115.19:8480/insert/0/prometheus/api/v1/write
  replicas: 2
  resources:
    limits:
      cpu: "4"
      memory: 16Gi
    requests:
      cpu: 200m
      memory: 400Mi
  retention: 1d
  ruleSelector:
    matchLabels:
      prometheus: k8s
      role: alert-rules
  scrapeInterval: 1m
  securityContext:
    fsGroup: 0
    runAsNonRoot: false
    runAsUser: 0
  serviceAccountName: prometheus-k8s
  serviceMonitorNamespaceSelector: {}
  serviceMonitorSelector: {}
  storage:
    volumeClaimTemplate:
      spec:
        resources:
          requests:
            storage: 20Gi
        storageClassName: nfs-storage
  tolerations:
  - effect: NoSchedule
    key: dedicated
    operator: Equal
    value: monitoring
  version: v2.20.1
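To check that samples are actually arriving in VictoriaMetrics, you can query the same vmselect endpoint that remoteRead points at; a quick sketch assuming the cluster version of VictoriaMetrics with tenant 0, as in the URLs above:

# Prometheus-compatible query API exposed by vmselect
curl 'http://172.31.115.19:8481/select/0/prometheus/api/v1/query?query=up'

If the remote write path is working, this should return the up series for the scraped targets.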