Kubernetes 部署 ELK
项目背景
k8s集群搭建后,平时运维过程中不仅要观察监控平台,查看集群运行情况,还要在集群出现问题时,对问题点进行及时定位,由于集群内pod过多后,日志定位比较费时,因此一个集中式的日志文件系统成了运维人员的好帮手,本次实验采用的是filebeat+logstash+elasticsearch+kibana
组件功能描述
filebeat负责采集每个节点上宿主机和容器的日志,发送给logstash,logstash过滤不必要的信息后传递给elasticsearch进行储存,kibana展示储存在elasticsearch上的数据,并可以通过查询语句提取关键字
这里说明一下为什么采用 filebeat+logstash 而不是单独的 logstash:logstash 基于 JVM,资源消耗较高,采集性能不如 filebeat;其实也可以只用 filebeat、不经过 logstash,但 logstash 的优势在于插件丰富,并且提供过滤功能。
filebeat收集数据
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: logging
data:
  filebeat.yml: |
    filebeat.inputs:
      - type: log
        paths:
          # A glob is required: a bare directory path matches no files.
          - /var/log/nginx/*.log
        # `document_type` was removed in Filebeat 6.x; tag events via
        # custom fields instead.
        fields:
          log_topic: k8s-nginx
    setup.template.name: "k8s-nginx"
    setup.template.pattern: "k8s-nginx-*"
    output.elasticsearch:
      hosts: ["elasticsearch:9200"]
      index: "k8s-nginx-%{+yyyy.MM.dd}"
    # Uncomment to ship through Logstash instead of writing to ES
    # directly (only one output may be enabled at a time):
    # output.logstash:
    #   hosts: ["logging-logstash:5044"]
    #   enabled: true
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: logging
spec:
  selector:
    matchLabels:
      app: filebeat
  template:
    metadata:
      labels:
        k8s-app: filebeat
        app: filebeat
    spec:
      terminationGracePeriodSeconds: 30
      containers:
        - name: filebeat
          image: elastic/filebeat:7.14.0
          imagePullPolicy: IfNotPresent
          args: ["-c", "/etc/filebeat.yml", "-e"]
          volumeMounts:
            - name: config
              mountPath: /etc/filebeat.yml
              readOnly: true
              subPath: filebeat.yml
            - name: log
              mountPath: /var/log/
              # Filebeat only reads the host's logs; never write back.
              readOnly: true
      volumes:
        - name: config
          configMap:
            # Config is data, not a script: readable, not executable.
            defaultMode: 0644
            name: filebeat-config
        - name: log
          hostPath:
            path: /var/log/
            type: Directory
logstash过滤数据
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-configmap
  namespace: logging
  labels:
    k8s-app: logstash-configmap
data:
  logstash.conf: |
    input {
      beats {
        port => 5044
        # No `codec => json` here: the json filter below already parses
        # the message body; decoding twice breaks non-JSON events.
      }
    }
    filter {
      json {
        source => "message"
        remove_field => "message"
      }
    }
    output {
      elasticsearch {
        hosts => ["elasticsearch:9200"]
        index => "nginx-json-log-%{+YYYY.MM.dd}"
      }
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash
  namespace: logging
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: logstash
  template:
    metadata:
      labels:
        k8s-app: logstash
    spec:
      containers:
        - name: logstash
          image: logstash:7.14.0
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 5044
          volumeMounts:
            # Mounted into the default pipeline directory so the image's
            # own pipeline config is replaced by logstash.conf.
            - name: config-volume
              mountPath: /usr/share/logstash/pipeline/
      volumes:
        - name: config-volume
          configMap:
            name: logstash-configmap
            items:
              - key: logstash.conf
                path: logstash.conf
---
apiVersion: v1
kind: Service
metadata:
  name: logstash
  namespace: logging
spec:
  type: ClusterIP
  ports:
    # Beats protocol port exposed to the Filebeat DaemonSet.
    - port: 5044
      targetPort: 5044
      protocol: TCP
  selector:
    k8s-app: logstash
es存储数据
apiVersion: v1
kind: PersistentVolume
metadata:
  # PersistentVolume is cluster-scoped; a `namespace` field is ignored.
  name: pv
spec:
  capacity:
    storage: 1Gi  # storage capacity of the PV
  accessModes:  # access modes of the PV
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs-storage
  nfs:
    path: /data
    server: 192.168.206.135
---
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
  namespace: logging
  labels:
    app: elasticsearch
spec:
  # Headless service providing stable DNS for the StatefulSet pods.
  clusterIP: None
  selector:
    k8s-app: elasticsearch
  ports:
    - port: 9200
      name: db
    - port: 9300
      name: inter
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: elasticsearch
  namespace: logging
  labels:
    k8s-app: elasticsearch
spec:
  serviceName: elasticsearch
  selector:
    matchLabels:
      k8s-app: elasticsearch
  template:
    metadata:
      labels:
        k8s-app: elasticsearch
    spec:
      containers:
        - image: elasticsearch:7.14.0
          name: elasticsearch
          resources:
            limits:
              cpu: 1
              memory: 2Gi
            requests:
              cpu: 0.5
              memory: 500Mi
          env:
            - name: "discovery.type"
              value: "single-node"
            - name: ES_JAVA_OPTS
              # Keep Xms == Xmx and the heap well below the 2Gi container
              # limit; a 2g max heap plus JVM off-heap overhead would
              # exceed the limit and get the pod OOM-killed.
              value: "-Xms1g -Xmx1g"
          ports:
            - containerPort: 9200
              name: db
              protocol: TCP
            - name: inter
              containerPort: 9300
          volumeMounts:
            - name: elasticsearch-data
              mountPath: /usr/share/elasticsearch/data
  volumeClaimTemplates:
    - metadata:
        name: elasticsearch-data
      spec:
        storageClassName: "nfs-storage"
        accessModes: ["ReadWriteMany"]
        resources:
          requests:
            storage: 1Gi
# 查询es索引
curl http://elasticsearch:9200/_cat/indices?v
kibana展示数据
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: logging
  labels:
    k8s-app: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: kibana
  template:
    metadata:
      labels:
        k8s-app: kibana
    spec:
      containers:
        - name: kibana
          image: kibana:7.14.0
          resources:
            limits:
              cpu: 1
              # Binary units (Gi) to match the Mi used below; "1G" is
              # decimal (10^9 bytes) and mixes unit systems.
              memory: 1Gi
            requests:
              cpu: 0.5
              memory: 500Mi
          env:
            - name: ELASTICSEARCH_HOSTS
              value: http://elasticsearch:9200
          ports:
            - containerPort: 5601
              protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: logging
spec:
  type: NodePort
  ports:
    - port: 5601
      targetPort: 5601
      protocol: TCP
      # Fixed NodePort; must fall inside the cluster's NodePort range
      # (default 30000-32767).
      nodePort: 30000
  selector:
    k8s-app: kibana
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kibana
  namespace: logging
  # NOTE(review): serving Kibana under a sub-path needs either a
  # controller rewrite rule or SERVER_BASEPATH=/kibana set on the
  # Kibana container; with neither, asset URLs will 404 — confirm.
spec:
  rules:
    - host: "dev-1.ad2cloud.cn"
      http:
        paths:
          - path: /kibana
            pathType: Prefix
            backend:
              service:
                name: kibana
                port:
                  number: 5601
参考博客地址