k8s 安装 EFK 日志收集(Fluentd + Elasticsearch + Kibana)v2
k8s安装EFK日志收集v2
基础安装nfs、rpcbind参考前一篇文章
es、kibana 版本使用 8.2.0,并启用 xpack.security.transport.ssl(verification_mode 等)传输层加密配置
handlessSvc.yaml
[root@node81 logging]# cat handlessSvc.yaml
# Headless Service (clusterIP: None) for the Elasticsearch StatefulSet.
# Gives each pod a stable DNS name: es-cluster-N.elasticsearch.logging.svc
kind: Service
apiVersion: v1
metadata:
  name: elasticsearch
  namespace: logging
  labels:
    app: elasticsearch
spec:
  selector:
    app: elasticsearch
  clusterIP: None
  ports:
    - port: 9200
      name: rest        # HTTP REST API
    - port: 9300
      name: inter-node  # transport protocol between cluster nodes
serviceAccount.yaml
[root@node81 logging]# cat serviceAccount.yaml
# ServiceAccount the NFS provisioner Deployment runs as.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-provisioner
  namespace: logging
rbac.yaml
[root@node81 logging]# cat rbac.yaml
# Cluster-wide permissions required by the NFS dynamic provisioner.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-provisioner-runner
  # NOTE(review): ClusterRole is cluster-scoped; this 'namespace' field is ignored.
  namespace: logging
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services", "endpoints"]
    verbs: ["get"]
  # NOTE(review): PodSecurityPolicy under 'extensions' only exists on old
  # clusters (PSP was removed in k8s 1.25); harmless but unused elsewhere.
  - apiGroups: ["extensions"]
    resources: ["podsecuritypolicies"]
    resourceNames: ["nfs-provisioner"]
    verbs: ["use"]
---
# Binds the provisioner ServiceAccount to the ClusterRole above.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-provisioner
  # NOTE(review): ClusterRoleBinding is cluster-scoped; 'namespace' is ignored.
  namespace: logging
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    namespace: logging
roleRef:
  kind: ClusterRole
  name: nfs-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
# Namespaced permissions used by the provisioner for leader election
# (it stores the leader lock in an Endpoints object).
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-provisioner
  namespace: logging
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
# Binds the provisioner ServiceAccount to the leader-election Role.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-provisioner
  namespace: logging
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    namespace: logging
roleRef:
  kind: Role
  name: leader-locking-nfs-provisioner
  apiGroup: rbac.authorization.k8s.io
npv.yaml
[root@node81 logging]# cat npv.yaml
# NFS client provisioner: watches PVCs bound to the 'eslog/nfs' provisioner
# and creates per-claim subdirectories on the NFS export.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-provisioner
  namespace: logging
spec:
  selector:
    matchLabels:
      app: nfs-provisioner
  replicas: 1
  strategy:
    type: Recreate   # kill the old pod before starting a new one (single writer)
  template:
    metadata:
      labels:
        app: nfs-provisioner
    spec:
      nodeSelector:
        kubernetes.io/hostname: node81
      serviceAccountName: nfs-provisioner   # 'serviceAccount' is the deprecated alias
      containers:
        - name: nfs-provisioner
          image: registry.cn-hangzhou.aliyuncs.com/open-ali/nfs-client-provisioner:latest
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: eslog/nfs      # must match the StorageClass 'provisioner' field below
            - name: NFS_SERVER
              value: 192.168.0.81   # NFS server address (here: the k8s-elasticsearch host)
            - name: NFS_PATH
              value: /data/eslog    # exported share directory
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.0.81    # NFS server address — use your own here
            path: /data/eslog
class.yaml
[root@node81 logging]# cat class.yaml
# StorageClass consumed by the Elasticsearch volumeClaimTemplates.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: es-block-storage
  # NOTE(review): StorageClass is cluster-scoped; 'namespace' is ignored.
  namespace: logging
provisioner: eslog/nfs   # must match PROVISIONER_NAME in the nfs-provisioner Deployment
es.yaml
[root@node81 logging]# cat es.yaml
# RBAC authn and authz
# ServiceAccount the Elasticsearch pods run as.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: elasticsearch
  namespace: logging
  labels:
    app: elasticsearch
---
# Read-only access Elasticsearch needs for endpoint discovery.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: elasticsearch
  # NOTE(review): ClusterRole is cluster-scoped; 'namespace' is ignored.
  namespace: logging
  labels:
    app: elasticsearch
rules:
  - apiGroups:
      - ""
    resources:
      - "services"
      - "namespaces"
      - "endpoints"
    verbs:
      - "get"
---
# Binds the elasticsearch ServiceAccount to its ClusterRole.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: elasticsearch
  # NOTE(review): ClusterRoleBinding is cluster-scoped; 'namespace' is ignored.
  namespace: logging
  labels:
    app: elasticsearch
subjects:
  - kind: ServiceAccount
    name: elasticsearch
    namespace: logging
    apiGroup: ""
roleRef:
  kind: ClusterRole
  name: elasticsearch
  apiGroup: ""
---
# Three-node Elasticsearch 8.2.0 cluster with transport-layer TLS enabled.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es-cluster
  namespace: logging
  labels:
    app: elasticsearch
spec:
  serviceName: elasticsearch   # must match the headless Service name
  replicas: 3
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels:
        app: elasticsearch
    spec:
      nodeSelector:
        kube-soft: elasticsearch
      serviceAccountName: elasticsearch
      containers:
        - name: elasticsearch
          image: elasticsearch:8.2.0
          imagePullPolicy: IfNotPresent
          resources:
            limits:
              cpu: 1000m   # at most 1 CPU per container
            requests:
              cpu: 100m    # guarantee at least 0.1 CPU per container
          ports:
            - containerPort: 9200
              name: rest         # must match the headless Service port name
              protocol: TCP
            - containerPort: 9300
              name: inter-node
              protocol: TCP
          volumeMounts:
            - name: data
              mountPath: /usr/share/elasticsearch/data
          env:
            - name: cluster.name            # cluster name
              value: k8s-logs
            - name: node.name               # node name, taken from metadata.name
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            # Node discovery: all pods live in the same namespace, so the
            # short form es-cluster-[0..2].elasticsearch resolves via the
            # headless Service.
            - name: discovery.seed_hosts
              value: "es-cluster-0.elasticsearch,es-cluster-1.elasticsearch,es-cluster-2.elasticsearch"
            - name: cluster.initial_master_nodes
              value: "es-cluster-0,es-cluster-1,es-cluster-2"
            - name: ES_JAVA_OPTS
              value: "-Xms512m -Xmx512m"    # 512 MB min/max JVM heap
            - name: network.host
              value: "0.0.0.0"              # listen on all interfaces
            # Lets Kibana read cluster monitoring info; without this a
            # metrics agent would be needed to connect es and kibana.
            - name: xpack.monitoring.collection.enabled
              value: "true"
            - name: xpack.security.transport.ssl.enabled
              value: "true"
            - name: xpack.security.transport.ssl.verification_mode
              value: "certificate"
            # The .p12 keystore is pre-copied into the NFS share, so it is
            # visible under the 'data' PVC mount.
            - name: xpack.security.transport.ssl.keystore.path
              value: "/usr/share/elasticsearch/data/elastic-certificates.p12"
            - name: xpack.security.transport.ssl.truststore.path
              value: "/usr/share/elasticsearch/data/elastic-certificates.p12"
            - name: xpack.security.enabled
              value: "true"
      # Init containers run to completion, in order, before the main
      # container starts.
      initContainers:
        # Elasticsearch runs as UID 1000, but Kubernetes mounts the data
        # volume as root; chown so ES can write its data directory.
        - name: fix-permissions
          image: busybox
          imagePullPolicy: IfNotPresent
          command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"]
          securityContext:
            privileged: true
          volumeMounts:
            - name: data
              mountPath: /usr/share/elasticsearch/data
        # Raise the kernel mmap-count limit; the default is too low for ES
        # and causes out-of-memory errors.
        - name: increase-vm-max-map
          image: busybox
          imagePullPolicy: IfNotPresent
          command: ["sysctl", "-w", "vm.max_map_count=262144"]
          securityContext:
            privileged: true
        # Raise the open-file-descriptor limit.
        # NOTE(review): 'ulimit -n' only affects this init container's own
        # shell, not the ES container — confirm the effective limit via the
        # container runtime / kubelet configuration.
        - name: increase-fd-ulimit
          image: busybox
          imagePullPolicy: IfNotPresent
          command: ["sh", "-c", "ulimit -n 65536"]
          securityContext:
            privileged: true
      # Elasticsearch production notes also recommend disabling swap, which
      # is standard practice on Kubernetes nodes anyway.
  volumeClaimTemplates:
    - metadata:
        name: data
        labels:
          app: elasticsearch
      spec:
        accessModes: [ "ReadWriteOnce" ]    # mountable read-write by a single node only
        storageClassName: es-block-storage  # backed by the NFS provisioner created earlier
        resources:
          requests:
            storage: 10Gi
kibana.yaml
[root@node81 logging]# cat kibana.yaml
# Kibana front-end Service.
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: logging
  labels:
    app: kibana
spec:
  type: NodePort   # NodePort for easy access while testing
  ports:
    - port: 5601
  selector:
    app: kibana
---
# kibana.yml mounted into the Kibana pod; ${...} placeholders are resolved
# by Kibana from the container environment at startup.
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: logging
  name: kibana-config
  labels:
    app: kibana
data:
  kibana.yml: |-
    server.host: 0.0.0.0
    xpack.encryptedSavedObjects.encryptionKey: "lgFubCJ5pBdtbuTiR9iLlgFubCJ5pBdtbuTiR9iL"
    elasticsearch:
      hosts: ${ELASTICSEARCH_HOSTS}
      username: ${ELASTICSEARCH_USER}
      password: ${ELASTICSEARCH_PASSWORD}
---
# Kibana 8.2.0 Deployment wired to the Elasticsearch headless Service.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: logging
  labels:
    app: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      labels:
        app: kibana
    spec:
      nodeSelector:
        kube-soft: elasticsearch
      containers:
        - name: kibana
          image: kibana:8.2.0   # Kibana version must match the ES version
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 5601
          resources:
            limits:
              cpu: 1000m
            requests:
              cpu: 100m
          env:
            - name: ELASTICSEARCH_HOSTS
              value: http://elasticsearch:9200   # headless Service name
            # NOTE(review): since ES 7.8 the dedicated 'kibana_system' user
            # is preferred over 'kibana'. Fill in the password generated by
            # the setup-passwords step below.
            - name: ELASTICSEARCH_USER
              value: "kibana"
            - name: ELASTICSEARCH_PASSWORD
              value: "lgFubCJ5pBdtbuTiR0iL"
            - name: SERVER_NAME
              value: kibana
            - name: "I18N_LOCALE"
              value: "zh-CN"
          volumeMounts:
            - name: config
              mountPath: /usr/share/kibana/config/kibana.yml
              readOnly: true
              subPath: kibana.yml
      volumes:
        - name: config
          configMap:
            name: kibana-config
fluentd.yaml
[root@node81 logging]# cat fluentd.yaml
# ServiceAccount the Fluentd DaemonSet runs as.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd
  namespace: logging
  labels:
    app: fluentd
    addonmanager.kubernetes.io/mode: Reconcile
---
# Read access Fluentd needs to enrich log records with pod/namespace metadata.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: fluentd
  # NOTE(review): ClusterRole is cluster-scoped; 'namespace' is ignored.
  namespace: logging
  labels:
    app: fluentd
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - namespaces
    verbs:
      - get
      - list
      - watch
---
# Binds the fluentd ServiceAccount to its ClusterRole.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd
  # NOTE(review): ClusterRoleBinding is cluster-scoped; 'namespace' is ignored.
  namespace: logging
  labels:
    app: fluentd
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  kind: ClusterRole
  name: fluentd
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: fluentd
    namespace: logging
---
# Fluentd log collector: one pod per selected node, shipping to Elasticsearch.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd
  namespace: logging
  labels:
    app: fluentd
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      app: fluentd
  template:
    metadata:
      labels:
        app: fluentd
    spec:
      serviceAccountName: fluentd   # 'serviceAccount' is the deprecated alias of this field
      # tolerations:   # uncomment to also schedule on (and collect from) master nodes
      #   - key: node-role.kubernetes.io/master
      #     effect: NoSchedule
      nodeSelector:
        kube-soft: saas
      containers:
        - name: fluentd
          # Different image flavours ship different config layouts — adjust
          # the config inside the container for the image you pick.
          image: fluent/fluentd-kubernetes-daemonset:v1-debian-elasticsearch
          imagePullPolicy: IfNotPresent
          env:
            - name: FLUENT_ELASTICSEARCH_HOST
              value: "elasticsearch"
            - name: FLUENT_ELASTICSEARCH_PORT
              value: "9200"
            - name: FLUENT_ELASTICSEARCH_SCHEME
              value: "http"
            - name: FLUENT_ELASTICSEARCH_USER
              value: "elastic"
            # Password of the 'elastic' superuser produced by the
            # setup-passwords step — replace with your generated value.
            - name: FLUENT_ELASTICSEARCH_PASSWORD
              value: "dzCSRsplbYFf2p7jA4Iz"
          resources:
            limits:
              memory: 512Mi
            requests:
              cpu: 100m
              memory: 200Mi
          volumeMounts:
            - name: varlog
              mountPath: /var/log
            - name: datadockerdatacontainers
              mountPath: /data/dockerdata/containers
              readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
        - name: varlog
          hostPath:
            path: /var/log
        - name: datadockerdatacontainers
          hostPath:
            path: /data/dockerdata/containers   # docker data-root on the host (customized here)
创建命名空间并安装前置资源(headless svc、rbac、nfs provisioner、storageclass)
[root@node81 logging]# kubectl create ns logging
[root@node81 logging]# kubectl apply -f handlessSvc.yaml
[root@node81 logging]# kubectl apply -f serviceAccount.yaml
[root@node81 logging]# kubectl apply -f rbac.yaml
[root@node81 logging]# kubectl apply -f npv.yaml
[root@node81 logging]# kubectl apply -f class.yaml
生成Xpack认证文件
[root@node81 logging]# docker run -it -d --name elastic-cret elasticsearch:8.2.0 /bin/bash
[root@node81 logging]# docker exec -it elastic-cret /bin/bash
elasticsearch@2b9709c1f2d9:~$ ./bin/elasticsearch-certutil ca
This tool assists you in the generation of X.509 certificates and certificate
signing requests for use with SSL/TLS in the Elastic stack.
Please enter the desired output file [elastic-stack-ca.p12]:
Enter password for elastic-stack-ca.p12 :
-------------------------------------
elasticsearch@2b9709c1f2d9:~$ ./bin/elasticsearch-certutil cert --ca elastic-stack-ca.p12
This tool assists you in the generation of X.509 certificates and certificate
signing requests for use with SSL/TLS in the Elastic stack.
......
Enter password for CA (elastic-stack-ca.p12) :
Please enter the desired output file [elastic-certificates.p12]:
Enter password for elastic-certificates.p12 :
Certificates written to /usr/share/elasticsearch/elastic-certificates.p12
#以上都直接回车即可
[root@node81 logging]# docker cp elastic-cret:/usr/share/elasticsearch/elastic-certificates.p12 .
[root@node81 logging]# docker stop elastic-cret
[root@node81 logging]# docker rm elastic-cret
#将xpack文件复制到nfs目录下,然后在nfs上复制进容器目录
[root@node81 logging]# scp elastic-certificates.p12 root@192.168.0.81:/data/eslog/
安装es
[root@node81 logging]# kubectl apply -f es.yaml
创建es密码(注:ES 8.x 中 elasticsearch-setup-passwords 已弃用,若该命令不可用,请改用 bin/elasticsearch-reset-password 逐一重置)
[root@node81 logging]# kubectl exec es-cluster-0 -n logging -- bin/elasticsearch-setup-passwords auto -b
Changed password for user apm_system
PASSWORD apm_system = 5wg8JbmKOKiLMNty90l1
Changed password for user kibana_system
PASSWORD kibana_system = 1bT0U5RbPX1e9zGNlWFL
Changed password for user kibana
PASSWORD kibana = 1bT0U5RbPX1e9zGNlWFL
Changed password for user logstash_system
PASSWORD logstash_system = 1ihEyA5yAPahNf9GuRJ9
Changed password for user beats_system
PASSWORD beats_system = WEWDpPndnGvgKY7ad0T9
Changed password for user remote_monitoring_user
PASSWORD remote_monitoring_user = MOCszTmzLmEXQrPIOW4T
Changed password for user elastic
PASSWORD elastic = bbkrgVrsE3UAfs2708aO
安装kibana、fluentd
将es密码填入指定yaml位置
[root@node81 logging]# kubectl apply -f kibana.yaml
[root@node81 logging]# kubectl apply -f fluentd.yaml
参考:https://www.cnblogs.com/wubolive/p/15765671.html
https://github.com/fluent/fluentd-kubernetes-daemonset/blob/master/fluentd-daemonset-elasticsearch.yaml
【推荐】国内首个AI IDE,深度理解中文开发场景,立即下载体验Trae
【推荐】编程新体验,更懂你的AI,立即体验豆包MarsCode编程助手
【推荐】抖音旗下AI助手豆包,你的智能百科全书,全免费不限次数
【推荐】轻量又高性能的 SSH 工具 IShell:AI 加持,快人一步
· DeepSeek 开源周回顾「GitHub 热点速览」
· 物流快递公司核心技术能力-地址解析分单基础技术分享
· .NET 10首个预览版发布:重大改进与新特性概览!
· AI与.NET技术实操系列(二):开始使用ML.NET
· 单线程的Redis速度为什么快?