Installing EFK log collection on Kubernetes: Fluentd + Elasticsearch + Kibana (v1)
Install NFS
Run on every node:
yum install -y nfs-utils rpcbind
systemctl start nfs-server
systemctl enable nfs-server
systemctl start rpcbind
# Create the NFS shared directory
On the NFS server node:
#mkdir -p /data/eslog
#vim /etc/exports
/data/eslog *(rw,no_root_squash) # restricts which client IPs may access this directory; * allows all IPs
#exportfs -arv
#systemctl restart nfs-server
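Before moving on, it is worth a quick check that the export is actually reachable from the other nodes. A minimal sketch, assuming 192.168.0.81 is the NFS server used later in this guide:
#showmount -e 192.168.0.81
#mount -t nfs 192.168.0.81:/data/eslog /mnt
#touch /mnt/test && rm /mnt/test && umount /mnt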
Create the namespace
root@node81[15:17:43]:/data/k8s/logging# kubectl create ns logging
namespace/logging created
Create the headless service
A headless service lets Elasticsearch reach each pod directly by a stable DNS name instead of going through a single cluster IP.
root@node81[15:19:55]:/data/k8s/logging# cat handlessSvc.yaml
kind: Service
apiVersion: v1
metadata:
name: elasticsearch
namespace: logging
labels:
app: elasticsearch
spec:
selector:
app: elasticsearch
clusterIP: None
ports:
- port: 9200
name: rest
- port: 9300
name: inter-node
root@node81[15:20:04]:/data/k8s/logging# kubectl apply -f handlessSvc.yaml
service/elasticsearch created
root@node81[15:21:22]:/data/k8s/logging# kubectl get svc -n logging
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
elasticsearch ClusterIP None <none> 9200/TCP,9300/TCP 52s
# CLUSTER-IP is None, which is what makes this a headless service
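Because CLUSTER-IP is None, DNS lookups return the individual pod IPs rather than a virtual IP. Once the Elasticsearch pods are running (later in this guide) this can be verified with a throwaway busybox pod; the pod name dns-test is just an example:
kubectl run dns-test --rm -it --restart=Never --image=busybox -n logging -- nslookup elasticsearch
#each StatefulSet pod also gets its own record, e.g. es-cluster-0.elasticsearch.logging.svc.cluster.local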
Create a ServiceAccount and the RBAC permissions the NFS provisioner needs
root@node81[15:28:05]:/data/k8s/logging# cat serviceAccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-provisioner
namespace: logging
root@node81[15:28:47]:/data/k8s/logging# cat rbac.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-provisioner-runner
namespace: logging
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
- apiGroups: [""]
resources: ["services", "endpoints"]
verbs: ["get"]
- apiGroups: ["extensions"]
resources: ["podsecuritypolicies"]
resourceNames: ["nfs-provisioner"]
verbs: ["use"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-provisioner
namespace: logging
subjects:
- kind: ServiceAccount
name: nfs-provisioner
namespace: logging
roleRef:
kind: ClusterRole
name: nfs-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-provisioner
namespace: logging
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-provisioner
namespace: logging
subjects:
- kind: ServiceAccount
name: nfs-provisioner
namespace: logging
roleRef:
kind: Role
name: leader-locking-nfs-provisioner
apiGroup: rbac.authorization.k8s.io
root@node81[15:28:58]:/data/k8s/logging# kubectl apply -f serviceAccount.yaml
serviceaccount/nfs-provisioner created
root@node81[15:30:12]:/data/k8s/logging# kubectl apply -f rbac.yaml
clusterrole.rbac.authorization.k8s.io/nfs-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-provisioner created
role.rbac.authorization.k8s.io/leader-locking-nfs-provisioner created
rolebinding.rbac.authorization.k8s.io/leader-locking-nfs-provisioner created
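To sanity-check that the bindings took effect, kubectl auth can-i can impersonate the ServiceAccount (optional, not part of the original steps):
kubectl auth can-i create persistentvolumes --as=system:serviceaccount:logging:nfs-provisioner
kubectl auth can-i list persistentvolumeclaims --as=system:serviceaccount:logging:nfs-provisioner -n logging
#both should print yes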
Create the nfs-provisioner Deployment
root@node81[15:30:19]:/data/k8s/logging# cat npv.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
name: nfs-provisioner
namespace: logging
spec:
selector:
matchLabels:
app: nfs-provisioner
replicas: 1
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-provisioner
spec:
nodeSelector:
kubernetes.io/hostname: node81
serviceAccount: nfs-provisioner
containers:
- name: nfs-provisioner
image: registry.cn-hangzhou.aliyuncs.com/open-ali/nfs-client-provisioner:latest
imagePullPolicy: IfNotPresent
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: eslog/nfs # PROVISIONER_NAME is eslog/nfs; it must match the provisioner field of the StorageClass created later
- name: NFS_SERVER
value: 192.168.0.81 # IP address of the NFS server; here it is the same node that hosts the Elasticsearch data
- name: NFS_PATH
value: /data/eslog # shared directory
volumes:
- name: nfs-client-root
nfs:
server: 192.168.0.81 # IP of the NFS server; replace with your own NFS address
path: /data/eslog
Create the StorageClass
root@node81[15:32:22]:/data/k8s/logging# cat class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: es-block-storage
namespace: logging
provisioner: eslog/nfs
root@node81[15:33:22]:/data/k8s/logging# kubectl apply -f npv.yaml
deployment.apps/nfs-provisioner created
root@node81[15:33:52]:/data/k8s/logging#
root@node81[15:33:53]:/data/k8s/logging#
root@node81[15:33:53]:/data/k8s/logging# kubectl apply -f class.yaml
storageclass.storage.k8s.io/es-block-storage created
root@node81[15:43:31]:/data/k8s/logging# kubectl get po -n logging
NAME READY STATUS RESTARTS AGE
nfs-provisioner-68b57cb787-6pwcq 1/1 Running 0 9m49s
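Optionally, dynamic provisioning can be tested end to end with a small throwaway PVC before deploying Elasticsearch; test-claim is a hypothetical name, not part of the manifests used here:
kubectl apply -n logging -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: es-block-storage
  resources:
    requests:
      storage: 1Gi
EOF
kubectl get pvc test-claim -n logging
#the claim should become Bound; in this walkthrough it stays Pending at first because of the selfLink issue fixed further below
kubectl delete pvc test-claim -n logging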
Deploy Elasticsearch
Elasticsearch is deployed as a StatefulSet, which keeps the pods stateful and starts them in order.
root@node81[15:49:33]:/data/k8s/logging# cat es.yaml
# RBAC authn and authz
apiVersion: v1
kind: ServiceAccount
metadata:
name: elasticsearch
namespace: logging
labels:
app: elasticsearch
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: elasticsearch
labels:
app: elasticsearch
rules:
- apiGroups:
- ""
resources:
- "services"
- "namespaces"
- "endpoints"
verbs:
- "get"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: elasticsearch
labels:
app: elasticsearch
subjects:
- kind: ServiceAccount
name: elasticsearch
namespace: logging
apiGroup: ""
roleRef:
kind: ClusterRole
name: elasticsearch
apiGroup: ""
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: es-cluster
namespace: logging
labels:
app: elasticsearch
spec:
serviceName: elasticsearch
replicas: 3
selector:
matchLabels:
app: elasticsearch
template:
metadata:
labels:
app: elasticsearch
spec:
serviceAccountName: elasticsearch
containers:
- name: elasticsearch
image: elasticsearch:7.12.1
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: 1000m # a single container may use at most 1 CPU
requests:
cpu: 100m # a single container is guaranteed at least 0.1 CPU
ports:
- containerPort: 9200
name: rest # must match the port name in the headless service
protocol: TCP
- containerPort: 9300
name: inter-node
protocol: TCP
volumeMounts:
- name: data
mountPath: /usr/share/elasticsearch/data
env:
- name: cluster.name # cluster name
value: k8s-logs
- name: node.name # node name, taken from metadata.name
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: discovery.seed_hosts # how the Elasticsearch nodes discover each other; since all pods share the same namespace this can be shortened to es-cluster-[0,1,2].elasticsearch
value: "es-cluster-0.elasticsearch,es-cluster-1.elasticsearch,es-cluster-2.elasticsearch"
- name: cluster.initial_master_nodes
value: "es-cluster-0,es-cluster-1,es-cluster-2"
- name: ES_JAVA_OPTS
value: "-Xms512m -Xmx512m" #告诉JVM使用512MB的最小和最大堆
- name: network.host
value: "0.0.0.0" #设置所有服务器都能访问
- name: xpack.monitoring.collection.enabled
value: "true" #kibana获取es集群信息配置,不开启需安装metrics才能连接es和kibana
initContainers: # several init containers run before the main application; they execute in the order defined, and only after all of them finish does the main container start
- name: fix-permissions
image: busybox
imagePullPolicy: IfNotPresent
command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"]
securityContext:
privileged: true
volumeMounts:
- name: data
mountPath: /usr/share/elasticsearch/data
#The first init container, fix-permissions, runs chown to change the owner and group of the Elasticsearch data directory to 1000:1000 (the Elasticsearch user's UID).
#By default Kubernetes mounts the data directory as root, which would leave Elasticsearch unable to access it.
- name: increase-vm-max-map
image: busybox
imagePullPolicy: IfNotPresent
command: ["sysctl", "-w", "vm.max_map_count=262144"]
securityContext:
privileged: true
#The second init container, increase-vm-max-map, raises the operating system limit on mmap counts; the default may be too low and cause out-of-memory errors.
- name: increase-fd-ulimit
image: busybox
imagePullPolicy: IfNotPresent
command: ["sh", "-c", "ulimit -n 65536"]
securityContext:
privileged: true
#The last init container runs ulimit to raise the maximum number of open file descriptors.
#The Elasticsearch notes for production use also recommend disabling swap for performance reasons; for a Kubernetes cluster swap should be disabled anyway.
volumeClaimTemplates:
- metadata:
name: data
labels:
app: elasticsearch
spec:
accessModes: [ "ReadWriteOnce" ] # the volume can only be mounted read-write by a single node
storageClassName: es-block-storage # this object must exist beforehand; we use NFS as the storage backend, so the corresponding provisioner driver is required
resources:
requests:
storage: 10Gi
root@node81[15:49:37]:/data/k8s/logging# kubectl apply -f es.yaml
statefulset.apps/es-cluster created
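The three Elasticsearch pods come up one after another, so the rollout can take a few minutes; it can be followed with either of the commands below (in this walkthrough it initially hangs because of the PVC problem handled next):
kubectl rollout status sts/es-cluster -n logging
kubectl get po -n logging -w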
Troubleshooting
root@node81[16:03:39]:/data/k8s/logging# kubectl get po -n logging
NAME READY STATUS RESTARTS AGE
es-cluster-0 0/1 Init:0/3 0 13m
nfs-provisioner-68b57cb787-6pwcq 1/1 Running 0 29m
root@node81[16:03:50]:/data/k8s/logging#
root@node81[16:03:51]:/data/k8s/logging#
root@node81[16:03:51]:/data/k8s/logging# kubectl get pvc -n logging
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
data-es-cluster-0 Pending es-block-storage 14m
root@node81[16:04:39]:/data/k8s/logging# kubectl get pv -n logging
No resources found
Add the RemoveSelfLink feature gate
root@node81[16:11:54]:/data/k8s/logging# vim /etc/kubernetes/manifests/kube-apiserver.yaml
#append to the end of spec.containers.command:
- --feature-gates=RemoveSelfLink=false
service kubelet restart
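Because kube-apiserver runs as a static pod, the kubelet recreates it automatically once the manifest changes. Whether the flag is active can be checked with, for example:
grep feature-gates /etc/kubernetes/manifests/kube-apiserver.yaml
kubectl -n kube-system get pod -l component=kube-apiserver -o yaml | grep RemoveSelfLink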
root@node81[16:15:13]:/data/k8s/logging# kubectl get po -n logging
NAME READY STATUS RESTARTS AGE
es-cluster-0 1/1 Running 0 25m
es-cluster-1 1/1 Running 0 83s
es-cluster-2 1/1 Running 0 50s
nfs-provisioner-68b57cb787-6pwcq 1/1 Running 0 41m
root@node81[16:15:27]:/data/k8s/logging#
root@node81[16:15:38]:/data/k8s/logging# kubectl get pvc -n logging
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
data-es-cluster-0 Bound pvc-9f18f804-db0d-4cfe-bcc6-50b3f66350de 10Gi RWO es-block-storage 25m
data-es-cluster-1 Bound pvc-1922a626-2911-4afb-a961-2ad56d6846e1 10Gi RWO es-block-storage 100s
data-es-cluster-2 Bound pvc-ef254b63-d6db-4228-b64d-68f0b5862f98 10Gi RWO es-block-storage 67s
root@node81[16:15:44]:/data/k8s/logging#
root@node81[16:15:46]:/data/k8s/logging#
root@node81[16:15:46]:/data/k8s/logging# kubectl get pv -n logging
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-1922a626-2911-4afb-a961-2ad56d6846e1 10Gi RWO Delete Bound logging/data-es-cluster-1 es-block-storage 110s
pvc-9f18f804-db0d-4cfe-bcc6-50b3f66350de 10Gi RWO Delete Bound logging/data-es-cluster-0 es-block-storage 2m25s
pvc-ef254b63-d6db-4228-b64d-68f0b5862f98 10Gi RWO Delete Bound logging/data-es-cluster-2 es-block-storage 77s
root@node81[16:15:54]:/data/k8s/logging#
root@node81[16:16:00]:/data/k8s/logging# kubectl get sc -n logging
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
es-block-storage eslog/nfs Delete Immediate false 42m
Verify Elasticsearch
root@node81[16:22:45]:/data/k8s/logging# kubectl port-forward es-cluster-0 9200:9200 --namespace=logging
Forwarding from 127.0.0.1:9200 -> 9200
Forwarding from [::1]:9200 -> 9200
Handling connection for 9200
#open another terminal
root@node81[16:23:05]:/data/k8s/logging# curl http://localhost:9200/_cluster/state?pretty | grep name
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 "cluster_name" : "k8s-logs",
"name" : "es-cluster-1",
"name" : "es-cluster-2",
"name" : "es-cluster-0",
"partition_field_name" : {
"by_field_name" : {
"field_name" : {
"over_field_name" : {
Deploy Kibana
root@node81[16:27:15]:/data/k8s/logging# cat kibana.yaml
apiVersion: v1
kind: Service
metadata:
name: kibana
namespace: logging
labels:
app: kibana
spec:
type: NodePort # exposed as NodePort to make testing easier
ports:
- port: 5601
selector:
app: kibana
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kibana
namespace: logging
labels:
app: kibana
spec:
replicas: 1
selector:
matchLabels:
app: kibana
template:
metadata:
labels:
app: kibana
spec:
containers:
- name: kibana
image: kibana:7.12.1 # the Kibana version must match the Elasticsearch version
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: 1000m
requests:
cpu: 100m
env:
- name: ELASTICSEARCH_HOSTS
value: http://elasticsearch:9200 # the name of the headless service
- name: SERVER_NAME
value: kibana
ports:
- containerPort: 5601
root@node81[16:28:21]:/data/k8s/logging# kubectl apply -f kibana.yaml
service/kibana created
deployment.apps/kibana created
root@node81[16:31:57]:/data/k8s/logging# kubectl get po -n logging -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
es-cluster-0 1/1 Running 0 42m 10.96.146.199 node81 <none> <none>
es-cluster-1 1/1 Running 0 18m 10.96.146.200 node81 <none> <none>
es-cluster-2 1/1 Running 0 17m 10.96.146.201 node81 <none> <none>
kibana-64b74d78d6-vlfdn 1/1 Running 0 3m52s 10.96.201.129 node83 <none> <none>
nfs-provisioner-68b57cb787-6pwcq 1/1 Running 0 58m 10.96.146.198 node81 <none> <none>
root@node81[16:32:23]:/data/k8s/logging#
root@node81[16:32:26]:/data/k8s/logging# kubectl get svc -n logging -owide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
elasticsearch ClusterIP None <none> 9200/TCP,9300/TCP 71m app=elasticsearch
kibana NodePort 10.96.120.247 <none> 5601:30285/TCP 4m7s app=kibana
Kibana can now be reached at 192.168.0.81:30285.
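Before opening the UI, Kibana's status API can be queried from the command line (30285 is the NodePort assigned above; yours will differ):
curl -s http://192.168.0.81:30285/api/status
#the overall state in the returned JSON should be green once Kibana has connected to Elasticsearch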
Deploy Fluentd
Fluentd is deployed with a DaemonSet controller so that every node in the cluster runs an identical Fluentd pod.
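The configuration below tails /var/log/containers/*.log, which are symlinks the kubelet creates into the Docker container log directory, so it is worth confirming they exist on your nodes (file names will differ):
ls -l /var/log/containers/ | head -n 5
#each entry should point into /var/lib/docker/containers/<container-id>/...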
root@node81[16:33:57]:/data/k8s/logging# cat fluentd-configmap.yaml
kind: ConfigMap
apiVersion: v1
metadata:
name: fluentd-config
namespace: logging
labels:
addonmanager.kubernetes.io/mode: Reconcile
data:
system.conf: |-
<system>
root_dir /tmp/fluentd-buffers/
</system>
containers.input.conf: |-
# This configuration file for Fluentd / td-agent is used
# to watch changes to Docker log files. The kubelet creates symlinks that
# capture the pod name, namespace, container name & Docker container ID
# to the docker logs for pods in the /var/log/containers directory on the host.
# If running this fluentd configuration in a Docker container, the /var/log
# directory should be mounted in the container.
#
# These logs are then submitted to Elasticsearch which assumes the
# installation of the fluent-plugin-elasticsearch & the
# fluent-plugin-kubernetes_metadata_filter plugins.
# See https://github.com/uken/fluent-plugin-elasticsearch &
# https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
# more information about the plugins.
#
# Example
# =======
# A line in the Docker log file might look like this JSON:
#
# {"log":"2014/09/25 21:15:03 Got request with path wombat\n",
# "stream":"stderr",
# "time":"2014-09-25T21:15:03.499185026Z"}
#
# The time_format specification below makes sure we properly
# parse the time format produced by Docker. This will be
# submitted to Elasticsearch and should appear like:
# $ curl 'http://elasticsearch:9200/_search?pretty'
# ...
# {
# "_index" : "logstash-2014.09.25",
# "_type" : "fluentd",
# "_id" : "VBrbor2QTuGpsQyTCdfzqA",
# "_score" : 1.0,
# "_source":{"log":"2014/09/25 22:45:50 Got request with path wombat\n",
# "stream":"stderr","tag":"docker.container.all",
# "@timestamp":"2014-09-25T22:45:50+00:00"}
# },
# ...
#
# The Kubernetes fluentd plugin is used to write the Kubernetes metadata to the log
# record & add labels to the log record if properly configured. This enables users
# to filter & search logs on any metadata.
# For example a Docker container's logs might be in the directory:
#
# /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
#
# and in the file:
#
# 997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
#
# where 997599971ee6... is the Docker ID of the running container.
# The Kubernetes kubelet makes a symbolic link to this file on the host machine
# in the /var/log/containers directory which includes the pod name and the Kubernetes
# container name:
#
# synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
# ->
# /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
#
# The /var/log directory on the host is mapped to the /var/log directory in the container
# running this instance of Fluentd and we end up collecting the file:
#
# /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
#
# This results in the tag:
#
# var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
#
# The Kubernetes fluentd plugin is used to extract the namespace, pod name & container name
# which are added to the log message as a kubernetes field object & the Docker container ID
# is also added under the docker field object.
# The final tag is:
#
# kubernetes.var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
#
# And the final log record look like:
#
# {
# "log":"2014/09/25 21:15:03 Got request with path wombat\n",
# "stream":"stderr",
# "time":"2014-09-25T21:15:03.499185026Z",
# "kubernetes": {
# "namespace": "default",
# "pod_name": "synthetic-logger-0.25lps-pod",
# "container_name": "synth-lgr"
# },
# "docker": {
# "container_id": "997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b"
# }
# }
#
# This makes it easier for users to search for logs by pod name or by
# the name of the Kubernetes container regardless of how many times the
# Kubernetes pod has been restarted (resulting in a several Docker container IDs).
# Json Log Example:
# {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
# CRI Log Example:
# 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
<source>
@id fluentd-containers.log
@type tail
path /var/log/containers/*.log
pos_file /var/log/es-containers.log.pos
tag raw.kubernetes.*
read_from_head true
<parse>
@type multi_format
<pattern>
format json
time_key time
time_format %Y-%m-%dT%H:%M:%S.%NZ
</pattern>
<pattern>
format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
time_format %Y-%m-%dT%H:%M:%S.%N%:z
</pattern>
</parse>
</source>
# Detect exceptions in the log output and forward them as one log entry.
<match raw.kubernetes.**>
@id raw.kubernetes
@type detect_exceptions
remove_tag_prefix raw
message log
stream stream
multiline_flush_interval 5
max_bytes 500000
max_lines 1000
</match>
# Concatenate multi-line logs
<filter **>
@id filter_concat
@type concat
key message
multiline_end_regexp /\n$/
separator ""
</filter>
# Enriches records with Kubernetes metadata
<filter kubernetes.**>
@id filter_kubernetes_metadata
@type kubernetes_metadata
</filter>
# Fixes json fields in Elasticsearch
<filter kubernetes.**>
@id filter_parser
@type parser
key_name log
reserve_data true
remove_key_name_field true
<parse>
@type multi_format
<pattern>
format json
</pattern>
<pattern>
format none
</pattern>
</parse>
</filter>
system.input.conf: |-
# Example:
# 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
<source>
@id minion
@type tail
format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
time_format %Y-%m-%d %H:%M:%S
path /var/log/salt/minion
pos_file /var/log/salt.pos
tag salt
</source>
# Example:
# Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
<source>
@id startupscript.log
@type tail
format syslog
path /var/log/startupscript.log
pos_file /var/log/es-startupscript.log.pos
tag startupscript
</source>
# Examples:
# time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
# time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
# TODO(random-liu): Remove this after cri container runtime rolls out.
<source>
@id docker.log
@type tail
format /^time="(?<time>[^"]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=($<status_code>\d+))?/
path /var/log/docker.log
pos_file /var/log/es-docker.log.pos
tag docker
</source>
# Example:
# 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
<source>
@id etcd.log
@type tail
# Not parsing this, because it doesn't have anything particularly useful to
# parse out of it (like severities).
format none
path /var/log/etcd.log
pos_file /var/log/es-etcd.log.pos
tag etcd
</source>
# Multi-line parsing is required for all the kube logs because very large log
# statements, such as those that include entire object bodies, get split into
# multiple lines by glog.
# Example:
# I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
<source>
@id kubelet.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kubelet.log
pos_file /var/log/es-kubelet.log.pos
tag kubelet
</source>
# Example:
# I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
<source>
@id kube-proxy.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-proxy.log
pos_file /var/log/es-kube-proxy.log.pos
tag kube-proxy
</source>
# Example:
# I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
<source>
@id kube-apiserver.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-apiserver.log
pos_file /var/log/es-kube-apiserver.log.pos
tag kube-apiserver
</source>
# Example:
# I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
<source>
@id kube-controller-manager.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-controller-manager.log
pos_file /var/log/es-kube-controller-manager.log.pos
tag kube-controller-manager
</source>
# Example:
# W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
<source>
@id kube-scheduler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-scheduler.log
pos_file /var/log/es-kube-scheduler.log.pos
tag kube-scheduler
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
@id glbc.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/glbc.log
pos_file /var/log/es-glbc.log.pos
tag glbc
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
@id cluster-autoscaler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/cluster-autoscaler.log
pos_file /var/log/es-cluster-autoscaler.log.pos
tag cluster-autoscaler
</source>
# Logs from systemd-journal for interesting services.
# TODO(random-liu): Remove this after cri container runtime rolls out.
<source>
@id journald-docker
@type systemd
matches [{ "_SYSTEMD_UNIT": "docker.service" }]
<storage>
@type local
persistent true
path /var/log/journald-docker.pos
</storage>
read_from_head true
tag docker
</source>
<source>
@id journald-container-runtime
@type systemd
matches [{ "_SYSTEMD_UNIT": "{{ fluentd_container_runtime_service }}.service" }]
<storage>
@type local
persistent true
path /var/log/journald-container-runtime.pos
</storage>
read_from_head true
tag container-runtime
</source>
<source>
@id journald-kubelet
@type systemd
matches [{ "_SYSTEMD_UNIT": "kubelet.service" }]
<storage>
@type local
persistent true
path /var/log/journald-kubelet.pos
</storage>
read_from_head true
tag kubelet
</source>
<source>
@id journald-node-problem-detector
@type systemd
matches [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
<storage>
@type local
persistent true
path /var/log/journald-node-problem-detector.pos
</storage>
read_from_head true
tag node-problem-detector
</source>
<source>
@id kernel
@type systemd
matches [{ "_TRANSPORT": "kernel" }]
<storage>
@type local
persistent true
path /var/log/kernel.pos
</storage>
<entry>
fields_strip_underscores true
fields_lowercase true
</entry>
read_from_head true
tag kernel
</source>
forward.input.conf: |-
# Takes the messages sent over TCP
<source>
@id forward
@type forward
</source>
monitoring.conf: |-
# Prometheus Exporter Plugin
# input plugin that exports metrics
<source>
@id prometheus
@type prometheus
</source>
<source>
@id monitor_agent
@type monitor_agent
</source>
# input plugin that collects metrics from MonitorAgent
<source>
@id prometheus_monitor
@type prometheus_monitor
<labels>
host ${hostname}
</labels>
</source>
# input plugin that collects metrics for output plugin
<source>
@id prometheus_output_monitor
@type prometheus_output_monitor
<labels>
host ${hostname}
</labels>
</source>
# input plugin that collects metrics for in_tail plugin
<source>
@id prometheus_tail_monitor
@type prometheus_tail_monitor
<labels>
host ${hostname}
</labels>
</source>
output.conf: |-
<match **>
@id elasticsearch
@type elasticsearch
@log_level info
type_name _doc
include_tag_key true
host elasticsearch #address to push logs to; use the name of the headless service
port 9200
logstash_format true
<buffer>
@type file
path /var/log/fluentd-buffers/kubernetes.system.buffer
flush_mode interval
retry_type exponential_backoff
flush_thread_count 2
flush_interval 5s
retry_forever
retry_max_interval 30
chunk_limit_size 2M
total_limit_size 500M
overflow_action block
</buffer>
</match>
root@node81[16:38:28]:/data/k8s/logging# cat fluentd.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: fluentd
namespace: logging
labels:
app: fluentd
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fluentd
labels:
app: fluentd
rules:
- apiGroups:
- ""
resources:
- pods
- namespaces
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: fluentd
roleRef:
kind: ClusterRole
name: fluentd
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: fluentd
namespace: logging
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fluentd
namespace: logging
labels:
app: fluentd
spec:
selector:
matchLabels:
app: fluentd
template:
metadata:
labels:
app: fluentd
spec:
serviceAccount: fluentd
serviceAccountName: fluentd
tolerations: #tolerate the master taint so Fluentd is also scheduled on master nodes and collects their logs
- key: node-role.kubernetes.io/master
effect: NoSchedule
containers:
- name: fluentd
image: quay.io/fluentd_elasticsearch/fluentd:v3.1.0 #other images expect the env variables below to be filled in differently; this image loads its configuration from the ConfigMap mounted into the container
imagePullPolicy: IfNotPresent
env:
- name: FLUENTD_ARGS
value: --no-supervisor -q
resources:
limits:
memory: 512Mi
requests:
cpu: 100m
memory: 200Mi
volumeMounts:
- name: varlog
mountPath: /var/log
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
- name: config-volume
mountPath: /etc/fluent/config.d
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers #the Docker data directory on the host; I changed this on my setup, adjust to match yours
- name: config-volume #matches the volumeMount name above
configMap:
name: fluentd-config #must match the ConfigMap name
root@node81[16:39:12]:/data/k8s/logging# kubectl apply -f fluentd-configmap.yaml
root@node81[16:39:22]:/data/k8s/logging# kubectl apply -f fluentd.yaml
serviceaccount/fluentd created
clusterrole.rbac.authorization.k8s.io/fluentd created
clusterrolebinding.rbac.authorization.k8s.io/fluentd created
root@node81[16:39:32]:/data/k8s/logging# kubectl get sts -n logging -owide
NAME READY AGE CONTAINERS IMAGES
es-cluster 3/3 51m elasticsearch elasticsearch:7.12.1
root@node81[16:40:55]:/data/k8s/logging#
root@node81[16:42:00]:/data/k8s/logging# kubectl get ds -n logging -o wide
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR
fluentd 3 3 3 3 3 <none> 2m31s fluentd fluent/fluentd-kubernetes-daemonset:v1.4.2-debian-elasticsearch-1.1 app=fluentd
root@node81[18:42:05]:/data/k8s/logging# kubectl get po -n logging
NAME READY STATUS RESTARTS AGE
es-cluster-0 1/1 Running 0 165m
es-cluster-1 1/1 Running 0 165m
es-cluster-2 1/1 Running 0 165m
fluentd-5lxvm 1/1 Running 0 75m
fluentd-9pmzz 1/1 Running 0 75m
fluentd-pbpgg 1/1 Running 0 75m
kibana-6bdd96c7c4-sx7bh 1/1 Running 0 147m
nfs-provisioner-68b57cb787-6pwcq 1/1 Running 0 27h
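To confirm the pipeline is working you can check that the ConfigMap is mounted where Fluentd expects it, look at a collector's logs, and make sure daily logstash indices are being created in Elasticsearch (pod name taken from the listing above; the port-forward from the verification step must be running for the curl):
kubectl exec -n logging fluentd-5lxvm -- ls /etc/fluent/config.d
kubectl logs -n logging -l app=fluentd --tail=20
curl -s 'http://localhost:9200/_cat/indices?v' | grep logstash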
The rest is the same as other tutorials: open Kibana and create the logstash-* index pattern.
Appendix: troubleshooting details for the Pending PVC issue above.
root@node81[16:04:25]:/data/k8s/logging# kubectl describe pvc data-es-cluster-0 -n logging
Name: data-es-cluster-0
Namespace: logging
StorageClass: es-block-storage
Status: Pending
Volume:
Labels: app=elasticsearch
Annotations: control-plane.alpha.kubernetes.io/leader:
{"holderIdentity":"129b333e-d0fe-11ec-846e-9ebd5802a6d0","leaseDurationSeconds":15,"acquireTime":"2022-05-11T07:49:55Z","renewTime":"2022-...
volume.beta.kubernetes.io/storage-provisioner: eslog/nfs
Finalizers: [kubernetes.io/pvc-protection]
Capacity:
Access Modes:
VolumeMode: Filesystem
Used By: es-cluster-0
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal ExternalProvisioning 4m43s (x203 over 14m) persistentvolume-controller waiting for a volume to be created, either by external provisioner "eslog/nfs" or manually created by system administrator
root@node81[16:06:43]:/data/k8s/logging# kubectl logs nfs-provisioner-68b57cb787-6pwcq -n logging
I0511 08:06:54.923658 1 controller.go:1068] scheduleOperation[lock-provision-logging/data-es-cluster-0[9f18f804-db0d-4cfe-bcc6-50b3f66350de]]
I0511 08:06:54.951133 1 controller.go:869] cannot start watcher for PVC logging/data-es-cluster-0: events is forbidden: User "system:serviceaccount:logging:nfs-provisioner" cannot list resource "events" in API group "" in the namespace "logging"
E0511 08:06:54.951201 1 controller.go:682] Error watching for provisioning success, can't provision for claim "logging/data-es-cluster-0": events is forbidden: User "system:serviceaccount:logging:nfs-provisioner" cannot list resource "events" in API group "" in the namespace "logging"
I0511 08:06:54.951217 1 leaderelection.go:156] attempting to acquire leader lease...
I0511 08:06:54.970921 1 leaderelection.go:178] successfully acquired lease to provision for pvc logging/data-es-cluster-0
I0511 08:06:54.971103 1 controller.go:1068] scheduleOperation[provision-logging/data-es-cluster-0[9f18f804-db0d-4cfe-bcc6-50b3f66350de]]
E0511 08:06:54.975126 1 controller.go:766] Unexpected error getting claim reference to claim "logging/data-es-cluster-0": selfLink was empty, can't make reference
The errors above mean that selfLink has been disabled since Kubernetes 1.20, which this provisioner still depends on.
Fix:
root@node81[16:11:54]:/data/k8s/logging# vim /etc/kubernetes/manifests/kube-apiserver.yaml
#append to the end of spec.containers.command:
- --feature-gates=RemoveSelfLink=false
service kubelet restart
Note: be sure to follow the official documentation.
Reference: https://github.com/kubernetes/kubernetes/tree/release-1.23/cluster/addons/fluentd-elasticsearch
Reference: https://blog.csdn.net/weixin_39603190/article/details/120970536