收集k8s日志

构建一个logstash镜像并使用daemonset的方式部署

[root@k8s-master 1.logstash-image-Dockerfile]# bash build-commond.sh
[root@k8s-master 1.logstash-image-Dockerfile]# ll
total 16
-rw-r--r-- 1 root root 758 May 29 11:45 app1.conf
-rw-r--r-- 1 root root 176 Sep 15 14:55 build-commond.sh
-rw-r--r-- 1 root root 216 Sep 15 14:55 Dockerfile
-rw-r--r-- 1 root root  92 May 23 11:03 logstash.yml
[root@k8s-master 1.logstash-image-Dockerfile]# cat app1.conf 
input {
  file {
    path => "/var/lib/docker/containers/*/*-json.log"
    start_position => "beginning"
    type => "jsonfile-daemonset-applog"
  }

  file {
    path => "/var/log/*.log"
    start_position => "beginning"
    type => "jsonfile-daemonset-syslog"
  }
}

output {
  if [type] == "jsonfile-daemonset-applog" {
    kafka {
      bootstrap_servers => "${KAFKA_SERVER}"
      topic_id => "${TOPIC_ID}"
      batch_size => 16384  #logstash每次向Kafka批量传输的数据量大小,单位为字节
      codec => "${CODEC}" 
   } }

  if [type] == "jsonfile-daemonset-syslog" {
    kafka {
      bootstrap_servers => "${KAFKA_SERVER}"
      topic_id => "${TOPIC_ID}"
      batch_size => 16384
      codec => "${CODEC}" #注意:系统日志不是json格式,严格来说此处codec应改为plain等非json编码(当前沿用${CODEC}=json,待确认)
  }}
}
[root@k8s-master 1.logstash-image-Dockerfile]# cat logstash.yml 
http.host: "0.0.0.0"
#xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ]
[root@k8s-master 1.logstash-image-Dockerfile]# 
[root@k8s-master 1.logstash-image-Dockerfile]# cat Dockerfile 
FROM logstash:8.3.2


USER root
WORKDIR /usr/share/logstash 
#RUN rm -rf config/logstash-sample.conf
ADD logstash.yml /usr/share/logstash/config/logstash.yml
ADD app1.conf /usr/share/logstash/pipeline/logstash.conf 
[root@k8s-master 1.logstash-image-Dockerfile]# cat build-commond.sh 
#!/bin/bash

docker build -t harbor.yangzhenyu.com/baseimages/logstash:v8.3.2-json-file-log-v4 .

docker push harbor.yangzhenyu.com/baseimages/logstash:v8.3.2-json-file-log-v4
[root@k8s-master 1.logstash-image-Dockerfile]#
[root@k8s-master 1.daemonset-logstash]# kubectl apply -f 2.DaemonSet-logstash.yaml 
[root@k8s-master 1.daemonset-logstash]# cat 2.DaemonSet-logstash.yaml 
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: logstash-elasticsearch
  namespace: kube-system
  labels:
    k8s-app: logstash-logging
spec:
  selector:
    matchLabels:
      name: logstash-elasticsearch
  template:
    metadata:
      labels:
        name: logstash-elasticsearch
    spec:
      tolerations:
      # this toleration is to have the daemonset runnable on master nodes
      # remove it if your masters can't run pods
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      containers:
      - name: logstash-elasticsearch
        image: harbor.yangzhenyu.com/baseimages/logstash:v8.3.2-json-file-log-v4 
        env:
        - name: "KAFKA_SERVER"
          value: "10.0.0.13:9092,10.0.0.15:9092,10.0.0.12:9092"
        - name: "TOPIC_ID"
          value: "jsonfile-log-topic"
        - name: "CODEC"
          value: "json"
#        resources:
#          limits:
#            cpu: 1000m
#            memory: 1024Mi
#          requests:
#            cpu: 500m
#            memory: 1024Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: false
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers

安装kafka和zk(注意:zk需提前装好,zk的安装步骤此处从略)

安装jdk
# wget https://download.oracle.com/java/18/latest/jdk-18_linux-x64_bin.rpm
# rpm -ivh jdk-18_linux-x64_bin.rpm
# java -version

下载kafka
# wget https://dlcdn.apache.org/kafka/3.2.0/kafka_2.13-3.2.0.tgz

将kafka传输到另外两台机器
# scp kafka_2.13-3.2.0.tgz 10.0.0.15:/root/
# scp kafka_2.13-3.2.0.tgz 10.0.0.12:/root/

解压
# tar -zxvf kafka_2.13-3.2.0.tgz -C /usr/local/

创建日志和数据目录
# mkdir -p /home/kafka/data
# mkdir -p /home/kafka/logs

修改kafka配置文件。注意:配置文件中需要填写zk地址,因此需提前装好zk;另外两台机器除修改broker.id外,还需将listeners和advertised.listeners中的IP改为各自本机IP
# vim /usr/local/kafka_2.13-3.2.0/config/server.properties
# cat /usr/local/kafka_2.13-3.2.0/config/server.properties | grep -v '#' | grep -v '^$'
broker.id=0
listeners=PLAINTEXT://10.0.0.13:9092
advertised.listeners=PLAINTEXT://10.0.0.13:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/home/kafka/logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=10.0.0.13:2181,10.0.0.15:2181,10.0.0.12:2181
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0

启动kafka
/usr/local/kafka_2.13-3.2.0/bin/kafka-server-start.sh -daemon /usr/local/kafka_2.13-3.2.0/config/server.properties

 

下载并安装

wget https://artifacts.elastic.co/downloads/logstash/logstash-8.3.2-x86_64.rpm
yum localinstall logstash-8.3.2-x86_64.rpm -y

配置logstash.yml

[root@VM-0-3-centos ~]# vim /etc/logstash/conf.d/kafka-to-es.conf   #注意:pipeline配置应放在conf.d目录下,/etc/logstash/logstash.yml是YAML格式的设置文件,不能写pipeline配置
input {
  kafka {
    bootstrap_servers => "10.0.0.13:9092,10.0.0.15:9092,10.0.0.12:9092"
    topics => ["jsonfile-log-topic"]
    codec => "json"
  }
}

output {
  #if [fields][type] == "app1-access-log" {
  if [type] == "jsonfile-daemonset-applog" {
    elasticsearch {
      hosts => ["10.0.0.14:9200","10.0.0.3:9200","10.0.0.5:9200"]
      index => "jsonfile-daemonset-applog-%{+YYYY.MM.dd}"
    }}

  if [type] == "jsonfile-daemonset-syslog" {
    elasticsearch {
      hosts => ["10.0.0.14:9200","10.0.0.3:9200","10.0.0.5:9200"]
      index => "jsonfile-daemonset-syslog-%{+YYYY.MM.dd}"
    }}

}
[root@VM-0-3-centos ~]# systemctl restart logstash.service 
[root@VM-0-3-centos ~]# systemctl enable logstash.service 

安装ES

安装jdk
# wget https://download.oracle.com/java/18/latest/jdk-18_linux-x64_bin.rpm
# rpm -ivh jdk-18_linux-x64_bin.rpm
# java -version

下载es
# wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-8.3.2-x86_64.rpm

scp到另外两台
# scp elasticsearch-8.3.2-x86_64.rpm 10.0.0.3:/root/
# scp elasticsearch-8.3.2-x86_64.rpm 10.0.0.5:/root/

安装es
# rpm -ivh elasticsearch-8.3.2-x86_64.rpm

编辑配置文件,另外两台需相应修改node.name和network.host(network.host改为各自本机IP)
# vim /etc/elasticsearch/elasticsearch.yml
# cat /etc/elasticsearch/elasticsearch.yml | grep -v '#' | grep -v '^$'
cluster.name: my-application
node.name: node-1
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 10.0.0.14
http.port: 9200
discovery.seed_hosts: ["10.0.0.14", "10.0.0.3", "10.0.0.5"]
cluster.initial_master_nodes: ["10.0.0.14", "10.0.0.3", "10.0.0.5"]
xpack.security.enabled: false
xpack.security.enrollment.enabled: true
xpack.security.http.ssl:
  enabled: false
  keystore.path: certs/http.p12
xpack.security.transport.ssl:
  enabled: false
  verification_mode: certificate
  keystore.path: certs/transport.p12
  truststore.path: certs/transport.p12
http.host: 0.0.0.0


启动es
systemctl restart elasticsearch.service 

查看端口9200
netstat -tunlp | grep 9200

# curl 127.0.0.1:9200

安装kibana

下载kibana
# wget https://artifacts.elastic.co/downloads/kibana/kibana-8.3.2-x86_64.rpm

安装
# rpm -ivh kibana-8.3.2-x86_64.rpm

编辑配置文件
# vim /etc/kibana/kibana.yml 
# cat /etc/kibana/kibana.yml | grep -v '#' | grep -v '^$'
server.port: 5601
server.host: "10.0.0.17"
elasticsearch.hosts: ["http://10.0.0.14:9200", "http://10.0.0.3:9200", "http://10.0.0.5:9200"]
logging:
  appenders:
    file:
      type: file
      fileName: /var/log/kibana/kibana.log
      layout:
        type: json
  root:
    appenders:
      - default
      - file
pid.file: /run/kibana/kibana.pid
i18n.locale: "zh-CN"

启动服务
# systemctl restart kibana

查看端口
# netstat -tunlp | grep 5601

用浏览器直接访问kibana

 

posted @ 2022-09-16 10:12  Maniana  阅读(82)  评论(0编辑  收藏  举报