使用filebeat替代logstash收集日志并发送到kafka

在web服务器上操作:如果logstash正在运行,先停止它

root@long:~# systemctl stop logstash

监控多个日志文件的配置(本例监控error.log和access.log两个日志):
上传deb包,安装

[root@es-web2 src]# dpkg -i filebeat-7.12.1-amd64.deb

先启动zookeeper

[root@mq1 ~]# /usr/local/zookeeper/bin/zkServer.sh restart
[root@mq2 ~]# /usr/local/zookeeper/bin/zkServer.sh restart
[root@mq3 ~]# /usr/local/zookeeper/bin/zkServer.sh restart

启动kafka

[root@mq1 ~]# /apps/kafka/bin/kafka-server-start.sh -daemon /apps/kafka/config/server.properties

[root@mq2 ~]# /apps/kafka/bin/kafka-server-start.sh -daemon /apps/kafka/config/server.properties

[root@mq3 ~]# /apps/kafka/bin/kafka-server-start.sh -daemon /apps/kafka/config/server.properties

修改filebeat配置文件

root@long:~# grep -v "#" /etc/filebeat/filebeat.yml| grep -v "^$"

# Filebeat inputs: one "log" input per nginx log file to be collected.
filebeat.inputs:
- type: log
  enabled: True
  paths:
    - /apps/nginx/logs/error.log
  # Custom fields attached to each event; logstash routes on [fields][app] below.
  fields:
    app: nginx-errorlog
    group: n223

- type: log
  enabled: True
  paths:
    - /var/log/nginx/access.log
  fields:
    app: nginx-accesslog
    group: n125

# Ship events to the 3-node kafka cluster (instead of directly to logstash/ES).
output.kafka:
  hosts: ["172.31.2.41:9092","172.31.2.42:9092","172.31.2.43:9092"]
  topic: "long-mm123-nginx"
  # Round-robin events across partitions, skipping unreachable ones.
  partition.round_robin:
     reachable_only: true
  required_acks: 1  # wait for the partition leader's ack only
  compression: gzip
  max_message_bytes: 1000000  # drop events larger than ~1 MB

重启

root@long:~# systemctl restart filebeat

logstash配置文件

root@long:~# vim /etc/logstash/conf.d/filebeat-nginx-log-redis.conf

# Consume filebeat events from the kafka topic and index them into elasticsearch.
input {
  kafka {
    bootstrap_servers => "172.31.2.41:9092,172.31.2.42:9092,172.31.2.43:9092"
    topics => "long-mm123-nginx"
    # Filebeat publishes JSON; decode it so [fields][app] is addressable below.
    codec => "json"
  }
}

output {
  # Route events to per-app daily indices using the custom "app" field
  # that was set in filebeat.yml.
  if [fields][app] == "nginx-errorlog" {                                 
    elasticsearch {
      hosts => ["172.31.2.101:9200"]
      index => "filebeat-kafka-nginx-errorlog-%{+YYYY.MM.dd}"
  }}
  if [fields][app] == "nginx-accesslog" {                                 
    elasticsearch {
      hosts => ["172.31.2.101:9200"]
      index => "filebeat-kafka-nginx-accesslog-%{+YYYY.MM.dd}"
  }}
}

重启

root@long:~# systemctl restart logstash

可用kafka客户端工具(如Offset Explorer)验证topic中已收到日志消息

在kibana中创建对应的索引模式(index pattern)以展示日志

posted @ 2021-10-07 00:41  空白的旋律  阅读(110)  评论(0编辑  收藏  举报