5. Collecting logs into ES
Install nginx
# Add the nginx yum repository; CentOS does not include one by default
[root@cs tmp]# rpm -Uvh http://nginx.org/packages/centos/7/noarch/RPMS/nginx-release-centos-7-0.el7.ngx.noarch.rpm
[root@cs tmp]# yum install nginx httpd-tools -y
# Verify that nginx installed correctly and its configuration is valid
[root@cs tmp]# nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
# Start nginx
[root@cs tmp]# systemctl start nginx
[root@cs tmp]# yum install lsof
# Check that nginx is listening on port 80
[root@cs tmp]# lsof -i:80
Directory layout:
/var/log/nginx/access.log    # nginx access log
/var/log/nginx/error.log     # nginx error log
/usr/share/nginx/            # nginx installation directory
# Load test to generate some data: -n is the total number of requests, -c is the concurrency; note that the URL must end with a /
ab -n 100 -c 10 http://127.0.0.1/
# View the generated access log
[root@cs ~]# cat /var/log/nginx/access.log
Install filebeat
# Download the filebeat package (or upload it with rz)
[root@cs tmp]# wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-6.8.15-x86_64.rpm
[root@cs tmp]# rz filebeat-6.8.15-x86_64.rpm
# Install the rpm package
[root@cs tmp]# rpm -ivh filebeat-6.8.15-x86_64.rpm
[root@cs tmp]# rpm -qc filebeat
/etc/filebeat/filebeat.yml    # filebeat main configuration file
/var/log/filebeat/filebeat    # filebeat log file
Log collection environment
centos7.9 + elasticsearch-6.8.15 + kibana-6.8.15 + filebeat-6.8.15 + elasticsearch-head-chrome
# The entire ELK stack runs on a single machine
Disable the firewall:
# Check the firewall status
systemctl status firewalld.service
# Stop the firewall
systemctl stop firewalld.service
# Prevent the firewall from starting on boot
systemctl disable firewalld.service
# Disable SELinux (the change in /etc/selinux/config takes effect after a reboot)
[root@r ~]# sed -i.ori 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
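Before moving on, it is worth a quick sanity check that Elasticsearch and Kibana are up and reachable (a minimal check, assuming the default ports 9200 and 5601 on this host):
# Elasticsearch should answer with its cluster and version info
[root@cs ~]# curl -s http://localhost:9200
# Kibana exposes a status endpoint on port 5601
[root@cs ~]# curl -s http://localhost:5601/api/status | head -c 200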
1. Filebeat collecting plain nginx logs
Edit the filebeat configuration file and change the following:
filebeat.inputs:
- type: log
  # enable this input
  enabled: true
  # paths of the logs to collect
  paths:
    - /var/log/nginx/access.log
output.elasticsearch:
  # ship the output to this es address
  hosts: ["localhost:9200"]
[root@cs tmp]# vim /etc/filebeat/filebeat.yml
[root@cs tmp]# egrep -v "#|^$" /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 3
setup.kibana:
output.elasticsearch:
  hosts: ["localhost:9200"]
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
# Start filebeat
[root@cs tmp]# systemctl start filebeat
# Watch the filebeat log; no errors means the configuration is fine
[root@cs tmp]# tail -f /var/log/filebeat/filebeat
Then hit nginx a few more times. In Kibana go to Management and Create index pattern; it will suggest filebeat-6.8.15-2021.05.07.
Once the pattern is created, the Discover page shows the histogram and the log entries; other settings can be adjusted on that page as well.
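To double-check from the command line that the index really made it into ES, the _cat API can be queried directly (assuming the default filebeat-* index naming shown above):
[root@cs ~]# curl -s 'http://localhost:9200/_cat/indices?v' | grep filebeat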
2. Filebeat collecting nginx logs in JSON format
The nginx log entries we want in JSON, because JSON is much easier to analyze and process later:
{"time_local": "26/Apr/2021:16:57:33 +0800","remote_addr": "10.0.0.200","referer": "-","request": "GET /home.html HTTP/1.0","status": "404","bytes": "3650","agent": "ApacheBench/2.3","x_forwarded": "-","up_addr": "-","up_host": "-","upstream_time": "-","request_time": "0.000"}
1. Modify the nginx configuration
What needs to change:
# Define a JSON log format so that every entry nginx writes is a JSON object
log_format my_json '{"time_local": "$time_local",'
                   '"remote_addr": "$remote_addr",'
                   '"referer": "$http_referer",'
                   '"request": "$request",'
                   '"status": "$status",'
                   '"bytes": "$body_bytes_sent",'
                   '"agent": "$http_user_agent",'
                   '"x_forwarded": "$http_x_forwarded_for",'
                   '"up_addr": "$upstream_addr",'
                   '"up_host": "$upstream_http_host",'
                   '"upstream_time": "$upstream_response_time",'
                   '"request_time": "$request_time"}';
# Write access.log with the json format
access_log /var/log/nginx/access.log my_json;
[root@cs ~]# vim /etc/nginx/nginx.conf
http {
    include             /etc/nginx/mime.types;
    default_type        application/octet-stream;
    log_format main     '$remote_addr - $remote_user [$time_local] "$request" '
                        '$status $body_bytes_sent "$http_referer" '
                        '"$http_user_agent" "$http_x_forwarded_for"';
    log_format my_json  '{"time_local": "$time_local",'
                        '"remote_addr": "$remote_addr",'
                        '"referer": "$http_referer",'
                        '"request": "$request",'
                        '"status": "$status",'
                        '"bytes": "$body_bytes_sent",'
                        '"agent": "$http_user_agent",'
                        '"x_forwarded": "$http_x_forwarded_for",'
                        '"up_addr": "$upstream_addr",'
                        '"up_host": "$upstream_http_host",'
                        '"upstream_time": "$upstream_response_time",'
                        '"request_time": "$request_time"}';
    access_log          /var/log/nginx/access.log my_json;
    sendfile            on;
    #tcp_nopush         on;
    keepalive_timeout   65;
    #gzip               on;
    include             /etc/nginx/conf.d/*.conf;
}
# An important step: run nginx -t again to make sure the configuration is still valid
[root@cs ~]# nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
# Truncate the existing log
[root@cs ~]# > /var/log/nginx/access.log
# Reload nginx
[root@cs ~]# systemctl reload nginx
# Check that nginx is still listening
[root@cs ~]# lsof -i:80
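After a couple of new requests, every fresh entry in access.log should be a single valid JSON object; a quick way to verify that (a sketch, assuming python is installed, as it is by default on CentOS 7):
[root@cs ~]# ab -n 1 -c 1 http://127.0.0.1/
# pretty-printing succeeds only if the line is valid JSON
[root@cs ~]# tail -1 /var/log/nginx/access.log | python -m json.tool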
2. Modify the filebeat configuration
[root@cs ~]# vim /etc/filebeat/filebeat.yml
# Only these two lines are added; they tell filebeat to parse each log line as JSON before shipping it to ES
json.keys_under_root: true
json.overwrite_keys: true
# The modified configuration file looks like this:
[root@cs ~]# egrep -v "#|^$" /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
  # note where these two lines go
  json.keys_under_root: true
  json.overwrite_keys: true
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 3
setup.kibana:
output.elasticsearch:
  hosts: ["10.0.0.200:9200"]
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
# Restart filebeat
[root@cs ~]# systemctl restart filebeat
# Watch the filebeat log; no errors means the configuration is fine
[root@cs ~]# tail -f /var/log/filebeat/filebeat
In Kibana: since the log source switched from plain text to JSON, the data already stored in the corresponding ES index should go too. The simplest approach is to delete the index and create the index pattern again, so that everything collected from now on is JSON.
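Deleting the old index can also be done from the command line (a sketch, assuming the index created earlier was named filebeat-6.8.15-2021.05.07):
[root@cs ~]# curl -XDELETE 'http://localhost:9200/filebeat-6.8.15-2021.05.07'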
Test data can be generated with ab:
[root@cs ~]# ab -n 10 -c 10 http://127.0.0.1/
[root@cs ~]# ab -n 10 -c 10 http://127.0.0.1/index.html
3. Filebeat splitting logs as needed
Give the filebeat index a custom name and configure it so that a new index is created each month.
nginx keeps producing JSON logs as before; ES and Kibana need no changes for now, just keep them running.
1. Modify the filebeat configuration file; mind the indentation and position:
# The default index name
index: "filebeat-%{[beat.version]}-%{+yyyy.MM.dd}"
# Change it as needed - a custom index name
index: "nginx-access-%{[beat.version]}-%{+yyyy.MM}"
setup.template.name: "nginx"
# Apply the template to indices starting with nginx-
setup.template.pattern: "nginx-*"
# Disable the default template
setup.template.enabled: false
# Let our template overwrite the default one
setup.template.overwrite: true
The modified configuration file:
[root@cs ~]# vim /etc/filebeat/filebeat.yml
[root@cs ~]# egrep -v "#|^$" /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
  json.keys_under_root: true
  json.overwrite_keys: true
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 3
setup.kibana:
output.elasticsearch:
  hosts: ["localhost:9200"]
  # where the change goes in the file
  index: "nginx-access-%{[beat.version]}-%{+yyyy.MM}"
setup.template.name: "nginx"
setup.template.pattern: "nginx-*"
setup.template.enabled: false
setup.template.overwrite: true
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
# Restart filebeat
[root@cs ~]# systemctl restart filebeat
# Watch the filebeat log; no errors means the configuration is fine
[root@cs ~]# tail -f /var/log/filebeat/filebeat
# Generate some test data
[root@cs ~]# ab -n 10 -c 10 http://127.0.0.1/
In Kibana, add the index pattern under Management and view the data in Discover.
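The new monthly index can also be confirmed directly against ES; the document count should grow as more requests come in (assuming the nginx-access-* index name configured above):
[root@cs ~]# curl -s 'http://localhost:9200/nginx-access-*/_count?pretty'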
4. Filebeat collecting several kinds of logs at once
How to collect both the nginx access log and the error log at the same time:
# Check that both log files exist
[root@cs ~]# ls /var/log/nginx/
access.log error.log
Approach:
In filebeat.inputs we can declare several inputs, one per log type, and attach a tag to each of them. In the Elasticsearch output we then define several indices and, based on the tag, write each kind of log into its own index.
Modify the filebeat configuration
The parts that need configuring; mind the indentation and position:
filebeat.inputs:
# first input: access.log
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
  # access.log is JSON, so declare it as such
  json.keys_under_root: true
  json.overwrite_keys: true
  # tag this input
  tags: ["access"]
# second input: error.log
- type: log
  enabled: true
  paths:
    - /var/log/nginx/error.log
  # tag this input
  tags: ["error"]
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 3
setup.kibana:
output.elasticsearch:
  hosts: ["10.0.0.200:9200"]
  # custom index names
  indices:
    - index: "nginx-access-%{[beat.version]}-%{+yyyy.MM}"
      # events tagged "access" go into this index
      when.contains:
        tags: "access"
    - index: "nginx-error-%{[beat.version]}-%{+yyyy.MM}"
      # events tagged "error" go into this index
      when.contains:
        tags: "error"
setup.template.name: "nginx"
setup.template.pattern: "nginx-*"
setup.template.enabled: false
setup.template.overwrite: true
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
The finished configuration file:
[root@cs ~]# vim /etc/filebeat/filebeat.yml
[root@cs ~]# egrep -v "#|^$" /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
  json.keys_under_root: true
  json.overwrite_keys: true
  tags: ["access"]
- type: log
  enabled: true
  paths:
    - /var/log/nginx/error.log
  tags: ["error"]
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 3
setup.kibana:
output.elasticsearch:
  hosts: ["localhost:9200"]
  indices:
    - index: "nginx-access-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        tags: "access"
    - index: "nginx-error-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        tags: "error"
setup.template.name: "nginx"
setup.template.pattern: "nginx-*"
setup.template.enabled: false
setup.template.overwrite: true
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
# Restart filebeat
[root@cs ~]# systemctl restart filebeat
# Watch the filebeat log; no errors means the configuration is fine
[root@cs ~]# tail -f /var/log/filebeat/filebeat
# Generate test data: both successful and failing requests
[root@cs ~]# ab -n 10 -c 10 http://127.0.0.1/
[root@cs ~]# ab -n 10 -c 10 http://127.0.0.1/asdf
In Kibana, add both index patterns under Management and view the data in Discover.
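To confirm the routing worked, both indices should now exist, and the requests for /asdf should have produced entries in the error index as well (a quick check against the index names configured above):
[root@cs ~]# curl -s 'http://localhost:9200/_cat/indices/nginx-*?v'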
5. Filebeat collecting Tomcat logs
Bring up another virtual machine and install Tomcat and filebeat on it. Tomcat also depends on a Java environment, which has to be set up beforehand.
Install Tomcat
[root@cs ~]# yum install tomcat tomcat-webapps tomcat-admin-webapps tomcat-docs-webapp tomcat-javadoc -y
[root@cs ~]# systemctl start tomcat
Then simply browse to port 8080 and click around a few links to generate some log entries:
# View the log file
[root@cs ~]# cat /var/log/tomcat/localhost_access_log.2021-05-08.txt
Tomcat access logs are split by day and written as plain text; they can be configured to be JSON as well.
# Stop Tomcat first
[root@cs ~]# systemctl stop tomcat
# Truncate the old plain-text log
[root@cs ~]# > /var/log/tomcat/localhost_access_log.2021-05-08.txt
# Back up the configuration file, just in case
[root@cs ~]# cp /etc/tomcat/server.xml /etc/tomcat/server.xml.bak
# Edit the configuration file
[root@cs ~]# vim /etc/tomcat/server.xml
# Show line numbers in vim
:set nu
# Edit line 139 and change the pattern attribute of the access log valve to a JSON layout (inner double quotes are written as &quot; inside the XML attribute):
pattern="{&quot;client&quot;:&quot;%h&quot;, &quot;client user&quot;:&quot;%l&quot;, &quot;authenticated&quot;:&quot;%u&quot;, &quot;access time&quot;:&quot;%t&quot;, &quot;method&quot;:&quot;%r&quot;, &quot;status&quot;:&quot;%s&quot;, &quot;send bytes&quot;:&quot;%b&quot;, &quot;Query?string&quot;:&quot;%q&quot;, &quot;partner&quot;:&quot;%{Referer}i&quot;, &quot;Agent version&quot;:&quot;%{User-Agent}i&quot;}" />
# Start Tomcat
[root@cs ~]# systemctl start tomcat
# Watch the log
[root@cs ~]# tail -f /var/log/tomcat/localhost_access_log.2021-05-08.txt
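Once a few pages have been clicked again, each new line should be valid JSON, which can be checked the same way as for nginx (a sketch, assuming python is installed):
[root@cs ~]# tail -1 /var/log/tomcat/localhost_access_log.2021-05-08.txt | python -m json.tool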
Modify the filebeat configuration
filebeat on this machine has already been installed from the rpm package.
[root@cs ~]# vim /etc/filebeat/filebeat.yml
[root@cs ~]# egrep -v "#|^$" /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  # enable this input
  enabled: true
  paths:
    # log path to collect; * matches every date
    - /var/log/tomcat/localhost_access_log.*.txt
  # parse the lines as JSON
  json.keys_under_root: true
  json.overwrite_keys: true
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 3
setup.kibana:
output.elasticsearch:
  # address of the es the logs are shipped to
  hosts: ["192.168.189.131:9200"]
  # custom index name
  index: "tomcat-access-%{[beat.version]}-%{+yyyy.MM}"
setup.template.name: "tomcat"
setup.template.pattern: "tomcat-*"
setup.template.enabled: false
setup.template.overwrite: true
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
# Start filebeat
[root@cs ~]# systemctl start filebeat
# Watch the filebeat log; no errors means the configuration is fine
[root@cs ~]# tail -f /var/log/filebeat/filebeat
In Kibana, add the Tomcat index pattern under Management and view the data in Discover.
6. Filebeat collecting multi-line logs
How do we collect multi-line logs, for example Elasticsearch's own log? With a regular-expression multiline rule: lines that match the pattern start a new event, and the lines that do not match are appended to the previous event.
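As a concrete illustration of the rule used below (multiline.pattern: '^\[', negate: true, match: after): an ES log event starts with a timestamp in square brackets, and the following lines that do not start with '[' (a Java stack trace, for instance) are appended to that event. A rough way to eyeball this on the raw file:
# lines that start a new event vs. continuation lines
[root@cs ~]# grep -c '^\[' /var/log/elasticsearch/elasticsearch.log
[root@cs ~]# grep -vc '^\[' /var/log/elasticsearch/elasticsearch.log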
Modify the filebeat configuration
[root@cs data]# vim /etc/filebeat/filebeat.yml
[root@cs data]# egrep -v "#|^$" /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
  json.keys_under_root: true
  json.overwrite_keys: true
  tags: ["access"]
- type: log
  enabled: true
  paths:
    - /var/log/nginx/error.log
  tags: ["error"]
# new input: collect the es log
- type: log
  enabled: true
  paths:
    - /var/log/elasticsearch/elasticsearch.log
  # tag it
  tags: ["es"]
  # multiline matching rules
  multiline.pattern: '^\['
  multiline.negate: true
  multiline.match: after
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 3
setup.kibana:
output.elasticsearch:
  hosts: ["localhost:9200"]
  indices:
    - index: "nginx-access-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        tags: "access"
    - index: "nginx-error-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        tags: "error"
    # new index for the es log
    - index: "es-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        tags: "es"
setup.template.name: "nginx"
setup.template.pattern: "nginx-*"
setup.template.enabled: false
setup.template.overwrite: true
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
# Start filebeat
[root@cs ~]# systemctl start filebeat
# Watch the filebeat log; no errors means the configuration is fine
[root@cs ~]# tail -f /var/log/filebeat/filebeat
In Kibana, add the es index pattern under Management and view it in Discover; filter on log.flags:multiline to see the merged events, and save the search.
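The same filter can be run straight against ES to confirm that multi-line events were really merged (assuming the es-* index name configured above):
[root@cs ~]# curl -s 'http://localhost:9200/es-*/_search?q=log.flags:multiline&size=1&pretty'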
7. Filebeat log collection modules
Common module commands
# Show help
filebeat modules help
# List all modules and whether they are enabled
filebeat modules list
# Enable a module
filebeat modules enable nginx
# Disable a module
filebeat modules disable nginx
# Load the setup assets (index template, Kibana dashboards)
filebeat setup -e
Here we enable the nginx module as an example.
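Enabling the module simply renames its file under modules.d from nginx.yml.disabled to nginx.yml, which is easy to verify:
[root@cs ~]# filebeat modules enable nginx
[root@cs ~]# ls /etc/filebeat/modules.d/ | grep nginx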
Modify the nginx module configuration
[root@cs ~]# vim /etc/filebeat/modules.d/nginx.yml
[root@cs ~]# egrep -v "#|^$" /etc/filebeat/modules.d/nginx.yml
- module: nginx
  access:
    enabled: true
    # path of the log file to collect
    var.paths: ["/var/log/nginx/access.log"]
  error:
    enabled: true
    var.paths: ["/var/log/nginx/error.log"]
Modify the filebeat configuration
# Add the Kibana host and port that filebeat should connect to
[root@cs ~]# vim /etc/filebeat/filebeat.yml
[root@cs ~]# egrep -v "#|^$" /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
  json.keys_under_root: true
  json.overwrite_keys: true
  tags: ["access"]
- type: log
  enabled: true
  paths:
    - /var/log/nginx/error.log
  tags: ["error"]
- type: log
  enabled: true
  paths:
    - /var/log/elasticsearch/elasticsearch.log
  tags: ["es"]
  multiline.pattern: '^\['
  multiline.negate: true
  multiline.match: after
- type: log
  enabled: true
  paths:
    - /root/nginx_json.log
  json.keys_under_root: true
  json.overwrite_keys: true
  tags: ["lufei"]
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 3
# the kibana address filebeat connects to
setup.kibana:
  host: "192.168.189.135:5601"
output.elasticsearch:
  hosts: ["localhost:9200"]
  indices:
    - index: "nginx-access-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        tags: "access"
    - index: "nginx-error-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        tags: "error"
    - index: "es-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        tags: "es"
    - index: "lufei-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        tags: "lufei"
setup.template.name: "nginx"
setup.template.pattern: "nginx-*"
setup.template.enabled: false
setup.template.overwrite: true
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
Once everything is configured:
# Watch the log output during setup and deal with any errors right away
[root@cs ~]# filebeat setup -e
# Restart the filebeat service
[root@cs ~]# systemctl restart filebeat
# Watch the filebeat log; no errors means the configuration is fine
[root@cs ~]# tail -f /var/log/filebeat/filebeat
In Kibana, add the filebeat index pattern under Management and view it in Discover.
Charting in Kibana
Preparing the data
A real nginx access log provided by Luffycity (路飞学城); its only drawback is that the data is static, so nothing changes live. Upload the nginx_json.log file to the host and get the data into ES, and Kibana can then read it and draw charts.
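Before pointing filebeat at it, it is worth confirming that the uploaded file really is one JSON object per line (a quick check, assuming the file was uploaded to /root/nginx_json.log as configured below):
[root@cs ~]# wc -l /root/nginx_json.log
# pretty-printing succeeds only if the first line is valid JSON
[root@cs ~]# head -1 /root/nginx_json.log | python -m json.tool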
Modify the filebeat configuration
# Add the nginx_json.log file to filebeat's inputs
[root@cs ~]# vim /etc/filebeat/filebeat.yml
[root@cs ~]# egrep -v "#|^$" /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
  json.keys_under_root: true
  json.overwrite_keys: true
  tags: ["access"]
- type: log
  enabled: true
  paths:
    - /var/log/nginx/error.log
  tags: ["error"]
- type: log
  enabled: true
  paths:
    - /var/log/elasticsearch/elasticsearch.log
  tags: ["es"]
  multiline.pattern: '^\['
  multiline.negate: true
  multiline.match: after
# new input for the nginx_json.log file
- type: log
  enabled: true
  paths:
    - /root/nginx_json.log
  json.keys_under_root: true
  json.overwrite_keys: true
  tags: ["lufei"]
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 3
setup.kibana:
  host: "192.168.189.135:5601"
output.elasticsearch:
  hosts: ["localhost:9200"]
  indices:
    - index: "nginx-access-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        tags: "access"
    - index: "nginx-error-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        tags: "error"
    - index: "es-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        tags: "es"
    # index for the nginx_json.log data
    - index: "lufei-%{[beat.version]}-%{+yyyy.MM}"
      when.contains:
        tags: "lufei"
setup.template.name: "nginx"
setup.template.pattern: "nginx-*"
setup.template.enabled: false
setup.template.overwrite: true
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
# Restart the filebeat service
[root@cs tmp]# systemctl restart filebeat
# Watch the filebeat log; no errors means the configuration is fine
[root@cs ~]# tail -f /var/log/filebeat/filebeat
In Kibana, add the lufei index pattern under Management and view it in Discover.
Question: if the lufei index is deleted by mistake, how do we get the data indexed again?
When filebeat collects a file it records the offset it has reached for that file; the next time it reads the file it continues from that position, which avoids collecting the same data twice.
Because nginx_json.log is static and never grows, we have to find the file that stores those offsets, delete it, and restart filebeat so the whole file is read again.
[root@cs ~]# find / -name registry
/var/lib/filebeat/registry
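In filebeat 6.x the registry is a single JSON file that stores, for every source file, the offset filebeat has read up to; it can be inspected before deleting it (a quick look, assuming python is available):
[root@cs ~]# python -m json.tool /var/lib/filebeat/registry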
# Stop the service - delete the registry file - start the service
[root@cs ~]# systemctl stop filebeat
[root@cs ~]# rm -rf /var/lib/filebeat/registry
[root@cs ~]# systemctl start filebeat
In Kibana, add the lufei index pattern under Management again and view it in Discover.
1. Bar charts, data tables, pie charts, and so on
Visualize: create the various charts - add a terms aggregation, pick the field, tweak the options, and save.
Dashboard: combine the charts by adding the ones you created in Visualize.