filebeat 收集 nginx 日志到 kibana 展示
首先是 nginx.conf 的日志格式
json 格式很多,不一定非要这个
# JSON access-log format. `escape=json` (nginx >= 1.11.8) makes nginx escape
# double quotes, backslashes and control characters inside variable values —
# without it, a request line or User-Agent containing `"` produces invalid
# JSON that Filebeat's JSON decoder cannot parse.
# $status, $body_bytes_sent and $request_time are deliberately unquoted so
# they are indexed as numbers, not strings.
log_format json escape=json '{ "access_time": "$time_iso8601", "remote_addr": "$remote_addr", "remote_user": "$remote_user", "request": "$request", "status": $status, "bytes": $body_bytes_sent, "referer": "$http_referer", "agent": "$http_user_agent", "x_forwarded": "$http_x_forwarded_for", "http_eshimin_version": "$http_eshimin_version", "server_addr": "$server_addr", "up_addr": "$upstream_addr", "up_host": "$upstream_http_host", "up_resp_time": "$upstream_response_time", "request_time": $request_time , "domain": "$host"}';
access_log logs/access.log json;
然后是 filebeat.yml
filebeat.inputs:
  # NOTE(review): the `log` input type is deprecated in recent Filebeat
  # releases in favor of `filestream`; `id` is only meaningful for
  # filestream inputs — confirm the Filebeat version in use.
  - type: log
    id: my-filestream-id
    enabled: true
    paths:
      - /usr/local/nginx/logs/access.log
    # Parse each line as JSON and lift the decoded fields to the event root,
    # overwriting any conflicting keys already set by Beats.
    json.keys_under_root: true
    json.overwrite_keys: true

filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false

setup.template.settings:
  index.number_of_shards: 1

# Ship events to Logstash (beats input listening on 6044),
# not directly to Elasticsearch.
output.logstash:
  hosts: ["192.168.96.239:6044"]

processors:
  - add_host_metadata:
      when.not.contains.tags: forwarded
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~
然后是 logstash.yml(Logstash 节点配置文件,注意不是管道 .conf 文件)
# logstash.yml — node-level settings (data/log paths, API binding, monitoring).
path.data: /data/elk/logstash/data
path.logs: /data/elk/logstash/log
# Bind address for the Logstash HTTP API.
http.host: "192.168.96.239"
node.name: "logstash115"
# Ship Logstash self-monitoring data to this Elasticsearch cluster.
xpack.monitoring.elasticsearch.hosts: ["http://192.168.96.101:9200"]
xpack.monitoring.enabled: true
xpack.monitoring.elasticsearch.username: "elastic"
# NOTE(review): plaintext credential in a config file — prefer the Logstash
# keystore or an environment-variable reference if this is committed to VCS.
xpack.monitoring.elasticsearch.password: "123"
xpack.monitoring.collection.interval: 30s
xpack.monitoring.collection.pipeline.details.enabled: true
# Endpoint for automatic GeoIP database downloads.
xpack.geoip.download.endpoint: "https://geoip.elastic.co/v1/database"
logstash 下面的 nginx.conf
# Pipeline: receive JSON-decoded nginx events from Filebeat on port 6044,
# rename the HTTP status field, and index into Elasticsearch daily indices.
input {
  beats {
    port => 6044
  }
}

filter {
  mutate {
    # Keep the nginx HTTP status under its own field name so it does not
    # collide with other tooling's use of "status".
    rename => { "status" => "nginx_status" }
  }
}

output {
  elasticsearch {
    hosts => ["http://192.168.96.101:9200"]
    # Daily index, e.g. nginx-2024.05.01
    index => "nginx-%{+YYYY.MM.dd}"
    user => "elastic"
    # NOTE(review): plaintext password in pipeline config — consider the
    # Logstash keystore.
    password => "123"
  }
}
kibana 导入仪表盘即可