一、logstash收集日志写入redis
1.配置logstash收集单个日志到redis
[root@web01 ~]# vim /etc/logstash/conf.d/file_redis.conf
input {
file {
path => "/var/log/nginx/access.log"
start_position => "end"
codec => "json"
}
}
output {
redis {
host => "172.16.1.81"
port => "6379"
key => "nginx_log"
data_type => "list"
}
}
2.配置logstash收集多个日志到redis
1)配置
[root@web 01 ~]# cat /etc/logstash/conf.d/file_redis.conf
input {
file {
type => "nginx_log"
path => "/var/log/nginx/access.log"
start_position => "end"
codec => "json"
}
file {
type => "tomcat_log"
path => "/usr/local/tomcat/logs/tomcat_access_json.*.log"
start_position => "end"
codec => "json"
}
}
output {
if [type] == "nginx_log" {
redis {
host => "172.16.1.81"
port => "6379"
key => "nginx_log"
data_type => "list"
db => "0"
}
}
if [type] == "tomcat_log" {
redis {
host => "172.16.1.81"
port => "6379"
key => "tomcat_log"
data_type => "list"
db => "1"
}
}
}
2)启动
[root@web01 ~ ]# logstash -f /etc/logstash/conf.d/file_redis.conf
3)访问测试查看redis数据
[root@redis01 ~]
127.0.0.1:6379> KEYS *
1) "nginx_log"
127.0.0.1:6379> LLEN nginx_log
(integer) 12
127.0.0.1:6379> SELECT 1
OK
127.0.0.1:6379[1]> KEYS *
1) "tomcat_log"
127.0.0.1:6379[1]> LLEN tomcat_log
(integer) 18
3.配置logstash取出redis数据写入ES
1)配置
[root@redis02 ~]# vim /etc/logstash/conf.d/redis_es.conf
input {
redis {
host => "172.16.1.81"
port => "6379"
data_type => "list"
key => "nginx_log"
db => "0"
}
redis {
host => "172.16.1.81"
port => "6379"
data_type => "list"
key => "tomcat_log"
db => "1"
}
}
output {
if [type] == "nginx_log" {
elasticsearch {
hosts => ["10.0.0.71:9200" ]
index => "nginx_redis_es_%{+YYYY-MM-dd}"
}
}
if [type] == "tomcat_log" {
elasticsearch {
hosts => ["10.0.0.71:9200" ]
index => "tomcat_redis_es_%{+YYYY-MM-dd}"
}
}
}
2)启动
[root@redis02 ~ ]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/redis_es.conf
3)查看redis数据
127.0.0.1:6379[1]> LLEN tomcat_log
(integer) 55
127.0.0.1:6379[1]> LLEN tomcat_log
(integer) 0
127.0.0.1:6379[1]> SELECT 0
OK
127.0.0.1:6379> LLEN nginx_log
(integer) 0
二、使用logstash配置rsyslog收集haproxy日志
1.rsyslog介绍
在centos 6 及之前的版本叫做syslog,centos 7 开始叫做rsyslog,根据官方的介绍,rsyslog (2013 年版本)可以达到每秒转发百万条日志的级别,rsyslog是日志收集处理工具
它提供了高性能,出色的安全性和模块化设计。尽管rsyslog最初是常规的syslogd,但已发展成为一种瑞士军刀式的记录工具,能够接受来自各种来源的输入,并将其转换,然后输出到不同的目的地。
当应用有限的处理时,RSYSLOG每秒可以将超过一百万的消息传递到本地目的地。即使在远程目的地和更精细的处理下,性能通常也被认为是“惊人的”。
2.安装配置rsyslog
[root@lb01 ~]
[root@lb01 ~]
$ModLoad imudp
$UDPServerRun 514
$ModLoad imtcp
$InputTCPServerRun 514
local6.* @@10.0.0.53:2222
3.安装并配置haproxy
[root@lb01 ~]
[root@lb01 ~]
global
maxconn 100000
chroot /var/lib/haproxy
uid 99
gid 99
daemon
nbproc 1
pidfile /var/run/haproxy.pid
log 127.0.0.1 local6 info
defaults
option http-keep-alive
option forwardfor
maxconn 100000
mode http
timeout connect 300000ms
timeout client 300000ms
timeout server 300000ms
listen status
mode http
bind 0.0.0.0:9999
stats enable
log global
stats uri /haproxy-status
stats auth haadmin:123456
frontend web_port
bind 0.0.0.0:80
mode http
option httplog
log global
option forwardfor
acl tomcat hdr_dom(host) -i linux.tomcat.com
acl nginx hdr_dom(host) -i linux.nginx.com
use_backend tomcat_host if tomcat
use_backend nginx_host if nginx
backend tomcat_host
mode http
option httplog
balance source
server web1 10.0.0.7:8080 check inter 2000 rise 3 fall 2 weight 1
backend nginx_host
mode http
option httplog
balance source
server web1 10.0.0.7:80 check inter 2000 rise 3 fall 2 weight 1
check:检查健康状态
inter:检查状态的间隔时间
rise:检查次数
fall:检查过程中错误次数
weight:权重
4.启动服务
[root@redis01 ~ ]# systemctl start haproxy.service
[root@redis01 ~ ]# systemctl start rsyslog
5.配置hosts测试
10.0.0.81 linux.tomcat.com linux.nginx.com
#分别访问
linux.tomcat.com
linux.nginx.com
6.配置logstash收集haproxy日志
1)配置收集到标准输出
[root@redis01 ~]# vim /etc/logstash/conf.d/haproxy_stdout.conf
input {
syslog {
port => "2222"
}
}
output {
stdout {}
}
2)配置收集到ES
[root@redis01 ~]# vim /etc/logstash/conf.d/haproxy_es.conf
input {
syslog {
port => "2222"
}
}
output {
elasticsearch {
hosts => ["10.0.0.71:9200" ]
index => "haproxy_es_%{+YYYY-MM-dd}"
}
}
三、收集TCP/UDP日志
通过logstash的tcp/udp插件收集日志,通常用于在向elasticsearch日志补录丢失的部分日志,可以将丢失的日志通过一个TCP端口直接写入到elasticsearch服务器。
1.配置Logstash
#进入Logstash配置文件目录
[root@redis01 ~ ]# cd /etc/logstash/conf.d/
#编辑Logstash配置文件
[root@redis01 conf.d ]# vim tcp.conf
input {
tcp {
port => 1234
type => "tcplog"
mode => "server"
}
}
output {
stdout {
codec => rubydebug
}
}
2.启动
[root@redis01 conf.d]
[root@redis01 ~]
tcp6       0      0 :::1234          :::*          LISTEN      8656/java
3.使用telnet测试
[root@redis02 ~]# telnet 172.16.1.81 1234
Trying 172.16.1.81...
Connected to 172.16.1.81.
Escape character is '^]'.
13
12335346457 thgdfhbd
#查看
{
"port" => 58991,
"@version" => "1",
"@timestamp" => 2020-12-08T16:58:01.351Z,
"host" => "172.16.1.82",
"message" => "13\r",
"type" => "tcplog"
}
{
"port" => 58991,
"@version" => "1",
"@timestamp" => 2020-12-08T16:58:27.160Z,
"host" => "172.16.1.82",
"message" => "12335346457thgdfhbd\r",
"type" => "tcplog"
}
4.使用nc工具
1)安装nc工具
#使用yum安装nc
[root@web01 ~ ]# yum install -y nc
2)使用测试
1. 使用nc传输数据
[root@web01 ~ ]# echo "test nc" | nc 10.0.0.81 1234
2. 收集文件日志
[root@web01 ~ ]# cat /etc/passwd | nc 10.0.0.81 1234
3. 实时收集远端服务器的日志
[root@web01 ~ ]# tail -f /var/log/nginx/access.log | nc 10.0.0.81 1234
5.收集多个tcp日志到ES
1)配置
[root@redis01 ~]# cat /etc/logstash/conf.d/tcp_es.conf
input {
tcp {
port => 1234
type => "nginxlog"
mode => "server"
}
tcp {
port => "2345"
type => "tomcatlog"
mode => "server"
}
}
output {
if [type] == "nginxlog" {
elasticsearch {
hosts => ["10.0.0.71:9200" ]
index => "tcp_nginxlog_%{+YYYY-MM-dd}"
}
}
if [type] == "tomcatlog" {
elasticsearch {
hosts => ["10.0.0.71:9200" ]
index => "tcp_tomcatlog_%{+YYYY-MM-dd}"
}
}
}
2)启动
[root@redis01 ~ ]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/tcp_es.conf
3)测试
[root@web01 ~ ]# tail -f /var/log/nginx/access.log | nc 10.0.0.81 1234
[root@web01 ~ ]# tail -f /usr/local/tomcat/logs/tomcat_access_json.$(date +%F).log | nc 10.0.0.81 2345
#页面查看索引
3.配置filebeat收集日志到ES
1)配置
[root@web01 ~ ]
filebeat.inputs:
- type: log
enabled: true
paths:
- /var/log/nginx/access.log
output.elasticsearch:
hosts: ["10.0.0.71:9200" ]
index:
2)启动
[root@web01 ~ ]# filebeat -e -c /etc/filebeat/filebeat.yml
[root@web01 ~ ]# systemctl start filebeat
4.Filebeat收集单类型多个日志到Logstash
1)配置
[root@web01 ~ ]
filebeat.inputs:
- type: log
paths:
- /var/log/nginx/access.log
- /usr/local/tomcat/logs/tomcat_access_json.2020-12-08.log
document_type: ngx_zls
output.logstash:
hosts: ["10.0.0.81:6666" ]
enabled: true
worker: 1
compression_level: 3
2)重启
#重启Filebeat
[root@web01 ~ ]# systemctl restart filebeat
3)logstash收集filebeat传来的数据到ES
[root@redis01 ~]# vim /etc/logstash/conf.d/filebeat_logstash_es.conf
input {
beats {
port => "6666"
}
}
output {
elasticsearch {
hosts => ["10.0.0.71:9200" ]
index => "filebeat_logstash_%{+YYYY-MM-dd}"
}
}
4)启动logstash
[root@redis01 ~ ]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/filebeat_logstash_es.conf
5)测试
四、filebeat介绍
1.描述
Filebeat附带预构建的模块,这些模块包含收集、解析、充实和可视化各种日志文件格式数据所需的配置,每个Filebeat模块由一个或多个文件集组成,这些文件集包含摄取节点管道、Elasticsearch模板、Filebeat勘探者配置和Kibana仪表盘。
Filebeat模块很好的入门,它是轻量级单用途的日志收集工具,用于在没有安装java的服务器上专门收集日志,可以将日志转发到logstash、elasticsearch或redis等场景中进行下一步处理
2.Filebeat和Logstash使用内存对比
1)Logstash内存占用
[root@elkstack03 ~ ]
12628
[root@elkstack03 ~ ]
VmPeak: 6252788 kB
VmSize: 6189252 kB
VmLck: 0 kB
VmHWM: 661168 kB
VmRSS: 661168 kB
VmData: 6027136 kB
VmStk: 88 kB
VmExe: 4 kB
VmLib: 16648 kB
VmPTE: 1888 kB
VmSwap: 0 kB
2)filebeat内存占用
[root@test ~ ]
VmPeak: 11388 kB
VmSize: 11388 kB
VmLck: 0 kB
VmHWM: 232 kB
VmRSS: 232 kB
VmData: 10424 kB
VmStk: 88 kB
VmExe: 864 kB
VmLib: 0 kB
VmPTE: 16 kB
VmSwap: 0 kB
VmPeak: 25124 kB
VmSize: 25124 kB
VmLck: 0 kB
VmHWM: 15144 kB
VmRSS: 15144 kB
VmData: 15496 kB
VmStk: 88 kB
VmExe: 4796 kB
VmLib: 0 kB
VmPTE: 68 kB
VmSwap: 0 kB
五、filebeat部署
1.安装
#上传
[root@web01 ~ ]# rz
[root@web01 ~ ]# ll
-rw-r--r-- 1 root root 11790119 May 24 2020 filebeat-6.6.0-x86_64.rpm
#安装
[root@web01 ~ ]# yum localinstall -y filebeat-6.6.0-x86_64.rpm
【推荐】编程新体验,更懂你的AI,立即体验豆包MarsCode编程助手
【推荐】凌霞软件回馈社区,博客园 & 1Panel & Halo 联合会员上线
【推荐】抖音旗下AI助手豆包,你的智能百科全书,全免费不限次数
【推荐】博客园社区专享云产品让利特惠,阿里云新客6.5折上折
【推荐】轻量又高性能的 SSH 工具 IShell:AI 加持,快人一步
· 【.NET】调用本地 Deepseek 模型
· CSnakes vs Python.NET:高效嵌入与灵活互通的跨语言方案对比
· DeepSeek “源神”启动!「GitHub 热点速览」
· 我与微信审核的“相爱相杀”看个人小程序副业
· Plotly.NET 一个为 .NET 打造的强大开源交互式图表库