Shipping HAProxy logs with Logstash via Redis and Kafka

Logstash client (shipper): collecting the HAProxy HTTP and TCP logs

input {
  file {
    path => "/data/haproxy/logs/haproxy_http.log"
    start_position => "beginning"
    type => "haproxy_http"
  }
  file {
    path => "/data/haproxy/logs/haproxy_tcp.log"
    start_position => "beginning"
    type => "haproxy_tcp"
  }
}

filter {
  if [type] == "haproxy_http" {
    grok {
      patterns_dir => "/data/logstash/patterns"
      match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_request}/%{INT:time_queue}/%{INT:time_backend_connect}/%{INT:time_backend_response}/%{NOTSPACE:time_duration} %{INT:http_status_code} %{NOTSPACE:bytes_read} %{FENG:captured_request_cookie} %{FENG:captured_response_cookie} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue} \"%{WORD:verb} %{URIPATHPARAM:request} %{WORD:http_socke}/%{NUMBER:http_version}\"" }
    }
    geoip {
      source => "client_ip"
      fields => ["ip","city_name","country_name","location"]
      add_tag => [ "geoip" ]
    }
  } else if [type] == "haproxy_tcp" {
    grok {
      match => { "message" => "(?:%{SYSLOGTIMESTAMP:syslog_timestamp}|%{TIMESTAMP_ISO8601:timestamp8601}) %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_queue}/%{INT:time_backend_connect}/%{NOTSPACE:time_duration} %{NOTSPACE:bytes_read} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue}" }
    }
  }
}
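The HTTP pattern above references a custom FENG pattern, which must be defined in a file under the patterns_dir directory; the original post does not show that file. A minimal sketch, assuming HAProxy writes each captured cookie as a single non-space token ("-" when nothing was captured):

# hypothetical patterns file; FENG simply matches one non-space token
mkdir -p /data/logstash/patterns
cat > /data/logstash/patterns/haproxy <<'EOF'
FENG \S+
EOF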

output {
  if [type] == "haproxy_http" {
    redis {
      host => "192.168.20.166"
      port => "6379"
      db => "5"
      data_type => "list"
      key => "haproxy_http.log"
    }
  } else if [type] == "haproxy_tcp" {
    redis {
      host => "192.168.20.166"
      port => "6379"
      db => "4"
      data_type => "list"
      key => "haproxy_tcp.log"
    }
  }
}
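To confirm the shipper is queueing events, check the list lengths on the Redis side (host and DB numbers as configured above):

# each LLEN shows how many events are waiting to be consumed
redis-cli -h 192.168.20.166 -n 5 LLEN haproxy_http.log
redis-cli -h 192.168.20.166 -n 4 LLEN haproxy_tcp.log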

Logstash server (indexer): reading the HAProxy logs from Redis and writing them to Elasticsearch

[root@logstashserver etc]# cat logstash.conf

input {
  # Conditionals are not supported inside input blocks, so both Redis lists
  # are consumed unconditionally; each event already carries the "type" set
  # by the shipper, and the output section below routes on it.
  redis {
    host => "192.168.20.166"
    port => "6379"
    db => "5"
    data_type => "list"
    key => "haproxy_http.log"
  }
  redis {
    host => "192.168.20.166"
    port => "6379"
    db => "4"
    data_type => "list"
    key => "haproxy_tcp.log"
  }
}


output {
  if [type] == "haproxy_http" {
    elasticsearch {
      hosts => ["es1:9200","es2:9200","es3:9200"]
      manage_template => true
      index => "logstash-haproxy-http.log-%{+YYYY-MM-dd}"
    }
  } else if [type] == "haproxy_tcp" {
    elasticsearch {
      hosts => ["es1:9200","es2:9200","es3:9200"]
      manage_template => true
      index => "logstash-haproxy-tcp.log-%{+YYYY-MM-dd}"
    }
  }
}
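Once events start flowing, one daily index per log type should appear in the cluster; a quick check:

# list the HAProxy indices with their document counts
curl -s 'http://es1:9200/_cat/indices/logstash-haproxy-*?v'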

#########################################kafka###############################################

Client side (shipper)

input {
  file {
    path => "/data/haproxy/logs/haproxy_http.log"
    start_position => "beginning"
    type => "haproxy_http"
  }
  file {
    path => "/data/haproxy/logs/haproxy_tcp.log"
    start_position => "beginning"
    type => "haproxy_tcp"
  }
}

filter {
  if [type] == "haproxy_http" {
    grok {
      patterns_dir => "/data/logstash/patterns"
      match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_request}/%{INT:time_queue}/%{INT:time_backend_connect}/%{INT:time_backend_response}/%{NOTSPACE:time_duration} %{INT:http_status_code} %{NOTSPACE:bytes_read} %{FENG:captured_request_cookie} %{FENG:captured_response_cookie} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue} \"%{WORD:verb} %{URIPATHPARAM:request} %{WORD:http_socke}/%{NUMBER:http_version}\"" }
    }
    geoip {
      source => "client_ip"
      fields => ["ip","city_name","country_name","location"]
      add_tag => [ "geoip" ]
    }
  } else if [type] == "haproxy_tcp" {
    grok {
      match => { "message" => "(?:%{SYSLOGTIMESTAMP:syslog_timestamp}|%{TIMESTAMP_ISO8601:timestamp8601}) %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_queue}/%{INT:time_backend_connect}/%{NOTSPACE:time_duration} %{NOTSPACE:bytes_read} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue}" }
    }
  }
}

output {
  if [type] == "haproxy_http" {
    kafka {
      # Kafka broker list; this Logstash instance acts as the producer
      bootstrap_servers => "kafka1:9092,kafka2:9092,kafka3:9092"
      # used as the topic name; created automatically if the broker
      # allows topic auto-creation
      topic_id => "haproxy_http.log"
      compression_type => "snappy"   # compression codec for the messages
    }
  } else if [type] == "haproxy_tcp" {
    kafka {
      bootstrap_servers => "kafka1:9092,kafka2:9092,kafka3:9092"
      topic_id => "haproxy_tcp.log"
      compression_type => "snappy"
    }
  }
}
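To verify that messages reach Kafka, the console tools that ship with the broker can list the topics and tail one of them (the --zookeeper form matches the 0.8-era broker assumed by this setup; script paths vary by installation):

# list topics, then consume the HTTP topic from the beginning
kafka-topics.sh --list --zookeeper zookeeper1:2181
kafka-console-consumer.sh --zookeeper zookeeper1:2181 --topic haproxy_http.log --from-beginning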


Server side (indexer)

input {
  # As in the Redis setup, conditionals are not allowed in input blocks;
  # both topics are consumed and the output section routes on "type".
  # zk_connect / topic_id / reset_beginning are options of the old
  # (Kafka 0.8-era) input plugin; newer plugin versions use
  # bootstrap_servers and topics instead.
  kafka {
    zk_connect => "zookeeper1:2181,zookeeper2:2181,zookeeper3:2181"
    topic_id => "haproxy_http.log"
    reset_beginning => false
    consumer_threads => 5
    decorate_events => true
  }
  kafka {
    zk_connect => "zookeeper1:2181,zookeeper2:2181,zookeeper3:2181"
    topic_id => "haproxy_tcp.log"
    reset_beginning => false
    consumer_threads => 5
    decorate_events => true
  }
}


output {
  if [type] == "haproxy_http" {
    elasticsearch {
      hosts => ["es1:9200","es2:9200","es3:9200"]
      manage_template => true
      index => "logstash-haproxy-http.log-%{+YYYY-MM-dd}"
    }
  } else if [type] == "haproxy_tcp" {
    elasticsearch {
      hosts => ["es1:9200","es2:9200","es3:9200"]
      manage_template => true
      index => "logstash-haproxy-tcp.log-%{+YYYY-MM-dd}"
    }
  }
}
