
Automation - ELK Log Management

 
Two servers:  node1  192.168.1.105    node2  192.168.1.106    add the name-to-IP entries to /etc/hosts on both

#  wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo           ### run on both servers

http://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html    download the JDK here; also download the Elasticsearch RPM from its official site

# yum install jdk-8u171-linux-x64.rpm elasticsearch-5.4.0.rpm                                            ### run on both servers

# vim /etc/elasticsearch/elasticsearch.yml                                 #### run on node1

# grep "^[a-Z]" /etc/elasticsearch/elasticsearch.yml 
cluster.name: elk-cluster1
node.name: elk-node1
path.data: /data/elkdata
path.logs: /data/logs
bootstrap.memory_lock: true
network.host: 192.168.1.105
http.port: 9200
discovery.zen.ping.unicast.hosts: ["192.168.1.105", "192.168.1.106"]

Run on both servers:   mkdir -p /data/elkdata /data/logs   &&   chown elasticsearch.elasticsearch /data/ -R

#### run on node1

# vim /usr/lib/systemd/system/elasticsearch.service 

Add under the [Service] section, around line 40:   LimitMEMLOCK=infinity

# cat /etc/elasticsearch/jvm.options   JVM memory settings (heap size etc.) live here; if you change the heap, set it to roughly half of physical RAM

-Xms1g
-Xmx1g

#systemctl daemon-reload

# systemctl restart elasticsearch.service

scp /etc/elasticsearch/elasticsearch.yml 192.168.1.106:/etc/elasticsearch/

### run on node2

# grep '^[a-z]' /etc/elasticsearch/elasticsearch.yml 
cluster.name: elk-cluster1
node.name: elk-node2
path.data: /data/elkdata
path.logs: /data/logs
bootstrap.memory_lock: true
network.host: 192.168.1.106
http.port: 9200
discovery.zen.ping.unicast.hosts: ["192.168.1.105", "192.168.1.106"]

# vim /usr/lib/systemd/system/elasticsearch.service 

LimitMEMLOCK=infinity   

# systemctl restart elasticsearch.service
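
A quick sanity check of the cluster (assuming both nodes are up and reachable):

# curl -s http://192.168.1.105:9200/_cluster/health?pretty      ## "status" should be green and "number_of_nodes" should be 2
# curl -s http://192.168.1.105:9200/_cat/nodes?v                ## both elk-node1 and elk-node2 should be listed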

## Upload elasticsearch-head.tar.gz and extract it:  tar xvf elasticsearch-head.tar.gz  

## yum install npm -y

#cd elasticsearch-head

#npm run start &   ##node1操作

# vim /etc/elasticsearch/elasticsearch.yml      ## run on both servers

bootstrap.memory_lock: true

http.cors.enabled: true
http.cors.allow-origin: "*"

 

chmod 644 /var/log/messages       ## so the logstash user can read the system log collected later

   

Note: if Elasticsearch will not start and reports "[1]: system call filters failed to install; check the logs and fix your configuration or disable system call filters at your own risk", the kernel (CentOS 6) does not support seccomp; change /etc/elasticsearch/elasticsearch.yml to:

bootstrap.memory_lock: false

bootstrap.system_call_filter: false

 

 


##### node2


yum install mariadb mariadb-server rubygems


gem sources --add http://gems.ruby-china.com/ --remove https://rubygems.org


###node1

yum install kibana-5.6.5-x86_64.rpm

vim /etc/kibana/kibana.yml


line 2:   server.port: 5601
line 7:   server.host: "192.168.1.105"
line 21:  elasticsearch.url: "http://192.168.1.106:9200"


systemctl restart kibana

http://192.168.1.105:5601


In Kibana, add the index pattern  [logstash-system-log-1105]-YYYY.MM.DD  and click Create.

### Install Nginx on node1 as a reverse proxy in front of Kibana, with username/password login

useradd nginx

tar -xvf nginx-1.10.3.tar.gz

cd nginx-1.10.3

yum install pcre pcre-devel openssl openssl-devel

 ./configure --prefix=/usr/local/nginx --with-http_sub_module --with-http_ssl_module

make

make install

vim /usr/local/nginx/conf/nginx.conf

user nginx;

worker_processes auto;

mkdir /usr/local/nginx/conf/conf.d

Add inside the http {} block:

include /usr/local/nginx/conf/conf.d/*.conf;

 vim /usr/local/nginx/conf/conf.d/kibana5612.conf

upstream kibana_server {
        server  127.0.0.1:5601 weight=1 max_fails=3  fail_timeout=60;
}

server {
        listen 80;
        server_name www.kibana5612.com;
        location / {
        proxy_pass http://kibana_server;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
        }
}

 

 #yum install httpd-tools -y

 #htpasswd -bc /usr/local/nginx/htppass.txt kibana <password>      ## -b reads the password from the command line; <password> is a placeholder for your own

#chown nginx.nginx /usr/local/nginx/ -R

#vim /usr/local/nginx/conf/conf.d/kibana5612.conf

Add these two lines under server_name:

auth_basic "Restricted Access";
auth_basic_user_file /usr/local/nginx/htppass.txt;

/usr/local/nginx/sbin/nginx
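
A quick check that the proxy and basic auth work (replace <password> with whatever was set with htpasswd):

# curl -I -H "Host: www.kibana5612.com" http://127.0.0.1/                         ## expect HTTP/1.1 401 Unauthorized
# curl -I -u kibana:<password> -H "Host: www.kibana5612.com" http://127.0.0.1/    ## expect HTTP/1.1 200 OK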

#yum install  logstash-5.6.5.rpm

#vim /usr/local/nginx/conf/nginx.conf

 

log_format access_json '{"@timestamp":"$time_iso8601",'
        '"host":"$server_addr",'
        '"clientip":"$remote_addr",'
        '"size":$body_bytes_sent,'
        '"responsetime":$request_time,'
        '"upstreamtime":"$upstream_response_time",'
        '"upstreamhost":"$upstream_addr",'
        '"http_host":"$host",'
        '"url":"$uri",'
        '"domain":"$host",'
        '"xff":"$http_x_forwarded_for",'
        '"referer":"$http_referer",'
        '"status":"$status"}';

 

Either this format or the one above can be used:

[root@linux-node1 ~]# vim /usr/local/nginx/conf/nginx.conf

log_format json '{"@timestamp":"$time_iso8601",'
               '"@version":"1",'
               '"client":"$remote_addr",'
               '"url":"$uri",'
               '"status":"$status",'
               '"domain":"$host",'
               '"host":"$server_addr",'
               '"size":$body_bytes_sent,'
               '"responsetime":$request_time,'
               '"referer": "$http_referer",'
               '"ua": "$http_user_agent"'
               '}';

access_log  logs/access_json.log  json;

 

 

 

If using the first (access_json) format, point the access log at it:

    access_log  /var/log/nginx/access.log  access_json;

#mkdir /var/log/nginx

#chown nginx.nginx /var/log/nginx -R

#mkdir /usr/local/nginx/html/web/   && touch /usr/local/nginx/html/web/index.html

#echo "is web" > /usr/local/nginx/html/web/index.html

#/usr/local/nginx/sbin/nginx -s reload

Open the site in a browser and check that new access-log entries are written as JSON.
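
Optionally verify that each entry really is valid JSON (assumes Python is installed):

# tail -n1 /var/log/nginx/access.log | python -m json.tool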

 

vim /etc/logstash/conf.d/nginx.conf
input{
  file {
    path => "/var/log/nginx/access.log"
    type => "nginx-access-log-1105"
    start_position => "beginning"
    stat_interval => "2"
    codec => "json"
  }
  file {
    path => "/var/log/messages"
    type => "system-log-1105"
    start_position => "beginning"
    stat_interval => "2"
  }
}

output{
  if [type] == "nginx-access-log-1105" {
    elasticsearch {
      hosts => ["192.168.1.105:9200"]
      index => "logstash-nginx-accesslog-1105-%{+YYYY.MM.dd}"
    }
  }
  if [type] == "system-log-1105" {
    elasticsearch {
      hosts => ["192.168.56.12:9200"]
      index => "logstash-system-log-1105-%{+YYYY.MM.dd}"
    }
  }
}

/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/nginx.conf -t

Then add the corresponding index patterns in Kibana from the Elasticsearch cluster.
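
Before adding the index patterns, the indices can be confirmed on the Elasticsearch side:

# curl -s http://192.168.1.105:9200/_cat/indices?v | grep -E "nginx|system"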

 

#node2

#mkdir /apps
#tar -xvf apache-tomcat-7.0.69.tar.gz -C /apps
#ln -s /apps/apache-tomcat-7.0.69 /apps/tomcat
#cd /apps/tomcat/webapps
#touch index.html && echo "tomcat" >> index.html
#chmod a+x /apps/tomcat/bin/*.sh
#/apps/tomcat/bin/catalina.sh start
#vim /apps/tomcat/conf/server.xml

<Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
               prefix="tomcat_access_log" suffix=".log"
               pattern="{&quot;clientip&quot;:&quot;%h&quot;,&quot;ClientUser&quot;:&quot;%l&quot;,&quot;authenticated&quot;:&quot;%u&quot;,&quot;AccessTime&quot;:&quot;%t&quot;,&quot;method&quot;:&quot;%r&quot;,&quot;status&quot;:&quot;%s&quot;,&quot;SendBytes&quot;:&quot;%b&quot;,&quot;Query?string&quot;:&quot;%q&quot;,&quot;partner&quot;:&quot;%{Referer}i&quot;,&quot;AgentVersion&quot;:&quot;%{User-Agent}i&quot;}"/>

/apps/tomcat/bin/catalina.sh stop && /apps/tomcat/bin/catalina.sh start     # restart Tomcat
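
To generate a test entry and confirm the new JSON access-log format (assuming Tomcat listens on the default port 8080 and uses the default date-stamped log file name):

# curl -s http://192.168.1.106:8080/ > /dev/null
# tail -n1 /apps/tomcat/logs/tomcat_access_log.$(date +%Y-%m-%d).log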


vim /etc/logstash/conf.d/tomcat.conf
input {
  file {
    path => "/apps/tomcat/logs/tomcat_access_log.*.log"
    type => "tomcat-access-log-1106"
    start_position => "beginning"
    stat_interval => "2"
    codec => "json"
  } 
}

output {
  elasticsearch {
    hosts => ["192.168.1.106:9200"]
    index => "logstash-tomcat-access-log-1106-%{+YYYY.MM.dd}"   
 }
  file {
    path => "/tmp/tomcat.txt"  
  }
}

/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/tomcat.conf -t
systemctl restart logstash

 

###node2

systemctl start mariadb.service

mysql

create database elk  character set utf8 collate utf8_bin;

grant all privileges on elk.* to elk@"%" identified by '123456';
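
A quick remote login test from node1 confirms the grant works (MariaDB on the default port 3306 is assumed):

# mysql -uelk -p123456 -h 192.168.1.106 -e "show databases;"      ## the elk database should be listed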

 

########node1

unzip  mysql-connector-java-5.1.42.zip

mkdir -p  /usr/share/logstash/vendor/jar/jdbc

cp mysql-connector-java-5.1.42-bin.jar  /usr/share/logstash/vendor/jar/jdbc/

chown  logstash.logstash /usr/share/logstash/vendor/jar/ -R

yum install rubygems

 

gem sources --add https://gems.ruby-china.org/ --remove https://rubygems.org/

 

gem sources --list

 

/usr/share/logstash/bin/logstash-plugin  list

 

/usr/share/logstash/bin/logstash-plugin   install  logstash-output-jdbc
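
With the connector jar and the plugin in place, an output block along these lines can write events into the elk database. This is only a sketch: the table (assumed here to be elklog with host, status and message columns) has to be created by hand first, and the option names should be checked against the logstash-output-jdbc documentation.

output {
  jdbc {
    # JDBC URL pointing at the elk database created on node2 (credentials from the GRANT above)
    connection_string => "jdbc:mysql://192.168.1.106:3306/elk?user=elk&password=123456&characterEncoding=UTF-8"
    # SQL template plus the event fields bound to the ? placeholders (elklog is a hypothetical example table)
    statement => ["INSERT INTO elklog (host, status, message) VALUES (?, ?, ?)", "host", "status", "message"]
  }
}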

 

 

 ##### Collecting TCP logs

node2###

vim /etc/logstash/conf.d/tcp.conf

 

input {
  tcp {
    port => "1234"
    type => "tcplog"
  }
}

output {                                   ## for testing: print events to stdout
  stdout {
    codec => "rubydebug"
  }
}

output {                                   ## for real use: write to Elasticsearch (keep only one of the two output blocks)
  elasticsearch {
    hosts => ["192.168.1.105:9200"]
    index => "tcplog-1106-%{+YYYY.MM.dd}"
  }
}



/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/tcp.conf
systemctl restart logstash.service
###### node3
yum install nc -y
echo "test" | nc 192.168.1.106 1234
nc 192.168.1.106 1234 < /etc/passwd

Create the index pattern in Kibana:
[tcplog-1106]-YYYY.MM.DD

 

 

############# Collecting syslog

Install HAProxy on node1

cd /usr/local/src/   && tar -xvf haproxy-1.7.9.tar.gz && cd haproxy-1.7.9

yum install gcc pcre pcre-devel openssl  openssl-devel -y

make TARGET=linux2628 USE_PCRE=1 USE_OPENSSL=1 USE_ZLIB=1 PREFIX=/usr/local/haproxy

make install PREFIX=/usr/local/haproxy

cp /usr/local/src/haproxy-1.7.9/haproxy /usr/sbin/

cp /usr/local/src/haproxy-1.7.9/haproxy-systemd-wrapper /usr/sbin/

vim /etc/sysconfig/haproxy

# Add extra options to the haproxy daemon here. This can be useful for

# specifying multiple configuration files with multiple -f options.

# See haproxy(1) for a complete list of options.

OPTIONS=""

 

vim /usr/lib/systemd/system/haproxy.service

[Unit]
Description=HAProxy Load Balancer
After=syslog.target network.target

[Service]
EnvironmentFile=/etc/sysconfig/haproxy
ExecStart=/usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid $OPTIONS
ExecReload=/bin/kill -USR2 $MAINPID

[Install]
WantedBy=multi-user.target

mkdir /etc/haproxy/

vim /etc/haproxy/haproxy.cfg

global
maxconn 100000
chroot /usr/local/haproxy
uid 99
gid 99
daemon
nbproc 1
pidfile /usr/local/haproxy/run/haproxy.pid
log 127.0.0.1 local6 info

defaults
option http-keep-alive
option  forwardfor
maxconn 100000
mode http
timeout connect 300000ms
timeout client  300000ms
timeout server  300000ms

listen stats
 mode http
 bind 0.0.0.0:9999
 stats enable
 log global
 stats uri     /haproxy-status
 stats auth    haadmin:123456

frontend kibana_web_port
    bind 192.168.1.105:80
    mode http
    log global    # logging must be enabled here
    default_backend kibana_web_http_nodes

backend kibana_web_http_nodes
    mode http
    #balance source
    balance roundrobin
    cookie  SESSION_COOKIE  insert indirect nocache
    #option httpchk GET /XXX/XXX.
    server 192.168.1.105  192.168.1.105:5601  cookie kibana-web1 check inter 2000 fall 3 rise 5
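
Before restarting, the configuration can optionally be syntax-checked:

/usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -c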

 systemctl restart haproxy.service

Browse to 192.168.1.105; the request is automatically proxied through to Kibana on port 5601.

 

vim /etc/rsyslog.conf    ## enable / add the following (line numbers are those of the stock file)

 15 $ModLoad imudp
 16 $UDPServerRun 514
 19 $ModLoad imtcp
 20 $InputTCPServerRun 514

 92 local6.*        /var/log/haproxy.log
 93 local6.*        @@192.168.1.106:1514

 systemctl restart rsyslog
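
A quick way to verify forwarding (logger writes a test message to the local6 facility, which should show up in /var/log/haproxy.log and reach the Logstash syslog input on node2):

# logger -p local6.info "haproxy rsyslog forwarding test"
# tail -n1 /var/log/haproxy.log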

Browse to 192.168.1.105:9999/haproxy-status to check the HAProxy stats page.

 

####node2

vim /etc/logstash/conf.d/rsyslog.conf

input {
  syslog {
    port => "1514"
    type => "rsyslog-1106"
  }
}

output {
  if [type] == "rsyslog-1106" {
  elasticsearch {
  hosts => ["192.168.1.105:9200"]
  index => "rsyslog-1106-%{+YYYY.MM.dd}"
  }
  }
}

systemctl restart logstash

Add the index pattern in Kibana.

 

#### Installing a ZooKeeper cluster

Three hosts:  192.168.1.105 node1     192.168.1.106 node2     192.168.1.104 node3

Add all three hosts to /etc/hosts on every node so they resolve each other.

Install the JDK   ## upload the JDK package to /usr/local/src

yum install jdk-8u151-linux-x64.rpm -y

Upload zookeeper-3.4.11.tar.gz to /usr/local/src/     ### on all three nodes; an odd number of nodes is preferred so the cluster keeps a majority quorum when one server goes down

tar xvf zookeeper-3.4.11.tar.gz                        #### on all three nodes

ln -s /usr/local/src/zookeeper-3.4.11 /usr/local/zookeeper   ######## on all three nodes

mkdir /usr/local/zookeeper/data         ##### on all three nodes

cp /usr/local/zookeeper/conf/zoo_sample.cfg /usr/local/zookeeper/conf/zoo.cfg    ## on all three nodes; copy the sample config template

vim /usr/local/zookeeper/conf/zoo.cfg

tickTime=2000             ## heartbeat interval between servers (and between client and server), in milliseconds
initLimit=10              ## maximum number of ticks a follower may take for its initial connection and sync with the leader
syncLimit=5               ## maximum ticks for a leader/follower request and reply; a follower that cannot respond in time is considered down
dataDir=/usr/local/zookeeper/data          ### directory where ZooKeeper keeps its data
clientPort=2181                            ## port ZooKeeper listens on for client connections
server.1=192.168.1.105:2888:3888           ### server id = server IP : leader/follower data-sync port : leader-election port
server.2=192.168.1.106:2888:3888
server.3=192.168.1.104:2888:3888

 

#echo "1" > /usr/local/zookeeper/data/myid     ##node1执行

#echo "2" > /usr/local/zookeeper/data/myid     ##node2执行

#echo "3" > /usr/local/zookeeper/data/myid     ##node3执行

 

/usr/local/zookeeper/bin/zkServer.sh start     # run on all three nodes

 

/usr/local/zookeeper/bin/zkServer.sh status  ## one of the three nodes should report leader.  Note: if a node will not start, check for a leftover process with ps -ef | grep java and kill it; start the nodes in order myid 1 -> 2 -> 3
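
The four-letter-word commands are another quick health check (assumes nc is installed):

# echo ruok | nc 192.168.1.105 2181                 ## should answer imok
# echo stat | nc 192.168.1.105 2181 | grep Mode     ## shows leader or follower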

 

Upload the Kafka tarball to /usr/local/src/    ## on all three nodes

node1##

tar xvf /usr/local/src/kafka_2.11-1.0.0.tgz

 ln -sv /usr/local/src/kafka_2.11-1.0.0 /usr/local/kafka

vim /usr/local/kafka/config/server.properties

broker.id=1

listeners=PLAINTEXT://192.168.1.105:9092

zookeeper.connect=192.168.1.105:2181,192.168.1.106:2181,192.168.1.101:2181

 #/usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties

node2##

tar xvf /usr/local/src/kafka_2.11-1.0.0.tgz

 ln -sv /usr/local/src/kafka_2.11-1.0.0 /usr/local/kafka

vim /usr/local/kafka/config/server.properties

broker.id=2

listeners=PLAINTEXT://192.168.1.106:9092

zookeeper.connect=192.168.1.105:2181,192.168.1.106:2181,192.168.1.101:2181

 

 #/usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties

node3##

tar xvf /usr/local/src/kafka_2.11-1.0.0.tgz

 ln -sv /usr/local/src/kafka_2.11-1.0.0 /usr/local/kafka

vim /usr/local/kafka/config/server.properties

broker.id=3

listeners=PLAINTEXT://192.168.1.101:9092

zookeeper.connect=192.168.1.105:2181,192.168.1.106:2181,192.168.1.101:2181

 #/usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties

 

 /usr/local/kafka/bin/kafka-topics.sh --create --zookeeper 192.168.1.105:2181,192.168.1.106:2181,192.168.1.101:2181 --partitions 3 --replication-factor 3 --topic logstashtest   # test: create a topic

/usr/local/kafka/bin/kafka-topics.sh --describe --zookeeper 192.168.1.105:2181,192.168.1.106:2181,192.168.1.101:2181 --topic logstashtest  # test: describe the topic

 

 

 /usr/local/kafka/bin/kafka-topics.sh --delete --zookeeper 192.168.1.105:2181,192.168.1.106:2181,192.168.1.101:2181 --topic logstashtest    ## delete the topic

/usr/local/kafka/bin/kafka-topics.sh --list --zookeeper 192.168.1.105:2181,192.168.1.106:2181,192.168.1.101:2181   # list all topics

/usr/local/kafka/bin/kafka-console-producer.sh --broker-list 192.168.1.105:9092,192.168.1.106:9092,192.168.1.101:9092 --topic logstashtest    ## send messages (console producer)

 

 /usr/local/kafka/bin/kafka-console-consumer.sh --zookeeper 192.168.1.105:2181,192.168.1.106:2181,192.168.1.101:2181 --topic logstashtest --from-beginning  ### consume the data from another Kafka node (console consumer)
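
Once the console producer/consumer test works, Logstash can use the Kafka cluster as a buffer between the log shippers and Elasticsearch. A minimal sketch, assuming the Logstash 5.x kafka input/output plugins and a hypothetical topic name nginx-accesslog-kafka; adapt it to the real topics and conf.d files:

# on the node shipping logs, output to Kafka instead of Elasticsearch:
output {
  kafka {
    bootstrap_servers => "192.168.1.105:9092,192.168.1.106:9092,192.168.1.101:9092"
    topic_id => "nginx-accesslog-kafka"
    codec => "json"
  }
}

# on the node writing to Elasticsearch, read the same topic back:
input {
  kafka {
    bootstrap_servers => "192.168.1.105:9092,192.168.1.106:9092,192.168.1.101:9092"
    topics => ["nginx-accesslog-kafka"]
    codec => "json"
  }
}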

 

 

 

 

 

 
