elk分布式日志
filebeat安装
下载
wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-6.8.5-linux-x86_64.tar.gz
filebeat.yml
# filebeat.yml — tails application logs and ships them to Logstash.
# (A direct-to-Elasticsearch output with per-type index routing is kept
# below, commented out, as an alternative.)
filebeat.inputs:
  # mall-admin application logs
  - type: log
    enabled: true
    paths:
      - /opt/logs/mall-admin/mall-admin*.log
    fields:
      type: "mall-admin"
    fields_under_root: true
    encoding: utf-8
    # Java stack-trace continuation lines ("  at ...", "...", "Caused by:")
    # match the pattern and are appended to the preceding event.
    multiline.pattern: '^[[:space:]]+(at|\.{3})[[:space:]]+\b|^Caused by:'
    multiline.negate: false
    multiline.match: after
  # study application logs
  - type: log
    enabled: true
    paths:
      - /data/study/study*.log
    fields:
      type: "study"
    fields_under_root: true
    encoding: utf-8
    # negate: true + match: after — any line NOT starting with a
    # "yyyy-mm-dd" timestamp is joined to the previous event.
    multiline.pattern: '^\s*\d\d\d\d-\d\d-\d\d'
    multiline.negate: true
    multiline.match: after

# Alternative: write straight to Elasticsearch, routing each input's
# events to its own index by the custom "type" field.
#output.elasticsearch:
#  enabled: true
#  hosts: ["172.31.0.3:9200","172.31.0.4:9200"]
#  indices:
#    - index: "mall-admin-%{+yyyy.MM.dd}"
#      when.contains:
#        type: "mall-admin"
#    - index: "study-%{+yyyy.MM.dd}"
#      when.contains:
#        type: "study"

output.logstash:
  hosts: ["172.31.0.4:5044"]
  # Stored in @metadata; Logstash must reference it to honour this index.
  index: "filebeat-161-%{+yyyy.MM.dd}"
filebeat启动
nohup ./filebeat -e -c filebeat.yml &
logstash安装
docker-compose.yml
version: '2.2'
services:
  logstash:
    image: logstash:7.6.2
    container_name: logstash
    environment:
      - TZ=Asia/Shanghai
    volumes:
      # Pipeline definition and main config mounted from the host.
      - /opt/logstash/conf.d/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
      - /opt/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
    ports:
      # Beats listener (filebeat ships here). Quoted — port mappings
      # must be strings to avoid YAML's sexagesimal-number trap.
      - "5044:5044"
    networks:
      - elastic
networks:
  elastic:
    driver: bridge
logstash.conf
input {
  # Filebeat ships to port 5044 (see filebeat.yml output.logstash and the
  # compose port mapping); without this beats input those connections
  # would be refused.
  beats {
    port => 5044
  }
  # Nginx access logs consumed from the Kafka cluster.
  kafka {
    bootstrap_servers => "172.31.0.5:9092,172.31.0.5:9093,172.31.0.5:9094"
    auto_offset_reset => "latest"
    topics => ["elk-nginx-access"]
    group_id => "logstash1"
    client_id => "logstash1"
  }
}
output {
  # NOTE(review): all events — beats and kafka alike — are written to the
  # nginx index; add conditional routing if beats events need their own
  # index (filebeat sets one in @metadata).
  elasticsearch {
    hosts => ["http://172.31.0.3:9200","http://172.31.0.4:9200"]
    index => "logs-nginx-%{+YYYY.MM.dd}"
    workers => 1
  }
  # Echo each event to the container log for debugging.
  stdout { codec => rubydebug }
}
elasticsearch安装
docker-compose.yml
version: '2.2'
services:
  elasticsearch:                        # service name
    image: elasticsearch:7.5.0          # image to run
    container_name: elasticsearch
    restart: always                     # restart automatically on failure
    environment:
      - transport.tcp.port=9300         # inter-node transport port
      - bootstrap.memory_lock=true      # lock heap in RAM; recommended by the docs
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"  # JVM heap; lower it if memory is tight
    ulimits:
      memlock:
        soft: -1                        # unlimited locked memory
        hard: -1                        # unlimited locked memory
    volumes:
      # Map the node's config file from the host into the container.
      - /opt/elk_config/els/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - es_data:/usr/share/elasticsearch/data           # persistent data directory
      - /opt/elk_config/els/plugins:/usr/share/elasticsearch/plugins
      - /etc/localtime:/etc/localtime   # sync container time with the host
    ports:
      - "9200:9200"  # HTTP REST port — reachable from a browser
      - "9300:9300"  # TCP transport port used for node-to-node cluster traffic
    networks:
      - elastic
volumes:
  es_data:
    driver: local    # docker-managed local volume
networks:
  elastic:
    driver: bridge
elasticsearch.yml
# Node 1 of a two-node cluster (172.31.0.3 / 172.31.0.4).
cluster.name: "es-cluster"
# Bind to all interfaces inside the container.
network.host: 0.0.0.0
node.name: elasticsearch01
# Master-eligible node that also holds data.
node.master: true
node.data: true
# Allow at most one node to use this data path.
node.max_local_storage_nodes: 1
# Address advertised to other cluster nodes — this host's LAN IP.
network.publish_host: 172.31.0.3
http.port: 9200
transport.tcp.port: 9300
# Peers probed during discovery (7.x discovery mechanism).
discovery.seed_hosts: ["172.31.0.3","172.31.0.4"]
# Master-eligible node names used only for the very first cluster bootstrap.
cluster.initial_master_nodes: ["elasticsearch01", "elasticsearch02"]
# Enable CORS so browser tools (e.g. elasticsearch-head) can query the node.
http.cors.enabled: true
http.cors.allow-origin: "*"
kibana安装
docker-compose.yml
version: '2.2'
services:
  kibana:                          # service name
    depends_on:
      - elasticsearch              # start only after elasticsearch
    image: kibana:7.5.0            # image to run
    container_name: kibana
    restart: always                # restart automatically on failure
    links:
      - elasticsearch
    environment:
      # No spaces or quotes around '=' — "server.name= kibana" would set
      # the value to " kibana" (with a leading space), and quotes would
      # become part of the value. These settings duplicate kibana.yml,
      # which is also mounted below.
      - server.name=kibana
      - server.host=0.0.0.0
      - elasticsearch.hosts=["http://172.31.0.3:9200","http://172.31.0.4:9200"]
      - xpack.monitoring.ui.container.elasticsearch.enabled=true
    volumes:
      - /opt/elk_config/kibana/kibana.yml:/usr/share/kibana/config/kibana.yml
      - kibana_data:/usr/share/kibana/data   # persistent data directory
      - /etc/localtime:/etc/localtime        # sync container time with the host
    ports:
      - "5601:5601"  # Kibana web UI — reachable from a browser
    networks:
      - elastic
volumes:
  kibana_data:
    driver: local    # docker-managed local volume
networks:
  elastic:
    driver: bridge
kibana.yml
# Kibana instance name shown in monitoring.
server.name: kibana
# Bind to all interfaces inside the container.
server.host: "0.0.0.0"
# Both cluster nodes; Kibana fails over between them.
elasticsearch.hosts: [ "http://172.31.0.3:9200","http://172.31.0.4:9200" ]
# Show the Elasticsearch container's stats in the Monitoring UI.
xpack.monitoring.ui.container.elasticsearch.enabled: true