Installing ELK with Docker on a single machine and shipping service logs to Logstash
Pull the images
docker pull elasticsearch:7.12.1
docker pull kibana:7.12.1
docker pull logstash:7.12.1
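Optionally confirm that all three images were pulled:
docker images | grep 7.12.1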
Create a network
docker network create -d bridge es-net
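Optional check: docker network inspect shows the subnet Docker assigned to es-net (for example 172.18.0.0/16 or 172.19.0.0/16), which is where the container IPs used further down come from.
docker network inspect es-net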
Start Elasticsearch
docker run --name elasticsearch --net=es-net -d -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" elasticsearch:7.12.1
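Elasticsearch can take a little while on first start; a quick health check from the host:
curl 'http://127.0.0.1:9200/_cluster/health?pretty'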
Start Kibana
docker run --name kibana --net=es-net -d -p 5601:5601 kibana:7.12.1
Point Kibana at the Elasticsearch IP & restart
docker exec -it kibana bash
vi config/kibana.yml
elasticsearch.hosts: [ "http://172.17.0.2:9200" ]
(Use the address reported by docker inspect elasticsearch at the end of this post; since Kibana and Elasticsearch share es-net, the container name also works, e.g. "http://elasticsearch:9200".)
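Exit the container, then restart Kibana so the new setting takes effect:
docker restart kibana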
Start Logstash, edit its config, & restart
docker run --name logstash --net=es-net -d -p 5044:5044 logstash:7.12.1
docker exec -it logstash /bin/bash
vi /usr/share/logstash/config/logstash.yml
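The post does not show the logstash.yml contents; a minimal sketch (the Elasticsearch address is an assumption, substitute your own) that keeps the API bound to all interfaces and points monitoring at Elasticsearch:
http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://172.19.0.2:9200" ]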
vi /usr/share/logstash/pipeline/logstash.conf
input {
  tcp {
    host => "0.0.0.0"
    mode => "server"
    port => 5044
  }
}
filter {
}
output {
  elasticsearch {
    action => "index"
    hosts => "172.19.0.2:9200"
    index => "test_log"
  }
}
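After saving, exit the container and restart it so the new pipeline is loaded. If you want Logstash to parse the JSON that logstash-logback-encoder sends (instead of storing each event as a single message string), adding codec => json_lines inside the tcp { } block does that; the json_lines codec ships with Logstash.
docker restart logstash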
Configure logback-spring.xml
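LogstashTcpSocketAppender comes from the logstash-logback-encoder library, so it has to be on the application's classpath first; the Maven coordinates below are the usual ones, but the version is an assumption, pick one compatible with your Spring Boot / logback version. Then add the appender and wire it into the root logger as shown next.
<dependency>
    <groupId>net.logstash.logback</groupId>
    <artifactId>logstash-logback-encoder</artifactId>
    <version>6.6</version>
</dependency>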
<appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
    <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
        <level>INFO</level>
    </filter>
    <destination>127.0.0.1:5044</destination>
    <encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
        <providers>
            <timestamp>
                <timeZone>Asia/Shanghai</timeZone>
            </timestamp>
            <pattern>
                <pattern>
                    {
                    "severity": "%level",
                    "service": "${springAppName:-}",
                    "trace": "%X{X-B3-TraceId:-}",
                    "span": "%X{X-B3-SpanId:-}",
                    "parent": "%X{X-B3-ParentSpanId:-}",
                    "exportable": "%X{X-Span-Export:-}",
                    "pid": "${PID:-}",
                    "thread": "%thread",
                    "class": "%logger{40}",
                    "rest": "%message"
                    }
                </pattern>
            </pattern>
        </providers>
    </encoder>
</appender>
<root level="INFO">
    <appender-ref ref="console"/>
    <appender-ref ref="LOGSTASH"/>
</root>
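${springAppName:-} is only filled in if the property is exposed to logback; with the standard Spring Boot logback extension this is usually done by declaring it near the top of logback-spring.xml (an assumption, adjust if you already expose it another way):
<springProperty scope="context" name="springAppName" source="spring.application.name"/>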
Create a Kibana index pattern for the Logstash index
Stack Management --> Index patterns -->
Create index pattern --> test_log
Point the pattern at test_log; the test_log entries will then show up in Discover.
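To generate something to look at, any ordinary SLF4J log call in the service will do; a minimal sketch (the class and route names here are made up for illustration):
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

// Every call writes an INFO line; the LOGSTASH appender ships it to
// 127.0.0.1:5044 over TCP and Logstash indexes it into test_log.
@RestController
public class LogDemoController {
    private static final Logger log = LoggerFactory.getLogger(LogDemoController.class);

    @GetMapping("/log-demo")
    public String logDemo() {
        log.info("hello elk, this line should appear in Kibana Discover");
        return "ok";
    }
}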
Verify the services & look up container IPs
curl 127.0.0.1:9200
docker inspect elasticsearch
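docker inspect prints a lot of JSON; if you only want the container's IP, a Go-template format string narrows it down, and _cat/indices confirms that test_log exists once logs start flowing:
docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' elasticsearch
curl '127.0.0.1:9200/_cat/indices?v'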