kibana+es+logstash+kafka+filebeat

kibana 

10.16.1.41

mkdir -p /opt/software && cd /opt/software

wget https://artifacts.elastic.co/downloads/kibana/kibana-7.3.2-linux-x86_64.tar.gz
tar -zxvf kibana-7.3.2-linux-x86_64.tar.gz
mv kibana-7.3.2-linux-x86_64 /opt/kibana
useradd kibana -d /opt/kibana -s /sbin/nologin
chown -R kibana:kibana /opt/kibana
cat /opt/kibana/config/kibana.yml | grep -v '#' | grep -v '^$'
server.port: 5601
server.host: "0.0.0.0"
server.name: "10.16.1.41"
elasticsearch.hosts: ["http://10.16.1.34:9200", "http://10.16.1.35:9200", "http://10.16.1.36:9200"]
kibana.index: ".kibana"
elasticsearch.requestTimeout: 10000000
i18n.locale: "zh-CN"
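
With the configuration in place, Kibana can be started in the background as the kibana user and checked over HTTP; a minimal sketch (the log path /tmp/kibana.log is an assumption):

nohup sudo -u kibana /opt/kibana/bin/kibana >/tmp/kibana.log 2>&1 &   # run as the unprivileged kibana user
curl -s 'http://10.16.1.41:5601/api/status'   # returns status JSON once Kibana is up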

##########################

elasticsearch-1

10.16.1.34

#######################

cat /etc/security/limits.conf | grep -v '#' | grep -v "^$"
* hard nofile 102400
* soft nofile 102400
mkdir -p /opt/software && cd /opt/software
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.3.2-linux-x86_64.tar.gz
tar -zxvf elasticsearch-7.3.2-linux-x86_64.tar.gz
mv elasticsearch-7.3.2 /opt/elasticsearch
useradd elasticsearch -d /opt/elasticsearch -s /sbin/nologin
mkdir -p /opt/logs/elasticsearch
chown -R elasticsearch:elasticsearch /opt/elasticsearch /opt/logs/elasticsearch   # limit ownership to the directories Elasticsearch actually needs
echo "vm.max_map_count = 655350" >> /etc/sysctl.conf

sysctl -p

cat /opt/elasticsearch/config/elasticsearch.yml | grep -v '#' | grep -v '^$'

cluster.name: my-application
node.name: 10.16.1.34
path.logs: /opt/logs/elasticsearch
network.host: 0.0.0.0
http.port: 9200
discovery.seed_hosts: ["10.16.1.34", "10.16.1.35","10.16.1.36"]
cluster.initial_master_nodes: ["10.16.1.34", "10.16.1.35","10.16.1.36"]
gateway.recover_after_nodes: 2
action.auto_create_index: true
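
Start Elasticsearch as the unprivileged user and check that it responds locally; a minimal sketch (the same start command applies on each node):

sudo -u elasticsearch /opt/elasticsearch/bin/elasticsearch -d   # -d daemonizes the process
curl -s 'http://10.16.1.34:9200/_cluster/health?pretty'   # status becomes yellow/green once nodes join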

#################################

elasticsearch-2

10.16.1.35

cat /etc/security/limits.conf | grep -v '#' | grep -v "^$"
* hard nofile 102400
* soft nofile 102400
mkdir -p /opt/software && cd /opt/software
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.3.2-linux-x86_64.tar.gz
tar -zxvf elasticsearch-7.3.2-linux-x86_64.tar.gz
mv elasticsearch-7.3.2 /opt/elasticsearch
useradd elasticsearch -d /opt/elasticsearch -s /sbin/nologin
mkdir -p /opt/logs/elasticsearch
chown -R elasticsearch:elasticsearch /opt/elasticsearch /opt/logs/elasticsearch   # limit ownership to the directories Elasticsearch actually needs
echo "vm.max_map_count = 655350" >> /etc/sysctl.conf

sysctl -p

cat /opt/elasticsearch/config/elasticsearch.yml | grep -v '#' | grep -v '^$'

cluster.name: my-application
node.name: 10.16.1.35
path.logs: /opt/logs/elasticsearch
network.host: 0.0.0.0
http.port: 9200
discovery.seed_hosts: ["10.16.1.34", "10.16.1.35","10.16.1.36"]
cluster.initial_master_nodes: ["10.16.1.34", "10.16.1.35","10.16.1.36"]
gateway.recover_after_nodes: 2
action.auto_create_index: true

#################################

elasticsearch-3

10.16.1.36

The installation steps are identical to elasticsearch-1 and elasticsearch-2; only node.name changes in the configuration:

cat /opt/elasticsearch/config/elasticsearch.yml | grep -v '#' | grep -v '^$'

cluster.name: my-application
node.name: 10.16.1.36
path.logs: /opt/logs/elasticsearch
network.host: 0.0.0.0
http.port: 9200
discovery.seed_hosts: ["10.16.1.34", "10.16.1.35","10.16.1.36"]
cluster.initial_master_nodes: ["10.16.1.34", "10.16.1.35","10.16.1.36"]
gateway.recover_after_nodes: 2
action.auto_create_index: true
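
Once all three nodes are up, cluster membership can be checked from any node; a quick sanity check:

curl -s 'http://10.16.1.34:9200/_cat/nodes?v'   # should list 10.16.1.34, 10.16.1.35 and 10.16.1.36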

##########################################

Setting up Kafka

10.16.1.43

Kafka

  1. Kafka depends on ZooKeeper
  2. Both depend on Java (a quick version check follows this list)
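
Both packages only need a Java runtime on the PATH; a quick check (the OpenJDK package name assumes CentOS/RHEL):

java -version   # Java 8 works for both Kafka 2.1.1 and ZooKeeper 3.4.13
yum install -y java-1.8.0-openjdk   # only if the check above fails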

 

Kafka depends on ZooKeeper

  1. Official site: https://zookeeper.apache.org/
  2. Download the ZK binary package
  3. Extract it to the target directory to complete the installation

 

ZK installation commands

tar -zxf zookeeper-3.4.13.tar.gz

mv zookeeper-3.4.13 /usr/local/

cp /usr/local/zookeeper-3.4.13/conf/zoo_sample.cfg  /usr/local/zookeeper-3.4.13/conf/zoo.cfg

 

Starting ZK

  1. Add the configuration clientPortAddress=0.0.0.0 to /usr/local/zookeeper-3.4.13/conf/zoo.cfg
  2. Start it: /usr/local/zookeeper-3.4.13/bin/zkServer.sh start (a health check follows this list)
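
After starting, ZooKeeper can be probed; a minimal check (nc availability is an assumption):

/usr/local/zookeeper-3.4.13/bin/zkServer.sh status   # prints "Mode: standalone" for a single node
echo ruok | nc 127.0.0.1 2181   # the four-letter command should answer "imok"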

Kafka download

  1. Kafka official site: http://kafka.apache.org/
  2. Download the Kafka binary package
  3. Extract it to the target directory to complete the installation

Kafka installation commands

cd /usr/local/src/

tar -zxf kafka_2.11-2.1.1.tgz

mv kafka_2.11-2.1.1 /usr/local/kafka_2.11

Starting Kafka

  1. Edit the Kafka configuration: change the listen address and the ZooKeeper connection address (an example follows this list)
  2. Foreground start: /usr/local/kafka_2.11/bin/kafka-server-start.sh /usr/local/kafka_2.11/config/server.properties
  3. Background start: nohup /usr/local/kafka_2.11/bin/kafka-server-start.sh /usr/local/kafka_2.11/config/server.properties >/tmp/kafka.log 2>&1 &
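
The two settings from step 1 would look like this in /usr/local/kafka_2.11/config/server.properties on this host; a sketch, assuming ZooKeeper runs locally on its default port:

listeners=PLAINTEXT://10.16.1.43:9092
zookeeper.connect=10.16.1.43:2181

The "test" topic consumed by Logstash below can then be created and smoke-tested; nine partitions is an assumption, chosen to match the nine consumer_threads in the Logstash input:

/usr/local/kafka_2.11/bin/kafka-topics.sh --create --zookeeper 10.16.1.43:2181 --replication-factor 1 --partitions 9 --topic test
/usr/local/kafka_2.11/bin/kafka-console-producer.sh --broker-list 10.16.1.43:9092 --topic test   # type a line, then Ctrl+C
/usr/local/kafka_2.11/bin/kafka-console-consumer.sh --bootstrap-server 10.16.1.43:9092 --topic test --from-beginning   # should echo it back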

Setting up logstash

cd /data/
tar xvf logstash-8.5.3-linux-x86_64.tar.gz
mv logstash-8.5.3 logstash
cd logstash
cd config/
cat /data/logstash/config/logstash.conf
# Logstash configuration for a simple
# Kafka -> Logstash -> Elasticsearch pipeline.
input {
  kafka {
    bootstrap_servers => "10.16.1.43:9092"
    topics => ["test"]
    group_id => "test"
    fetch_max_bytes => 5242880
    max_poll_records => 90
    consumer_threads => 9
    decorate_events => true
    type => "default"
  }
}
output {
  elasticsearch {
    hosts => ["http://10.16.1.34:9200","http://10.16.1.35:9200","http://10.16.1.36:9200"]
    index => "144-12nginx-%{+YYYY.MM.dd}"
    #user => "elastic"
    #password => "changeme"
  }
}
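
The pipeline syntax can be validated before starting Logstash in the background; a minimal sketch (the log path is an assumption):

/data/logstash/bin/logstash -f /data/logstash/config/logstash.conf --config.test_and_exit   # prints "Configuration OK" on success
nohup /data/logstash/bin/logstash -f /data/logstash/config/logstash.conf >/tmp/logstash.log 2>&1 &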

Setting up filebeat

cd /data/software

wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.3.2-linux-x86_64.tar.gz

tar xvf filebeat-7.3.2-linux-x86_64.tar.gz 

mv filebeat-7.3.2-linux-x86_64 /data/filebeat

cd /data/filebeat/

cat /data/filebeat/filebeat.yml

filebeat.inputs:
- type: log
  tail_files: true
  backoff: "1s"
  paths:
    - /nfsdata/*/*.log    # .log files in the subdirectories under /nfsdata
output:
  kafka:
    hosts: ["10.16.1.43:9092", "10.16.1.39:9092"]
    topic: test
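
Filebeat can then be started in the background; a minimal sketch (the log path is an assumption):

cd /data/filebeat && nohup ./filebeat -e -c filebeat.yml >/tmp/filebeat.log 2>&1 &   # -e also logs to stderr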
