ES Installation
1. Elasticsearch
Preparation
# Create the ES data and log directories
sudo mkdir -p /data/elasticsearch/data
sudo mkdir -p /data/elasticsearch/logs
# Create the es user (the matching es group is created automatically)
sudo useradd es
sudo passwd es
# Make the es user the owner of the data directories
sudo chown -R es:es /data/elasticsearch
# CentOS kernel and limits tuning
sudo vim /etc/sysctl.conf
# Add the following to /etc/sysctl.conf
fs.file-max=655360
vm.max_map_count=655360
# Apply the kernel settings
sudo sysctl -p
sudo vim /etc/security/limits.conf
# Append the following at the end of the file
* soft nproc 4096
* hard nproc 16384
* soft nofile 65536
* hard nofile 65536
sudo vim /etc/security/limits.d/90-nproc.conf
# Change
* soft nproc 1024
# to
* soft nproc 4096
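The limits only apply to new sessions. After logging in again as the es user, a quick sanity check (a sketch, run from the es shell) confirms the values above took effect:
# Kernel parameters set via sysctl.conf
sysctl vm.max_map_count fs.file-max
# Per-session limits from limits.conf / 90-nproc.conf
ulimit -n    # open files, expect 65536
ulimit -u    # max user processes, expect 4096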
Installation
cd /data/package/
sudo tar zxvf elasticsearch-oss-7.10.2-linux-x86_64.tar.gz -C /app/
sudo chown -R es:es /app/elasticsearch-7.10.2
Edit the configuration file
sudo vim /app/elasticsearch-7.10.2/config/elasticsearch.yml
# Configuration is as follows; on each node change node.name and network.host accordingly
# Cluster name, must be identical on all three nodes
cluster.name: elastic-7.10.2
# Name of the current node
node.name: szst-test-bigdata-12
# Data directory, pointed at the dedicated data mount
path.data: /data/elasticsearch/data
# Log directory
path.logs: /data/elasticsearch/logs
# Hostname or IP address this node binds to
network.host: szst-test-bigdata-12
# HTTP port for client traffic
http.port: 9200
# Transport port for intra-cluster traffic
transport.tcp.port: 9300
# Whether this node is master-eligible
node.master: true
node.data: true
# The head plugin needs the following two settings
# "*" allows requests from any origin
http.cors.allow-origin: "*"
# Enable cross-origin requests
http.cors.enabled: true
# Workaround for a system-call-filter issue when installing ES on some Linux systems
bootstrap.system_call_filter: false
bootstrap.memory_lock: false
# New in 7.x: nodes used to elect a master when bootstrapping a brand-new cluster
cluster.initial_master_nodes:
  - szst-test-bigdata-07
  - szst-test-bigdata-12
  - szst-test-bigdata-13
# New in 7.x: seed hosts for node discovery
discovery.seed_hosts:
  - szst-test-bigdata-07:9300
  - szst-test-bigdata-12:9300
  - szst-test-bigdata-13:9300
# (master-eligible nodes / 2, rounded down) + 1; deprecated and ignored in 7.x, kept only for reference
discovery.zen.minimum_master_nodes: 2
Start
su - es
# Start ES in the background and record its PID
/app/elasticsearch-7.10.2/bin/elasticsearch -d -p /app/elasticsearch-7.10.2/pid
# Stop ES by signalling the recorded PID
kill `cat /app/elasticsearch-7.10.2/pid`
Open the following URL in a browser to confirm the node started successfully
http://<node ip>:9200/
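Once all three nodes are running, the cluster can also be checked from the command line (a quick sketch; substitute any node of your cluster for szst-test-bigdata-12):
# Cluster health; expect "status":"green" and "number_of_nodes":3
curl http://szst-test-bigdata-12:9200/_cluster/health?pretty
# List the nodes that have joined, with their roles
curl http://szst-test-bigdata-12:9200/_cat/nodes?v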
Start-on-boot configuration
List the services currently registered to start at boot
sudo chkconfig --list
Create the ES init script: go to /etc/init.d and create a script named elasticsearch
cd /etc/init.d
sudo vi elasticsearch
#!/bin/sh
# chkconfig: - 85 15
# description: elasticsearch
export ES_HOME=/app/elasticsearch-7.10.2
case "$1" in
start)
    su es<<!
cd $ES_HOME
./bin/elasticsearch -d -p pid
!
    echo "elasticsearch startup"
    ;;
stop)
    kill `cat $ES_HOME/pid`
    echo "elasticsearch stopped"
    ;;
restart)
    kill `cat $ES_HOME/pid`
    echo "elasticsearch stopped"
    # give the old process a moment to exit and release its port
    sleep 5
    su es<<!
cd $ES_HOME
./bin/elasticsearch -d -p pid
!
    echo "elasticsearch startup"
    ;;
*)
    echo "Usage: $0 {start|stop|restart}"
    ;;
esac
exit $?
Change the file permissions and register the service
sudo chmod 755 /etc/init.d/elasticsearch
sudo chkconfig --add elasticsearch
# To unregister the service again:
# sudo chkconfig --del elasticsearch
Start the service
sudo systemctl start elasticsearch
Disable start on boot
sudo chkconfig elasticsearch off
sudo systemctl disable elasticsearch
Enable start on boot
sudo chkconfig elasticsearch on
sudo systemctl enable elasticsearch
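With the init script registered, the node can also be managed through the SysV wrapper (same script as above):
sudo service elasticsearch restart
# Confirm which runlevels it is enabled for
chkconfig --list elasticsearch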
Kibana installation
cd /data/package/
sudo tar -zxvf kibana-oss-7.10.2-linux-x86_64.tar.gz -C /data
sudo mv /data/kibana-7.10.2-linux-x86_64 /data/kibana
sudo groupadd kibana
sudo useradd -g kibana kibana
sudo chown -R kibana:kibana /data/kibana
sudo vim /data/kibana/config/kibana.yml with the following content:
# Kibana port
server.port: 5601
# IP address of the current machine
server.host: "192.168.11.99"
# Elasticsearch addresses
elasticsearch.hosts: ["http://SZST-TEST-bigdata-07:9200","http://SZST-TEST-bigdata-12:9200","http://SZST-TEST-bigdata-13:9200"]
# Switch the Kibana UI language to Simplified Chinese
i18n.locale: "zh-CN"
cd /usr/lib/systemd/system
sudo vim kibana.service with the following content:
[Unit]
Description=kibana server
Documentation=https://www.elastic.co/guide/en/kibana/7.10/index.html
After=network.target
[Service]
Type=simple
User=kibana
Group=kibana
ExecStart=/data/kibana/bin/kibana
Restart=on-failure
[Install]
WantedBy=multi-user.target
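After creating the unit file, have systemd pick it up before starting the service:
sudo systemctl daemon-reload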
Start
sudo systemctl start kibana.service
sudo systemctl enable kibana.service
Verify
Open http://<ip>:5601 in a browser; if the Kibana page loads, the service started successfully.
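The same check can be done from the shell (a sketch; replace the address with your Kibana host):
# Kibana status API; a healthy instance reports an overall state of "green"
curl http://192.168.11.99:5601/api/status
sudo systemctl status kibana.service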
Logstash installation
cd /data/package/
sudo tar -zxvf logstash-oss-7.10.2-linux-x86_64.tar.gz -C /data
sudo mv /data/logstash-7.10.2 /data/logstash
sudo groupadd logstash
sudo useradd -g logstash logstash
sudo chown -R logstash:logstash /data/logstash
sudo rm -rf /data/logstash/config/logstash-sample.conf
sudo vim /data/logstash/config/logstash.conf with the following content:
input {
  tcp {
    port => "5044"
    type => syslog
    host => "0.0.0.0"
    codec => "json"
  }
}
input {
  # Kafka source for the Flink logs
  kafka {
    # Separate multiple broker addresses with commas
    bootstrap_servers => "SZST-TEST-bigdata-04:9092,SZST-TEST-bigdata-05:9092,SZST-TEST-bigdata-06:9092"
    # Topics to subscribe to; multiple topics are supported
    topics => ["aegis_log_topic"]
    consumer_threads => 5
    type => flinklog
    codec => "json"
  }
}
filter {
  if [type] == "syslog" {
    mutate {
      add_field => { "instance_name" => "%{app_name}-%{host}:%{app_port}" }
    }
  }
}
output {
  if [type] == "syslog" {
    elasticsearch {
      # ES node addresses
      hosts => ["http://SZST-TEST-bigdata-07:9200","http://SZST-TEST-bigdata-12:9200","http://SZST-TEST-bigdata-13:9200"]
      index => "logs-%{app_name}-%{+YYYY.MM.dd}"
      #user => "elastic"
      #password => "changeme"
    }
  }
  if [type] == "flinklog" {
    elasticsearch {
      # ES node addresses
      hosts => ["http://SZST-TEST-bigdata-07:9200","http://SZST-TEST-bigdata-12:9200","http://SZST-TEST-bigdata-13:9200"]
      index => "flinklog-aegis_log_topic-%{+YYYY.MM.dd}"
    }
  }
}
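Before wiring Logstash into systemd, the pipeline syntax can be validated in place (-t only tests the configuration and exits):
sudo -u logstash /data/logstash/bin/logstash -f /data/logstash/config/logstash.conf -t
# Expect "Configuration OK" near the end of the output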
sudo vim /data/logstash/config/pipelines.yml with the following content:
- pipeline.id: logstash
  #queue.type: persisted
  # Path to the pipeline configuration files
  path.config: "/data/logstash/config/*.conf"
cd /usr/lib/systemd/system
sudo vim logstash.service with the following content:
[Unit]
Description=logstash
[Service]
Type=simple
User=logstash
Group=logstash
Environment=LS_HOME=/data/logstash
Environment=LS_SETTINGS_DIR=/data/logstash/config/
Environment=LS_USER=logstash
Environment=LS_GROUP=logstash
Environment=SERVICE_NAME=logstash
Environment=SERVICE_DESCRIPTION=logstash
ExecStart=/data/logstash/bin/logstash "-f" "/data/logstash/config/" "--config.reload.automatic"
Restart=always
WorkingDirectory=/data/logstash
Nice=19
LimitNOFILE=16384
[Install]
WantedBy=multi-user.target
Start
sudo systemctl start logstash.service
sudo systemctl enable logstash.service
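A quick end-to-end check (a sketch: app_name, app_port and message are arbitrary test fields, and nc must be installed): push one JSON event into the TCP input, then confirm the daily index appears in Elasticsearch.
echo '{"app_name":"demo","app_port":"8080","message":"hello from logstash"}' | nc 192.168.11.99 5044
# A few seconds later the index for today should be listed
curl "http://SZST-TEST-bigdata-12:9200/_cat/indices/logs-demo-*?v"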