Related configuration
cat /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  paths:
    - /home/www/nangua/www/Runtime/Logs/*/*
  tags: ["jfm-admin-log"]
  close_timeout: 3h
  clean_inactive: 72h
  ignore_older: 70h
  close_inactive: 5m
- type: log
  paths:
    - /home/www/nangua_new/runtime/logs/pm2-4001-error.log
    - /home/www/nangua_new/runtime/logs/pm2-4001-out.log
  tags: ["jfm-newapi-pm2-log"]
  close_timeout: 3h
  clean_inactive: 72h
  ignore_older: 70h
  close_inactive: 5m
- type: log
  paths:
    - /home/www/nangua_new/runtime/logs/controller/*.log
    - /home/www/nangua_new/runtime/logs/helper/*.log
    - /home/www/nangua_new/runtime/logs/sdk/*.log
  tags: ["jfm-newapi-hyperf-log"]
  close_timeout: 3h
  clean_inactive: 72h
  ignore_older: 70h
  close_inactive: 5m

output.logstash:
  hosts: ["logstash1.rrmen0.com:5043"]
cat elasticsearch.yml
# Cluster name; nodes with the same cluster.name discover and join the same cluster.
cluster.name: elk-cluster
# In ES 7.x the node name defaults to the hostname; only needs to be set for multi-instance deployments.
node.name: slave-node-1
# Master-eligible node
node.master: true
# Data node
node.data: true
# Ingest (pre-processing) node
node.ingest: true
# Data directory; multiple directories are comma-separated and may be relative to the ES home directory.
path.data: /data/es/data
# Log directory; may be relative to the ES home directory.
path.logs: /data/es/logs
# Bind address of this node
network.host: 10.9.11.51
# HTTP port, default 9200; change it for multi-instance deployments.
http.port: 9200
# Transport port, default 9300; change it for multi-instance deployments.
transport.tcp.port: 9300
# Allow cross-origin requests
http.cors.enabled: true
# Allowed CORS origins; * matches any origin.
http.cors.allow-origin: "*"
# Lock the ES heap in memory to prevent swapping; worth enabling when other components share the host. Requires an unlimited memlock ulimit, or the node fails its bootstrap checks.
bootstrap.memory_lock: true
# Seed hosts for node discovery
discovery.seed_hosts: ["master-node-1","slave-node-1","slave-node-2"]
# Master-eligible nodes for the initial cluster bootstrap; set on every node, as node names or IP addresses.
cluster.initial_master_nodes: ["master-node-1","slave-node-1","slave-node-2"]
transport.tcp.compress: true
# After a full cluster restart, hold off recovery until this many nodes have joined.
gateway.recover_after_nodes: 3
# Quorum of master-eligible nodes ((N / 2) + 1, here 2 of 3) to avoid split-brain; ES 7.x ignores this zen setting and manages the quorum itself.
discovery.zen.minimum_master_nodes: 2
indices.breaker.request.limit: "90%"
indices.queries.cache.size: "90%"
# Shard request cache: results of identical search requests are served from cache instead of being re-executed; the default is 1% of the heap.
indices.requests.cache.size: "50%"
# Cap on the fielddata cache; unlimited by default.
indices.fielddata.cache.size: "50%"
# Hot/warm separation of index data; the index settings must also set "index.routing.allocation.require.box_type": "hot".
node.attr.box_type: "hot"
# Enable regular-expression matching in Painless scripts.
script.painless.regex.enabled: true
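
Once all three nodes are started, a quick check against the HTTP port confirms they formed a single cluster and shows each node's roles. Note that bootstrap.memory_lock: true also needs an unlimited memlock ulimit (for systemd, LimitMEMLOCK=infinity), or the node fails its bootstrap checks:

# Cluster-wide health: status should be green with 3 nodes
curl -s 'http://10.9.11.51:9200/_cluster/health?pretty'
# Per-node view: roles and which node is the elected master
curl -s 'http://10.9.11.51:9200/_cat/nodes?v&h=name,ip,node.role,master'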
cat 03-kafka-input.conf
input {
  kafka {
    bootstrap_servers => "kafka1.zoopkeeper.cn:9092,kafka2.zoopkeeper.cn:9092,kafka3.zoopkeeper.cn:9092"
    client_id => "logstash"
    topics => ["nginxlog","jxnginxlog"]
    group_id => "console-consumer-49303"
    consumer_threads => 5
    decorate_events => true
  }
}
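
To confirm the topics actually carry data before pointing Logstash at them, a few messages can be read with the console consumer shipped in Kafka's bin directory:

# Read 5 messages from the beginning of the nginxlog topic, then exit
kafka-console-consumer.sh --bootstrap-server kafka1.zoopkeeper.cn:9092 --topic nginxlog --from-beginning --max-messages 5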
cat 10-filebeat-filter.conf
filter {
  if [message] =~ /^{.*}$/ {
    # Normalize the JSON payload, then parse it into top-level fields.
    ruby {
      code => "event.set('message', JSON.parse(event.get('message')).to_json)"
    }
    json { source => "message" }
  }
# if [tags] in ["xcnginxaccesslog","xxnginxaccesslog","mmnginxaccesslog","mm2nginxaccesslog","xcurlnginxaccesslog","xcadminnginxaccesslog","ybpaynginxaccesslog","xynginxaccesslog","qiniunginxaccesslog","xyadminnginxaccesslog","xyybpaynginxaccesslog","92nginxaccesslog","91nginxaccesslog","qtnginxaccesslog","91-92-qtnginxaccesslog","admin-nginxaccesslog","ybpay-nginxaccesslog","hcnginxaccesslog","hxingnginxaccesslog","hxnginxaccesslog","boma365nginxaccesslog","bomaaffnginxaccesslog","hxadminnginxaccesslog","boma365adminnginxaccesslog","hxpaynginxaccesslog","hxybpaynginxaccesslog","qt2nginxaccesslog","93nginxaccesslog","jxnginxaccesslog","jinjiunginxaccesslog"]
  grok {
    break_on_match => true
    match => [
      "message", "%{IPORHOST:remote_addr} - %{USER:remote_user} \[%{HTTPDATE:time_local}\] (?:%{WORD:verb} %{NOTSPACE:request}(?: HTTP/%{NUMBER:httpversion})?|%{DATA:rawrequest})%{NUMBER:response_status} (?:%{NUMBER:bytes_sent}) %{GREEDYDATA:request_body} (?:(?:%{URI:referrer}|-)|%{QS:referrer})%{QS:search} %{DATA:http_x_forwarded_for} %{USERNAME:hosts} %{BASE16FLOAT:request_time} (%{BASE16FLOAT:upstream_response_time}|-) %{GREEDYDATA:upstream_cache_status}",
      "message", "%{IPORHOST:remote_addr} - %{USER:remote_user} \[%{HTTPDATE:time_local}\] (?:%{WORD:verb} %{NOTSPACE:request}(?: HTTP/%{NUMBER:httpversion})?|%{DATA:rawrequest})%{NUMBER:response_status} (?:%{NUMBER:bytes_sent}) %{GREEDYDATA:request_body} (?:(?:%{URI:referrer}|-)|%{QS:referrer})"
    ]
    add_field => { "nginxlog_time" => "%{time_local}" }
    remove_field => ["agent","agent.hostname","agent.ephemeral_id","agent.id","agent.type","agent.version","message","@version","offset","ecs.version","host.id","host.name","host.hostname","host.architecture","host.containerized","host.os.name","host.os.version","host.os.family","host.os.kernel","host.os.platform"]
  }
  date {
    match => [ "nginxlog_time", "dd/MMM/yyyy:HH:mm:ss Z" ]
    locale => "en-US"
    timezone => "Asia/Shanghai"
    target => "nginxlog_time"
  }
  mutate {
    split => [ "upstream_response_time", "," ]
  }
  mutate {
    convert => [ "upstream_response_time", "float" ]
    convert => [ "request_time", "float" ]
    convert => [ "bytes_sent", "integer" ]
  }
  geoip {
    source => "remote_addr"
    target => "geoip"
    database => "/usr/local/webserver/logstash-7.6.0/geoip/GeoLite2-City.mmdb"
    add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
    add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
  }
  mutate {
    convert => [ "[geoip][coordinates]", "float" ]
  }
}
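
The whole pipeline can be syntax-checked without starting it; the conf.d directory below is an assumption, so point -f at wherever these three files actually live:

# Parse all pipeline files and exit, reporting any syntax errors
/usr/local/webserver/logstash-7.6.0/bin/logstash -f /usr/local/webserver/logstash-7.6.0/config/conf.d/ --config.test_and_exit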
cat 30-elasticsearch-output.conf
output {
  elasticsearch {
    hosts => ["slave-node-1:9200","slave-node-2:9200","master-node-1:9200"]
    manage_template => true
    template => "/usr/local/webserver/logstash-7.6.0/config/elasticsearch-template.json"
    template_name => "logstash"
    template_overwrite => true
  }
}
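
Since no index option is set, this output typically writes to the default logstash-%{+YYYY.MM.dd} indices, so incoming documents can be confirmed with:

# Document counts per daily index; counts should grow as events arrive
curl -s 'http://slave-node-1:9200/_cat/indices/logstash-*?v&h=index,docs.count,store.size'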
[root@slave-node1 config]# cat server.properties
broker.id=2
listeners=PLAINTEXT://kafka2.zoopkeeper.cn:9092
#advertised.listeners=PLAINTEXT://205.252.16.156:9092
num.network.threads=9
num.io.threads=16
socket.send.buffer.bytes=1048576000
socket.receive.buffer.bytes=1048576000
socket.request.max.bytes=262144000
log.dirs=/data/kafka-logs
num.partitions=3
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
log.cleaner.enable=true
zookeeper.connect=kafka1.zoopkeeper.cn:2181,kafka2.zoopkeeper.cn:2181,kafka3.zoopkeeper.cn:2181
zookeeper.connection.timeout.ms=1000000
group.initial.rebalance.delay.ms=0
delete.topic.enable=true
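
The topics consumed in 03-kafka-input.conf must exist (or auto-creation must be enabled). A sketch for creating one replicated across all three brokers; on Kafka 2.2+ the deprecated --zookeeper flag can be replaced with --bootstrap-server:

# Create and then verify the nginxlog topic (3 partitions, 3 replicas)
kafka-topics.sh --create --zookeeper kafka1.zoopkeeper.cn:2181 --topic nginxlog --partitions 3 --replication-factor 3
kafka-topics.sh --describe --zookeeper kafka1.zoopkeeper.cn:2181 --topic nginxlog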
cat zookeeper.properties
tickTime=2000
initLimit=20
syncLimit=10
server.1=kafka1.zoopkeeper.cn:2888:3888
server.2=kafka2.zoopkeeper.cn:2888:3888
server.3=kafka3.zoopkeeper.cn:2888:3888
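
This snippet omits dataDir and clientPort (the server.properties above connects on 2181). Whatever dataDir is set to, every ensemble member also needs a myid file there whose content matches its server.N line, or ZooKeeper will not start. A sketch for the second node, assuming dataDir=/data/zookeeper:

# On kafka2.zoopkeeper.cn, which is server.2 in the ensemble
echo 2 > /data/zookeeper/myid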
location ~ .*\.(?:css(\.map)?|js(\.map)?|jpe?g|plist|apk|exe|msi|jpg|swf|bmp|htm|png|gif|ico|cur|heic|webp|tiff?|mp3|m4a|aac|ogg|midi?|wav|mp4|svg|woff|mov|ttf|ttc|otf|eot|woff2?|webm|mpe?g|avi|ogv|flv|wmv)$
{
proxy_next_upstream http_502 http_504 error timeout invalid_header;
proxy_cache xingfustaticcache;
proxy_cache_valid 200 304 30d;
proxy_cache_key $uri$is_args$args;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $host;
proxy_ignore_headers "Cache-Control";
proxy_hide_header "Cache-Control";
proxy_ignore_headers "Expires";
proxy_hide_header "Expires";
#proxy_hide_header "Set-Cookie";
#proxy_ignore_headers "Set-Cookie";
#add_header Cache-Control max-age=60;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_read_timeout 60s;
expires 7d;
proxy_pass http://default_upstream_xingfu;
}
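
Because of expires 7d, a cached asset should come back with Expires and Cache-Control: max-age=604800 headers; the URL below is a placeholder for whatever server_name this block serves. To see cache HIT/MISS directly, the block would additionally need add_header X-Cache-Status $upstream_cache_status;:

# Inspect the caching headers on a static asset
curl -sI 'http://www.example.com/static/app.js' | grep -iE 'cache-control|expires'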
location /api/lottery/
{
limit_req zone=xingfu_speedlimitto3_s burst=5 nodelay;
add_header Cache-Control private;
proxy_next_upstream http_502 http_504 error timeout invalid_header;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_read_timeout 60s;
proxy_pass http://default_upstream_xingfu;
}
location /api/account/
{
limit_req zone=xingfu_speedlimitto4_s burst=5 nodelay;
add_header Cache-Control private;
proxy_next_upstream http_502 http_504 error timeout invalid_header;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_read_timeout 60s;
proxy_pass http://default_upstream_xingfu;
}
location /api/auth/
{
limit_req zone=xingfu_speedlimitto3_s burst=8 nodelay;
add_header Cache-Control private;
proxy_next_upstream http_502 http_504 error timeout invalid_header;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_read_timeout 60s;
proxy_pass http://default_upstream_xingfu;
}
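
A quick way to confirm the limit_req zones bite is to hammer one of the limited paths and tally the status codes; once the burst allowance is exhausted, nginx rejects the excess with 503 by default (hostname is a placeholder):

# Expect a mix of 200s and 503s once the burst of 5 is used up
for n in $(seq 1 20); do curl -s -o /dev/null -w '%{http_code}\n' 'http://www.example.com/api/lottery/'; done | sort | uniq -c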
#!/bin/bash
host=$(xargs -n1 < /home/patsy/sc_pre.txt)
file=/var/spool/cron/root
function_insert(){
for i in $host;
do
    echo "host: $i"
    # The crontab file lives on the remote host, so test for it over ssh.
    if ssh root@"$i" "[ -f $file ]"; then
        echo "crontab file exists"
        # Back the file up, then append the ntpdate job; >/dev/null 2>&1
        # keeps the cron line portable to cron's /bin/sh.
        ssh root@"$i" "cp $file /home/patsy" && ssh root@"$i" "sed '$ a * * * * * /usr/sbin/ntpdate 172.254.1.253 > /dev/null 2>&1' -i $file"
    else
        echo "crontab file does not exist"
        exit 1
    fi
    # ssh root@$i "sed -i '/ntpdate/d' $file"
done
}
function_ntp_close(){
for i in $host;
do
    # Only stop ntpd where it is actually running; pgrep's exit status
    # reports whether a matching process exists on the remote host.
    if ssh root@"$i" "pgrep -f 'ntpd -u' > /dev/null"; then
        ssh root@"$i" "/etc/init.d/ntpd stop"
    else
        echo "ntp service already stopped on $i"
    fi
done
}
function_delete_crond(){
for i in $host;
do
    echo "host: $i"
    ssh root@"$i" "sed -i '/ntpdate/d' $file"
done
}
read_input(){
read -p "Please choose 1/2/3 : " char
case $char in
1)
function_insert
;;
2)
function_ntp_close
;;
3)
function_delete_crond
;;
*)
echo "error"
;;
esac
}
read_input
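
The script assumes passwordless root SSH to every host in sc_pre.txt; a minimal one-time setup sketch:

# Push the local public key to each target host (prompts for passwords once)
for i in $(xargs -n1 < /home/patsy/sc_pre.txt); do ssh-copy-id root@"$i"; done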
#!/bin/bash
LOG=/var/log/scpre_code_rsync.log
DATA=$(date +"%F %H:%M:%S")
echo "$DATA" >> $LOG
for i in 127.0.0.1 10.10.11.106 10.10.11.159 10.10.11.154
do
(
echo "-------------------" >> $LOG
echo "To Server $i" >> $LOG
rsync -vzrltD --delete /data/gitlab/aa-code/ root@"$i":/www/releases/preproduct/ --exclude-from="/data/gitlab/aa-code/.syncignore" -e "ssh -p 22" >> $LOG
) &
done
wait
echo "exclude files:/data/gitlab/sc-code/.syncignore" >> $LOG
echo "$DATA Done" >> $LOG
echo "==========" >> $LOG
echo " " >> $LOG