1. Java installation: https://www.cnblogs.com/yoyo1216/p/12668926.html
2. Ruby installation
2.1 Install build dependencies and create the directory
yum install -y gcc-c++ patch readline readline-devel zlib zlib-devel libyaml-devel libffi-devel openssl-devel make bzip2 autoconf automake libtool bison iconv-devel sqlite-devel
mkdir /usr/local/ruby
cd /usr/local/ruby
2.2 Download # http://www.ruby-lang.org/en/downloads/
wget https://cache.ruby-lang.org/pub/ruby/2.7/ruby-2.7.1.tar.gz    # the logstash folder also contains this package
2.3 Extract
tar -zxvf ruby-2.7.1.tar.gz
cd ruby-2.7.1
2.4 Compile and install
./configure --prefix=/usr/local/ruby && make && make install
2.5 Configure environment variables
vim /etc/profile
# append the following line to /etc/profile:
export PATH=$PATH:/usr/local/ruby/bin
source /etc/profile
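A quick check that the new Ruby is picked up through the updated PATH (the exact version strings will vary with the build):
ruby -v    # should report ruby 2.7.1
gem -v     # RubyGems ships with this Ruby build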
3. logstash-output-clickhouse installation
3.1 Create the directory
mkdir /usr/local/logstash_output_clickhouse
cd /usr/local/logstash_output_clickhouse
3.2 Download # https://github.com/funcmike/logstash-output-clickhouse
wget -O logstash-output-clickhouse-master.zip https://codeload.github.com/funcmike/logstash-output-clickhouse/zip/master
3.3 Extract, build and install the plugin
unzip logstash-output-clickhouse-master.zip
cd logstash-output-clickhouse-master
gem build logstash-output-clickhouse.gemspec
logstash-plugin install logstash-output-clickhouse-0.1.0.gem
# or install from an offline plugin pack:
logstash-plugin install file:///usr/local/logstash_output_clickhouse/logstash-offline-plugins-7.7.0.zip
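To confirm the output plugin is registered, Logstash can list its installed plugins. This assumes the Logstash bin directory from section 4 is already on the PATH (install Logstash first if it is not):
logstash-plugin list | grep clickhouse    # should print logstash-output-clickhouse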
4. Logstash installation
4.1 Create the directories (this installation method is recommended)
mkdir /usr/local/logstash
mkdir /usr/local/logstash/config
mkdir /usr/local/logstash/logs
cd /usr/local/logstash
4.2 Download the package # past releases: https://www.elastic.co/cn/downloads/past-releases (use the logstash-oss distribution)
wget https://artifacts.elastic.co/downloads/logstash/logstash-oss-7.7.0.tar.gz
4.3 Extract
tar -zxvf logstash-oss-7.7.0.tar.gz
4.4 Configure environment variables
vim /etc/profile
# append the following line to /etc/profile:
export PATH=$PATH:/usr/local/logstash/logstash-7.7.0/bin
source /etc/profile
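A quick sanity check that the binary is reachable through the new PATH entry:
logstash --version    # should print logstash 7.7.0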
4.5 Edit the configuration file
vim /usr/local/logstash/logstash-7.7.0/config/logstash.yml
node.name: shunwang                          # node name, for easier identification
path.data                                    # directory for persisted data; defaults to the data directory under the Logstash home
path.config                                  # directory holding the pipeline configuration files
path.logs: /usr/local/logstash/logs          # directory for Logstash log files
pipeline.workers                             # number of pipeline worker threads
pipeline.batch.size / pipeline.batch.delay   # batch size and delay for batched processing
queue.type                                   # queue type, memory by default; change to queue.type: persisted
queue.max_bytes                              # total queue capacity, 1024mb by default; change to queue.max_bytes: 4096mb
http.host: "0.0.0.0"
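A minimal logstash.yml that puts the settings above together. The worker count is only an illustrative value, path.config matches the conf directory used by the supervisord command in section 5.5, and the batch settings shown are the Logstash defaults:
node.name: shunwang
path.data: /usr/local/logstash/logstash-7.7.0/data
path.config: /usr/local/logstash/config/conf
path.logs: /usr/local/logstash/logs
pipeline.workers: 4
pipeline.batch.size: 125
pipeline.batch.delay: 50
queue.type: persisted
queue.max_bytes: 4096mb
http.host: "0.0.0.0"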
5. supervisord deployment
5.1 Install supervisord
yum install -y epel-release
yum install -y supervisor
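Supervisor ships with echo_supervisord_conf, which is one way to generate a baseline configuration file to edit into the file shown in section 5.5 (the target path below simply matches the paths used later in this guide):
mkdir -p /usr/local/logstash/config
echo_supervisord_conf > /usr/local/logstash/config/supervisord.conf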
5.2 ck-ruby.rb
# The value of `params` is the hash passed to `script_params`
# in the logstash configuration.
def register(params)
  # params carries whatever was passed via script_params in the pipeline file
  @message = params["message"]
end

# Copy the pipe-separated fields in `messages` onto the event using the
# column names in `keys`; add_time (a unix timestamp) is converted to
# "YYYY-mm-dd HH:MM:SS".
def map_fields(event, messages, keys)
  keys.each_with_index do |key, i|
    next if messages[i].nil?
    if key == "add_time"
      event.set(key, Time.at(messages[i].to_i).strftime("%Y-%m-%d %H:%M:%S"))
    else
      event.set(key, messages[i])
    end
  end
end

# The filter method receives an event and must return a list of events.
# Dropping an event means not including it in the returned array, while
# creating new ones only requires adding a new instance of
# LogStash::Event to the returned array.
# Every field produced by the input stage is available through `event`.
def filter(event)
  messages = event.get('message').split('|')

  @stream_event_key  = ['ip', 'add_time', 'session_id', 'member_id', 'client_type', 'client_version', 'mac', 'event_Name', 'event_type', 'status', 'message', 'local_delay', 'node_delay', 'down_speed', 'disconnect_count', 'fps', 'local_fluctuate_count', 'node_fluctuate_count', 'stream_num']
  @stream_event_key2 = ['ip', 'add_time', 'session_id', 'member_id', 'client_type', 'client_version', 'mac', 'sn', 'event_Name', 'event_type', 'status', 'message', 'local_delay', 'node_delay', 'down_speed', 'disconnect_count', 'fps', 'local_fluctuate_count', 'node_fluctuate_count', 'stream_num']
  @click_event_key   = ['ip', 'add_time', 'session_id', 'member_id', 'client_type', 'client_version', 'mac', 'sn', 'event_Name', 'app_id', 'platform', 'app_origin_id', 'app_comment_id', 'click_place']
  @client_open_key   = ['ip', 'add_time', 'session_id', 'member_id', 'client_type', 'client_version', 'mac', 'event_Name', 'status']
  @client_open_key2  = ['ip', 'add_time', 'session_id', 'member_id', 'client_type', 'client_version', 'mac', 'sn', 'event_Name', 'status']
  @client_update_key = ['ip', 'add_time', 'session_id', 'member_id', 'client_type', 'client_version', 'mac', 'event_Name', 'target_version', 'result']
  @login_key         = ['ip', 'add_time', 'session_id', 'member_id', 'client_type', 'client_version', 'mac', 'event_Name', 'member_type', 'result']
  @login_key2        = ['ip', 'add_time', 'session_id', 'member_id', 'client_type', 'client_version', 'mac', 'sn', 'event_Name', 'member_type', 'result']
  @post_click_key    = ['ip', 'add_time', 'session_id', 'member_id', 'client_type', 'client_version', 'mac', 'event_Name', 'app_id', 'community_place', 'post_id']
  @post_exposure_key = ['ip', 'add_time', 'session_id', 'member_id', 'client_type', 'client_version', 'mac', 'event_Name', 'app_id', 'community_place', 'post_id']
  @put_click_key     = ['ip', 'add_time', 'session_id', 'member_id', 'client_type', 'client_version', 'mac', 'sn', 'event_Name', 'put_type', 'put_num', 'app_id']
  @put_exposure_key  = ['ip', 'add_time', 'session_id', 'member_id', 'client_type', 'client_version', 'mac', 'sn', 'event_Name', 'put_type', 'put_num', 'app_id']

  if event.get('message').include? "streamEvent"
    if messages.length == 22
      map_fields(event, messages, @stream_event_key2)
    else
      event.set("sn", "")
      map_fields(event, messages, @stream_event_key)
    end
  elsif event.get('message').include? "clickEvent"
    map_fields(event, messages, @click_event_key)
  elsif event.get('message').include? "clientOpen"
    if messages.length == 10
      map_fields(event, messages, @client_open_key2)
    else
      map_fields(event, messages, @client_open_key)
    end
  elsif event.get('message').include? "clientUpdate"
    map_fields(event, messages, @client_update_key)
  elsif event.get('message').include? "login"
    if messages.length == 11
      map_fields(event, messages, @login_key2)
    else
      event.set("sn", "")
      map_fields(event, messages, @login_key)
    end
  elsif event.get('message').include? "postClick"
    map_fields(event, messages, @post_click_key)
  elsif event.get('message').include? "postExposure"
    map_fields(event, messages, @post_exposure_key)
  elsif event.get('message').include? "putClick"
    map_fields(event, messages, @put_click_key)
  elsif event.get('message').include? "putExposure"
    map_fields(event, messages, @put_exposure_key)
  end

  return [event]
end
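Before wiring the script into the full pipelines below, it can be exercised in isolation with a throwaway stdin pipeline. This is only a sketch and assumes the script has been saved to the path shown; pasting a sample tab-separated log line should print the parsed fields (ip, add_time, event_Name, and so on) in the rubydebug output:
logstash -e '
  input  { stdin {} }
  filter {
    mutate { gsub => ["message", "\t", "|"] }
    ruby   { path => "/usr/local/logstash/config/ck-ruby.rb" }
  }
  output { stdout { codec => rubydebug } }
'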
5.3 ck-logstash.conf
input {
  file {
    path => ["/data/www/ROOT/txt/*.txt"]
    start_position => "beginning"
  }
}

filter {
  mutate { gsub => ["message", "\t", "|"] }
  ruby { path => "/usr/local/logstash/config/ck-ruby.rb" }
  mutate {
    convert => ["member_id", "integer"]
    remove_field => ["@version", "host", "@timestamp", "path"]
  }
  if [event_Name] == "streamEvent" {
    mutate {
      convert => ["event_type", "integer"]
      convert => ["status", "integer"]
      convert => ["local_delay", "integer"]
      convert => ["node_delay", "integer"]
      convert => ["down_speed", "integer"]
      convert => ["disconnect_count", "integer"]
      convert => ["fps", "integer"]
      convert => ["local_fluctuate_count", "integer"]
      convert => ["node_fluctuate_count", "integer"]
    }
  }
  if [event_Name] == "otherClick" {
    mutate {
      convert => ["app_id", "integer"]
      convert => ["click_place", "integer"]
    }
  }
  if [event_Name] == "clientOpen" {
    mutate { convert => ["status", "integer"] }
  }
  if [event_Name] == "clientUpdate" {
    mutate { convert => ["result", "integer"] }
  }
  if [event_Name] == "login" {
    mutate {
      convert => ["member_type", "integer"]
      convert => ["result", "integer"]
    }
  }
  if [event_Name] == "postClick" {
    mutate {
      convert => ["app_id", "integer"]
      convert => ["post_id", "integer"]
      convert => ["community_place", "integer"]
    }
  }
  if [event_Name] == "postExposure" {
    mutate {
      convert => ["app_id", "integer"]
      convert => ["post_id", "integer"]
      convert => ["community_place", "integer"]
    }
  }
  if [event_Name] == "putClick" {
    mutate {
      convert => ["put_num", "integer"]
      convert => ["app_id", "integer"]
    }
  }
  if [event_Name] == "putExposure" {
    mutate {
      convert => ["put_num", "integer"]
      convert => ["app_id", "integer"]
    }
  }
  if [event_Name] == "clickEvent" {
    mutate {
      convert => ["app_id", "integer"]
      convert => ["platform", "integer"]
      convert => ["app_comment_id", "integer"]
      convert => ["click_place", "integer"]
    }
  }
}

output {
  if [event_Name] == "streamEvent" {
    clickhouse {
      headers => ["authentication", "ck ur6LMIUD"]
      http_hosts => ["http://172.25.17.39:8123/"]
      table => "data_center.stream_event"
      request_tolerance => 1
      flush_size => 1000
      pool_max => 100
      mutations => {
        ip => ip add_time => add_time session_id => session_id member_id => member_id
        client_type => client_type client_version => client_version mac => mac
        event_type => event_type status => status message => message
        local_delay => local_delay node_delay => node_delay down_speed => down_speed
        disconnect_count => disconnect_count fps => fps
        local_fluctuate_count => local_fluctuate_count node_fluctuate_count => node_fluctuate_count
        stream_num => stream_num sn => sn
      }
    }
  }
  if [event_Name] == "otherClick" {
    clickhouse {
      headers => ["authentication", "ck ur6LMIUD"]
      http_hosts => ["http://172.25.17.39:8123/"]
      table => "data_center.click_event"
      request_tolerance => 1
      flush_size => 1000
      pool_max => 100
      mutations => {
        member_id => member_id app_id => app_id click_place => click_place
        add_time => add_time client_type => client_type client_version => client_version
        session_id => session_id mac => mac ip => ip
      }
    }
  }
  if [event_Name] == "clientOpen" {
    clickhouse {
      headers => ["authentication", "ck ur6LMIUD"]
      http_hosts => ["http://172.25.17.39:8123/"]
      table => "data_center.client_open"
      request_tolerance => 1
      flush_size => 1000
      pool_max => 100
      mutations => {
        status => status add_time => add_time client_type => client_type
        client_version => client_version session_id => session_id mac => mac ip => ip
      }
    }
  }
  if [event_Name] == "clientUpdate" {
    clickhouse {
      headers => ["authentication", "ck ur6LMIUD"]
      http_hosts => ["http://172.25.17.39:8123/"]
      table => "data_center.client_update"
      request_tolerance => 1
      flush_size => 1000
      pool_max => 100
      mutations => {
        target_version => target_version result => result add_time => add_time
        client_type => client_type client_version => client_version
        session_id => session_id mac => mac ip => ip
      }
    }
  }
  if [event_Name] == "login" {
    clickhouse {
      headers => ["authentication", "ck ur6LMIUD"]
      http_hosts => ["http://172.25.17.39:8123/"]
      table => "data_center.login"
      request_tolerance => 1
      flush_size => 1000
      pool_max => 100
      mutations => {
        member_id => member_id member_type => member_type result => result
        add_time => add_time client_type => client_type client_version => client_version
        session_id => session_id mac => mac ip => ip sn => sn
      }
    }
  }
  if [event_Name] == "postClick" {
    clickhouse {
      headers => ["authentication", "ck ur6LMIUD"]
      http_hosts => ["http://172.25.17.39:8123/"]
      table => "data_center.post_click"
      request_tolerance => 1
      flush_size => 1000
      pool_max => 100
      mutations => {
        member_id => member_id app_id => app_id post_id => post_id
        community_place => community_place add_time => add_time
        client_type => client_type client_version => client_version
        session_id => session_id mac => mac ip => ip
      }
    }
  }
  if [event_Name] == "postExposure" {
    clickhouse {
      headers => ["authentication", "ck ur6LMIUD"]
      http_hosts => ["http://172.25.17.39:8123/"]
      table => "data_center.post_exposure"
      request_tolerance => 1
      flush_size => 1000
      pool_max => 100
      mutations => {
        member_id => member_id app_id => app_id post_id => post_id
        community_place => community_place add_time => add_time
        client_type => client_type client_version => client_version
        session_id => session_id mac => mac ip => ip
      }
    }
  }
  if [event_Name] == "putClick" {
    clickhouse {
      headers => ["authentication", "ck ur6LMIUD"]
      http_hosts => ["http://172.25.17.39:8123/"]
      table => "data_center.put_click"
      request_tolerance => 1
      flush_size => 1000
      pool_max => 100
      mutations => {
        member_id => member_id put_type => put_type put_num => put_num app_id => app_id
        add_time => add_time client_type => client_type client_version => client_version
        session_id => session_id mac => mac ip => ip
      }
    }
  }
  if [event_Name] == "putExposure" {
    clickhouse {
      headers => ["authentication", "ck ur6LMIUD"]
      http_hosts => ["http://172.25.17.39:8123/"]
      table => "data_center.put_exposure"
      request_tolerance => 1
      flush_size => 1000
      pool_max => 100
      mutations => {
        member_id => member_id put_type => put_type add_time => add_time
        client_type => client_type client_version => client_version
        session_id => session_id mac => mac ip => ip
        put_num => put_num app_id => app_id
      }
    }
  }
  if [event_Name] == "clickEvent" {
    clickhouse {
      headers => ["authentication", "ck ur6LMIUD"]
      http_hosts => ["http://172.25.17.39:8123/"]
      table => "data_center.click_event"
      request_tolerance => 1
      flush_size => 1000
      pool_max => 100
      mutations => {
        member_id => member_id platform => platform add_time => add_time
        client_type => client_type client_version => client_version
        session_id => session_id mac => mac ip => ip
        app_id => app_id app_origin_id => app_origin_id
        app_comment_id => app_comment_id click_place => click_place
      }
    }
  }
}
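Before handing the pipeline to supervisord, the file can be syntax-checked; the path below assumes it is stored under the config/conf directory that the supervisord command in section 5.5 points at:
logstash -f /usr/local/logstash/config/conf/ck-logstash.conf --config.test_and_exit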
5.4 ck-logstash_yun.conf
input {
  file {
    path => ["/data/www/ROOT/txt-yun/*.txt"]
    start_position => "beginning"
  }
}

filter {
  mutate { gsub => ["message", "\t", "|"] }
  ruby { path => "/usr/local/logstash/config/ck-ruby.rb" }
  mutate {
    convert => ["member_id", "integer"]
    remove_field => ["@version", "host", "@timestamp", "path"]
  }
  if [event_Name] == "streamEvent" {
    mutate {
      convert => ["event_type", "integer"]
      convert => ["status", "integer"]
      convert => ["local_delay", "integer"]
      convert => ["node_delay", "integer"]
      convert => ["down_speed", "integer"]
      convert => ["disconnect_count", "integer"]
      convert => ["fps", "integer"]
      convert => ["local_fluctuate_count", "integer"]
      convert => ["node_fluctuate_count", "integer"]
    }
  }
  if [event_Name] == "otherClick" {
    mutate {
      convert => ["app_id", "integer"]
      convert => ["click_place", "integer"]
    }
  }
  if [event_Name] == "clientOpen" {
    mutate { convert => ["status", "integer"] }
  }
  if [event_Name] == "clientUpdate" {
    mutate { convert => ["result", "integer"] }
  }
  if [event_Name] == "login" {
    mutate {
      convert => ["member_type", "integer"]
      convert => ["result", "integer"]
    }
  }
  if [event_Name] == "postClick" {
    mutate {
      convert => ["app_id", "integer"]
      convert => ["post_id", "integer"]
      convert => ["community_place", "integer"]
    }
  }
  if [event_Name] == "postExposure" {
    mutate {
      convert => ["app_id", "integer"]
      convert => ["post_id", "integer"]
      convert => ["community_place", "integer"]
    }
  }
  if [event_Name] == "putClick" {
    mutate {
      convert => ["put_num", "integer"]
      convert => ["app_id", "integer"]
    }
  }
  if [event_Name] == "putExposure" {
    mutate {
      convert => ["put_num", "integer"]
      convert => ["app_id", "integer"]
    }
  }
  if [event_Name] == "clickEvent" {
    mutate {
      convert => ["app_id", "integer"]
      convert => ["platform", "integer"]
      convert => ["app_comment_id", "integer"]
      convert => ["click_place", "integer"]
    }
  }
}

output {
  if [event_Name] == "streamEvent" {
    clickhouse {
      headers => ["authentication", "ck ur6LMIUD"]
      http_hosts => ["http://172.25.17.39:8123/"]
      table => "data_center_yun.stream_event"
      request_tolerance => 1
      flush_size => 1000
      pool_max => 100
      mutations => {
        ip => ip add_time => add_time session_id => session_id member_id => member_id
        client_type => client_type client_version => client_version mac => mac
        event_type => event_type status => status message => message
        local_delay => local_delay node_delay => node_delay down_speed => down_speed
        disconnect_count => disconnect_count fps => fps
        local_fluctuate_count => local_fluctuate_count node_fluctuate_count => node_fluctuate_count
        stream_num => stream_num sn => sn
      }
    }
  }
  if [event_Name] == "otherClick" {
    clickhouse {
      headers => ["authentication", "ck ur6LMIUD"]
      http_hosts => ["http://172.25.17.39:8123/"]
      table => "data_center_yun.click_event"
      request_tolerance => 1
      flush_size => 1000
      pool_max => 100
      mutations => {
        member_id => member_id app_id => app_id click_place => click_place
        add_time => add_time client_type => client_type client_version => client_version
        session_id => session_id mac => mac ip => ip
      }
    }
  }
  if [event_Name] == "clientOpen" {
    clickhouse {
      headers => ["authentication", "ck ur6LMIUD"]
      http_hosts => ["http://172.25.17.39:8123/"]
      table => "data_center_yun.client_open"
      request_tolerance => 1
      flush_size => 1000
      pool_max => 100
      mutations => {
        status => status add_time => add_time client_type => client_type
        client_version => client_version session_id => session_id mac => mac ip => ip
      }
    }
  }
  if [event_Name] == "clientUpdate" {
    clickhouse {
      headers => ["authentication", "ck ur6LMIUD"]
      http_hosts => ["http://172.25.17.39:8123/"]
      table => "data_center_yun.client_update"
      request_tolerance => 1
      flush_size => 1000
      pool_max => 100
      mutations => {
        target_version => target_version result => result add_time => add_time
        client_type => client_type client_version => client_version
        session_id => session_id mac => mac ip => ip
      }
    }
  }
  if [event_Name] == "login" {
    clickhouse {
      headers => ["authentication", "ck ur6LMIUD"]
      http_hosts => ["http://172.25.17.39:8123/"]
      table => "data_center_yun.login"
      request_tolerance => 1
      flush_size => 1000
      pool_max => 100
      mutations => {
        member_id => member_id member_type => member_type result => result
        add_time => add_time client_type => client_type client_version => client_version
        session_id => session_id mac => mac ip => ip sn => sn
      }
    }
  }
  if [event_Name] == "postClick" {
    clickhouse {
      headers => ["authentication", "ck ur6LMIUD"]
      http_hosts => ["http://172.25.17.39:8123/"]
      table => "data_center_yun.post_click"
      request_tolerance => 1
      flush_size => 1000
      pool_max => 100
      mutations => {
        member_id => member_id app_id => app_id post_id => post_id
        community_place => community_place add_time => add_time
        client_type => client_type client_version => client_version
        session_id => session_id mac => mac ip => ip
      }
    }
  }
  if [event_Name] == "postExposure" {
    clickhouse {
      headers => ["authentication", "ck ur6LMIUD"]
      http_hosts => ["http://172.25.17.39:8123/"]
      table => "data_center_yun.post_exposure"
      request_tolerance => 1
      flush_size => 1000
      pool_max => 100
      mutations => {
        member_id => member_id app_id => app_id post_id => post_id
        community_place => community_place add_time => add_time
        client_type => client_type client_version => client_version
        session_id => session_id mac => mac ip => ip
      }
    }
  }
  if [event_Name] == "putClick" {
    clickhouse {
      headers => ["authentication", "ck ur6LMIUD"]
      http_hosts => ["http://172.25.17.39:8123/"]
      table => "data_center_yun.put_click"
      request_tolerance => 1
      flush_size => 1000
      pool_max => 100
      mutations => {
        member_id => member_id put_type => put_type put_num => put_num app_id => app_id
        add_time => add_time client_type => client_type client_version => client_version
        session_id => session_id mac => mac ip => ip
      }
    }
  }
  if [event_Name] == "putExposure" {
    clickhouse {
      headers => ["authentication", "ck ur6LMIUD"]
      http_hosts => ["http://172.25.17.39:8123/"]
      table => "data_center_yun.put_exposure"
      request_tolerance => 1
      flush_size => 1000
      pool_max => 100
      mutations => {
        member_id => member_id put_type => put_type add_time => add_time
        client_type => client_type client_version => client_version
        session_id => session_id mac => mac ip => ip
        put_num => put_num app_id => app_id
      }
    }
  }
  if [event_Name] == "clickEvent" {
    clickhouse {
      headers => ["authentication", "ck ur6LMIUD"]
      http_hosts => ["http://172.25.17.39:8123/"]
      table => "data_center_yun.click_event"
      request_tolerance => 1
      flush_size => 1000
      pool_max => 100
      mutations => {
        member_id => member_id platform => platform add_time => add_time
        client_type => client_type client_version => client_version
        session_id => session_id mac => mac ip => ip
        app_id => app_id app_origin_id => app_origin_id
        app_comment_id => app_comment_id click_place => click_place
      }
    }
  }
}
5.5 supervisord.conf
; Supervisor configuration file.
; For the full set of options, see: http://supervisord.org/configuration.html
; Notes:
; - Shell expansion ("~" or "$HOME") is not supported; environment variables
;   can be expanded using the syntax "%(ENV_HOME)s".
; - Inline comments must have a leading space: "a=b ;comment", not "a=b;comment".

[unix_http_server]
file=/var/run/supervisor.sock             ; the path to the socket file

; Warning: the inet HTTP server has no encryption and only basic authentication;
; bind it to localhost or keep it reachable only from a trusted network.
[inet_http_server]                        ; inet (TCP) server, disabled by default
port=0.0.0.0:9001                         ; ip_address:port specifier, *:port for all interfaces
username=user                             ; default is no username (open server)
password=123                              ; default is no password (open server)

[supervisord]
logfile=/usr/local/logstash/config/supervisord.log   ; main log file; default $CWD/supervisord.log
logfile_maxbytes=500MB                    ; max main logfile bytes before rotation; default 50MB
logfile_backups=10                        ; number of main logfile backups; 0 means none, default 10
loglevel=info                             ; log level; default info; others: debug, warn, trace
pidfile=/usr/local/logstash/config/supervisord.pid   ; supervisord pidfile; default supervisord.pid
nodaemon=false                            ; start in foreground if true; default false
minfds=1024                               ; min. available startup file descriptors; default 1024
minprocs=200                              ; min. available process descriptors; default 200

; The rpcinterface:supervisor section must remain in the config file for
; RPC (supervisorctl/web interface) to work.
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface

; The supervisorctl section configures how supervisorctl connects to supervisord;
; it must match either the unix_http_server or inet_http_server section above.
[supervisorctl]
serverurl=unix:///var/run/supervisor.sock ; use a unix:// URL for a unix socket
;serverurl=http://0.0.0.0:9001            ; or an http:// URL for an inet socket

[program:supervisord_logstash]
; -f points at a directory so that every pipeline file placed in it is started;
; point it at a single .conf file to run just one pipeline. -r reloads on config changes.
command=/usr/local/logstash/logstash-7.7.0/bin/logstash -f /usr/local/logstash/config/conf -r
directory=/usr/local/logstash/logstash-7.7.0/bin
stdout_logfile=/usr/local/logstash/config/supervisord_logstash.log
stdout_logfile_maxbytes=50MB
stdout_capture_maxbytes=10MB              ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false
redirect_stderr=true
autostart=true
autorestart=true
startsecs=10
stopwaitsecs=600
priority=998
user=root

[include]
files = /usr/local/logstash/config/*.ini
5.6 Start the service
supervisord -c /usr/local/logstash/config/supervisord.conf       # start supervisord
ps aux | grep supervisord                                        # check that the supervisord process is running
supervisorctl -c /usr/local/logstash/config/supervisord.conf     # enter the supervisorctl shell
# commonly used supervisorctl commands (the program name defined above is supervisord_logstash):
status
reload
restart supervisord_logstash
stop supervisord_logstash
start supervisord_logstash
exit
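Once events start flowing, a rough end-to-end check is to count rows in one of the target tables over the ClickHouse HTTP interface. This is only a sketch: it reuses the host and authentication header from the pipeline configs, which may need to be adapted to however your ClickHouse endpoint actually authenticates:
curl -H "authentication: ck ur6LMIUD" "http://172.25.17.39:8123/" --data-binary "SELECT count() FROM data_center.stream_event"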