elk 中kafka启动脚本和配置文件

kafka启动脚本和配置文件

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
# more kafka
#!/bin/sh 
# Init script for kafka 
### BEGIN INIT INFO 
# Provides:          kafka 
# Default-Start:     2 3 4 5 
# Default-Stop:      0 1 6 
# Short-Description: 
# Description:        Starts kafka as a daemon. 
### END INIT INFO 
   
PATH=$PATH:/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/kafka/bin
export PATH 
export JAVA_HOME=/usr/local/java
 
# adirname - return absolute dirname of given file
adirname() { odir=`pwd`; cd `dirname $1`; pwd; cd "${odir}"; }
MYNAME=`basename "$0"`
MYDIR=`adirname "$0"`
name="${MYNAME}"
 
KAFKA_USER=elasticsearch 
KAFKA_GROUP=elasticsearch 
KAFKA_HOME=/usr/local/kafka 
KAFKA_LOG_DIR="${MYDIR}/logs"
KAFKA_CONF_DIR="${MYDIR}/config"
KAFKA_CONF_FILENAME=server.properties
JMX_PORT=5760
# NOTE: this assignment was previously broken across two lines in the middle
# of "-Xloggc:", which embedded a raw newline in the JVM options string and
# split the flag into "-X" + "loggc:..." — rejoined onto one line.
KAFKA_HEAP_OPTS="-Xms1G -Xmx1G -XX:NewRatio=2 -XX:SurvivorRatio=8 -XX:MaxMetaspaceSize=512M -XX:CompressedClassSpaceSize=512M -Xloggc:$KAFKA_LOG_DIR/gc.log -verbose:gc -XX:+PrintGCApplicationStoppedTime -XX:+PrintGCDateStamps -XX:+PrintGCDetails"
 
KAFKA_LOG_FILE="${KAFKA_LOG_DIR}/$name.log" 
pidfile="${KAFKA_LOG_DIR}/$name.pid" 
KAFKA_CONF_DIR_FILE="$KAFKA_CONF_DIR/$KAFKA_CONF_FILENAME"
KILL_ON_STOP_TIMEOUT=${KILL_ON_STOP_TIMEOUT-0} #default value is zero to this variable but could be updated by user request 
KAFKA_OPTS="" 
KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$KAFKA_CONF_DIR/log4j.properties"
 
# Allow distro-style overrides of any of the above.
[ -r /etc/default/$name ] && . /etc/default/$name 
[ -r /etc/sysconfig/$name ] && . /etc/sysconfig/$name 
   
program=$KAFKA_HOME/bin/kafka-server-start.sh 
args=" ${KAFKA_CONF_DIR_FILE}" 
   
# quiet CMD... - run CMD with stdout/stderr discarded, preserving its status.
quiet() { 
  "$@" > /dev/null 2>&1 
  return $? 
}
# start - launch kafka in the background and record its pid in $pidfile.
start() { 
   
  KAFKA_JAVA_OPTS="${KAFKA_OPTS} -Djava.io.tmpdir=${KAFKA_HOME}" 
  HOME=${KAFKA_HOME}
 
  export PATH HOME KAFKA_JAVA_OPTS KAFKA_HEAP_OPTS JMX_PORT
   
  # Run the program in the background: without '&', $! below is not the
  # daemon's pid and the script blocks until kafka itself exits.
  $program $args > "${KAFKA_LOG_DIR}/$name.stdout" 2> "${KAFKA_LOG_DIR}/$name.err" &
   
  # Generate the pidfile from here. If we instead made the forked process 
  # generate it there will be a race condition between the pidfile writing 
  # and a process possibly asking for status. 
  echo $! > "$pidfile"
   
  echo "$name started." 
  return 0
}
   
# stop - send SIGTERM, wait up to ~9s, optionally escalate to SIGKILL
# when KILL_ON_STOP_TIMEOUT=1. Returns 1 if the process survives.
stop() { 
  # Try a few times to kill TERM the program 
  if status ; then 
    pid=`cat "$pidfile"`
    echo "Killing $name (pid $pid) with SIGTERM"
    # Send SIGTERM first so kafka can shut down cleanly. The previous
    # 'ps|grep|kill -9' killed immediately, contradicting this message and
    # making the wait/escalation logic below dead code.
    # NOTE(review): this signals only the pid from the pidfile, not any
    # child java process — confirm kafka-server-start.sh exec's java.
    kill -TERM "$pid"
    # Wait for it to exit. 
    for i in 1 2 3 4 5 6 7 8 9 ; do 
      echo "Waiting $name (pid $pid) to die..." 
      status || break 
      sleep 1 
    done 
    if status ; then 
      if [ $KILL_ON_STOP_TIMEOUT -eq 1 ] ; then 
        echo "Timeout reached. Killing $name (pid $pid) with SIGKILL. This may result in data loss." 
        kill -KILL $pid 
        echo "$name killed with SIGKILL." 
      else 
        echo "$name stop failed; still running." 
        return 1 # stop timed out and not forced 
      fi 
    else 
      echo "$name stopped." 
    fi 
  fi 
}
   
# status - 0 if a process with the recorded pid is alive,
#          2 if the pidfile exists but the process is dead,
#          3 if there is no pidfile at all.
status() { 
  if [ -f "$pidfile" ] ; then 
    pid=`cat "$pidfile"`
    if kill -0 $pid > /dev/null 2> /dev/null ; then 
      # process by this pid is running. 
      # It may not be our pid, but that's what you get with just pidfiles. 
      # TODO(sissel): Check if this process seems to be the same as the one we 
      # expect. It'd be nice to use flock here, but flock uses fork, not exec, 
      # so it makes it quite awkward to use in this case. 
      return 0
    else 
      return 2 # program is dead but pid file exists 
    fi 
  else 
    return 3 # program is not running 
  fi 
}
   
# configtest - fail (non-zero) when the config directory is empty; the
# actual syntax check is left commented out, as in the original.
configtest() { 
  # Check if a config file exists 
  if [ ! "$(ls -A ${KAFKA_CONF_DIR}/* 2> /dev/null)" ]; then 
    echo "There aren't any configuration files in ${KAFKA_CONF_DIR}" 
    # bare 'return' here returned 0 (echo's status), reporting a missing
    # config as success to the 'check' action — return failure instead.
    return 1
  fi 
   
  HOME=${KAFKA_HOME} 
  export PATH HOME 
   
  #test_args="" 
  #$program ${test_args} 
  #[ $? -eq 0 ] && return 0 
  # Program not configured 
  #return 6 
}
   
# Dispatch on the requested action.
case "$1" in 
  start) 
    status 
    code=$? 
    if [ $code -eq 0 ]; then 
      echo "$name is already running" 
    else 
      start 
      code=$? 
    fi 
    exit $code 
    ;; 
  stop) stop ;; 
  # NOTE(review): force_stop and reload are not defined anywhere in this
  # script; these arms currently fail with "command not found" (127).
  # TODO: implement them or remove the arms.
  force-stop) force_stop ;; 
  status) 
    status 
    code=$? 
    if [ $code -eq 0 ] ; then 
      echo "$name is running" 
    else 
      echo "$name is not running" 
    fi 
    exit $code 
    ;; 
  reload) reload ;; 
  restart) 
    stop &&     start
    ;; 
  check) 
    configtest 
    exit $? 
    ;; 
  *) 
    # was "$SCRIPTNAME", a variable never set in this script
    echo "Usage: $name {start|stop|status|restart|check}" >&2 
    exit 3 
  ;; 
esac 
   
exit $? 

 

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
# cat server.properties |grep -v "^#"|grep -v "^$"
broker.id=1
delete.topic.enable=true
default.replication.factor=2
listeners=PLAINTEXT://192.168.1.190:9092
advertised.listeners=PLAINTEXT://192.168.1.190:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
message.max.bytes = 10240000
log.dirs=/home/elasticsearch/kafka/logs
num.partitions=6
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
# log.cleaner.enable appeared twice (false, then true); since the last
# occurrence wins in a properties file the effective value was true —
# the shadowed "false" entry has been removed.
log.cleaner.enable=true
log.cleanup.policy=delete
log.cleaner.min.compaction.lag.ms=86400000
zookeeper.connect=zoo1:2181,zoo2:2181,zoo3:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
1
2
3
# cat producer.properties |grep -v "^#"|grep -v "^$"
# Fixed two typos: "zoo1:9020" (wrong port — brokers listen on 9092 per
# server.properties) and "zoo29092" (missing colon between host and port).
bootstrap.servers=zoo1:9092,zoo2:9092,zoo3:9092
compression.type=none

  

posted @   MR__Wang  阅读(420)  评论(0编辑  收藏  举报
编辑推荐:
· AI与.NET技术实操系列:基于图像分类模型对图像进行分类
· go语言实现终端里的倒计时
· 如何编写易于单元测试的代码
· 10年+ .NET Coder 心语,封装的思维:从隐藏、稳定开始理解其本质意义
· .NET Core 中如何实现缓存的预热?
阅读排行:
· 25岁的心里话
· 闲置电脑爆改个人服务器(超详细) #公网映射 #Vmware虚拟网络编辑器
· 零经验选手,Compose 一天开发一款小游戏!
· 因为Apifox不支持离线,我果断选择了Apipost!
· 通过 API 将Deepseek响应流式内容输出到前端
点击右上角即可分享
微信分享提示