Kafka Cluster Deployment

1、Environment Preparation

1.1、Host Preparation

1.1.1、Host List

kafka-01 192.168.10.30
kafka-02 192.168.10.31
kafka-03 192.168.10.32
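
Optionally, map the hostnames to their IPs on every node. This is only a convenience so the per-node prompts below (kafka-01/02/03) resolve; all of the configuration that follows uses raw IPs, so skip this if DNS or /etc/hosts is already handled:

cat >> /etc/hosts <<'EOF'
192.168.10.30 kafka-01
192.168.10.31 kafka-02
192.168.10.32 kafka-03
EOF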

1.1.2、Synchronize Host Time

# Set the time zone
timedatectl set-timezone Asia/Shanghai

# Synchronize the system time with NTP
apt install -y ntpdate cron
systemctl start cron
systemctl enable cron

cat << 'CAT_END' > /var/spool/cron/crontabs/root 
#ntp Server update
*/5 * * * * /usr/sbin/ntpdate ntp5.aliyun.com > /dev/null 2>&1
#ntp end
CAT_END

ntpdate ntp5.aliyun.com
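
1.1.3、Install a JDK

Both ZooKeeper and Kafka need a JDK, and the systemd units later in this guide set JAVA_HOME=/usr/local/java, which is not created anywhere else. The following is a minimal sketch for Debian/Ubuntu; the OpenJDK 11 package name and JVM path are assumptions, so point the symlink at whichever JDK (11 or newer) is actually installed.

apt install -y openjdk-11-jdk
ln -s /usr/lib/jvm/java-11-openjdk-amd64 /usr/local/java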

1.2、Install the ZooKeeper Cluster

1.2.1、Download the Software

# Be sure to check which ZooKeeper version this Kafka release ships with, and install that version

# cat /data/server/kafka_2.12-3.4.0/LICENSE | grep zookee
zookeeper-3.6.3
zookeeper-jute-3.6.3

# The version must match, otherwise Kafka will fail to start
curl -O https://archive.apache.org/dist/zookeeper/zookeeper-3.6.3/apache-zookeeper-3.6.3-bin.tar.gz

tar xf apache-zookeeper-3.6.3-bin.tar.gz -C /data/server/
ln -s /data/server/apache-zookeeper-3.6.3-bin /usr/local/zookeeper
cp /usr/local/zookeeper/conf/zoo_sample.cfg /usr/local/zookeeper/conf/zoo.cfg 
mkdir /usr/local/zookeeper/{data,logs}

kafka-01:~# echo "1" >/usr/local/zookeeper/data/myid
kafka-02:~# echo "2" >/usr/local/zookeeper/data/myid
kafka-03:~# echo "3" >/usr/local/zookeeper/data/myid
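
# Quick sanity check on every node: the myid value must match that host's server.N entry in zoo.cfg below
cat /usr/local/zookeeper/data/myid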

1.2.2、kafka-01 zoo.cfg Configuration

root@kafka-01:~# grep -i '^[a-Z]' /usr/local/zookeeper/conf/zoo.cfg            
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/zookeeper/data
dataLogDir=/usr/local/zookeeper/logs
clientPort=2181
clientPortAddress=192.168.10.30
maxClientCnxns=100
server.1=192.168.10.30:2182:2183
server.2=192.168.10.31:2182:2183
server.3=192.168.10.32:2182:2183

1.2.3、kafka-02 zoo.cfg Configuration

root@kafka-02:~# grep -i '^[a-Z]' /usr/local/zookeeper/conf/zoo.cfg    
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/zookeeper/data
dataLogDir=/usr/local/zookeeper/logs
clientPort=2181
clientPortAddress=192.168.10.31
maxClientCnxns=100
server.1=192.168.10.30:2182:2183
server.2=192.168.10.31:2182:2183
server.3=192.168.10.32:2182:2183

1.2.4、kafka-03 zoo.cfg Configuration

root@kafka-03:~# grep -i '^[a-Z]' /usr/local/zookeeper/conf/zoo.cfg 
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/zookeeper/data
dataLogDir=/usr/local/zookeeper/logs
clientPort=2181
clientPortAddress=192.168.10.32
maxClientCnxns=100
server.1=192.168.10.30:2182:2183
server.2=192.168.10.31:2182:2183
server.3=192.168.10.32:2182:2183
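
A note on the server.N lines: N must match the myid written on that node, the first port (2182 here) is used by followers to connect to the leader, and the second (2183) is used for leader election. The conventional defaults are 2888:3888, but any free ports work as long as all three zoo.cfg files agree.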

1.2.5、Write the systemd Unit

cat <<'CAT_END' > /lib/systemd/system/zk.service
[Unit]
Description=Apache zookeeper
After=network.target
[Service]
Type=forking
Environment=JAVA_HOME=/usr/local/java
ExecStart=/usr/local/zookeeper/bin/zkServer.sh start
ExecStop=/usr/local/zookeeper/bin/zkServer.sh stop
Restart=always
RestartSec=20
[Install]
WantedBy=multi-user.target
CAT_END
systemctl daemon-reload
systemctl start zk
systemctl enable zk
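
The unit has to be started on all three nodes; until a quorum (2 of 3) is up, zkServer.sh status reports an error instead of a role. A quick per-node check:

systemctl is-active zk
journalctl -u zk -n 20 --no-pager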

1.2.6、Check Role Status

root@kafka-01:~# /usr/local/zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper/bin/../conf/zoo.cfg
Client port found: 2181. Client address: 192.168.10.30. Client SSL: false.
Mode: follower

root@kafka-02:~# /usr/local/zookeeper/bin/zkServer.sh  status
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper/bin/../conf/zoo.cfg
Client port found: 2181. Client address: 192.168.10.31. Client SSL: false.
Mode: leader

root@kafka-03:~# /usr/local/zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper/bin/../conf/zoo.cfg
Client port found: 2181. Client address: 192.168.10.32. Client SSL: false.
Mode: follower
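
As an extra connectivity check, the bundled CLI can connect to any node and list the root znode (before Kafka starts it will contain little more than /zookeeper):

/usr/local/zookeeper/bin/zkCli.sh -server 192.168.10.30:2181 ls /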

2、Install the Kafka Cluster

2.1、Download the Software

curl -O https://mirrors.tuna.tsinghua.edu.cn/apache/kafka/3.4.0/kafka_2.12-3.4.0.tgz

2.2、Install the Software

2.2.1、Extract the Archive

tar xf kafka_2.12-3.4.0.tgz -C /data/server/
ln -s /data/server/kafka_2.12-3.4.0 /usr/local/kafka

2.2.2、Configure Environment Variables

echo "export PATH=/usr/local/kafka/bin:$PATH" >/etc/profile.d/kafka.sh
source /etc/profile.d/kafka.sh
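
With the PATH entry in place, the Kafka scripts can be invoked without the full path; printing the version is a quick way to confirm both the extraction and the JDK:

kafka-topics.sh --version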

2.3、Configuration Overview

2.3.1、Directory Layout

# tree -L 1 /usr/local/kafka/
/usr/local/kafka/
├── LICENSE
├── NOTICE
├── bin       # Kafka command-line scripts
├── config    # configuration files
├── libs      # library jars
├── licenses
└── site-docs

2.3.2、Configuration Directory

# tree /usr/local/kafka/config/
/usr/local/kafka/config/
├── connect-console-sink.properties
├── connect-console-source.properties
├── connect-distributed.properties
├── connect-file-sink.properties
├── connect-file-source.properties
├── connect-log4j.properties
├── connect-mirror-maker.properties
├── connect-standalone.properties
├── consumer.properties
├── kraft
│   ├── broker.properties
│   ├── controller.properties
│   └── server.properties
├── log4j.properties
├── producer.properties
├── server.properties       # the core broker configuration file
├── tools-log4j.properties
├── trogdor.conf
└── zookeeper.properties    # config for the ZooKeeper bundled with Kafka; not recommended for a cluster

2.3.3、server.properties Settings Explained

# grep -Ev '^#|^$' /usr/local/kafka/config/server.properties 
broker.id=0                                # unique id of this broker; with a matching ZooKeeper cluster it is convenient to keep it in step with myid
#listeners=PLAINTEXT://:9092               # address and port the broker listens on
num.network.threads=3                      # number of threads the broker uses for network processing
num.io.threads=8                           # number of threads the broker uses for disk I/O
socket.send.buffer.bytes=102400            # socket send buffer size
socket.receive.buffer.bytes=102400         # socket receive buffer size
socket.request.max.bytes=104857600         # maximum size of a request the broker will accept; must not exceed the Java heap size
log.dirs=/tmp/kafka-logs                   # directories where message data is stored, comma-separated; num.io.threads should be at least the number of directories
num.partitions=1                           # default number of partitions per topic
num.recovery.threads.per.data.dir=1        # threads per data directory used for log recovery at startup
offsets.topic.replication.factor=1         # replication factor of the internal offsets topic
transaction.state.log.replication.factor=1 # replication factor of the transaction state topic
transaction.state.log.min.isr=1
# default.replication.factor=2             # default replication factor for automatically created topics
log.segment.bytes=1073741824               # maximum size of a single log segment file, 1 GB
log.retention.hours=168                    # default message retention time: 168 hours (7 days)
log.retention.check.interval.ms=300000     # interval between retention checks, 300000 ms (5 minutes)
zookeeper.connect=localhost:2181           # ZooKeeper connection string; multiple addresses are separated by commas
zookeeper.connection.timeout.ms=18000      # ZooKeeper connection timeout
group.initial.rebalance.delay.ms=0

2.4、Modify the Kafka Configuration

2.4.1、Create the Log Directory

mkdir /usr/local/kafka/logs

2.4.2、[kafka-01] Edit server.properties

kafka-01:~# grep -i '^[a-Z]' /usr/local/kafka/config/server.properties 
broker.id=1
listeners=PLAINTEXT://192.168.10.30:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/usr/local/kafka/logs
num.partitions=3
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=3
transaction.state.log.replication.factor=3
transaction.state.log.min.isr=2
log.retention.hours=168
log.retention.check.interval.ms=300000
zookeeper.connect=192.168.10.32:2181,192.168.10.31:2181,192.168.10.30:2181
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0

2.4.3、[kafka-02] Edit server.properties

kafka-02:~# grep -i '^[a-Z]' /usr/local/kafka/config/server.properties 
broker.id=2
listeners=PLAINTEXT://192.168.10.31:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/usr/local/kafka/logs
num.partitions=3
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=3
transaction.state.log.replication.factor=3
transaction.state.log.min.isr=2
log.retention.hours=168
log.retention.check.interval.ms=300000
zookeeper.connect=192.168.10.32:2181,192.168.10.31:2181,192.168.10.30:2181
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0

2.4.4、[kafka-03] Edit server.properties

kafka-03:~# grep -i '^[a-Z]' /usr/local/kafka/config/server.properties
broker.id=3
listeners=PLAINTEXT://192.168.10.32:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/usr/local/kafka/logs
num.partitions=3
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=3
transaction.state.log.replication.factor=3
transaction.state.log.min.isr=2
log.retention.hours=168
log.retention.check.interval.ms=300000
zookeeper.connect=192.168.10.32:2181,192.168.10.31:2181,192.168.10.30:2181
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0
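
The three files differ only in broker.id and listeners; everything else is identical. If editing each file by hand is tedious, a hedged helper sketch like the following derives both values from the node's own address (it assumes hostname -I returns the 192.168.10.x address first):

IP=$(hostname -I | awk '{print $1}')
ID=$(( ${IP##*.} - 29 ))   # 192.168.10.30 -> 1, .31 -> 2, .32 -> 3
sed -Ei "s|^broker.id=.*|broker.id=${ID}|" /usr/local/kafka/config/server.properties
sed -Ei "s|^#?listeners=.*|listeners=PLAINTEXT://${IP}:9092|" /usr/local/kafka/config/server.properties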

2.5、Start Kafka

2.5.1、Start in the Foreground

/usr/local/kafka/bin/kafka-server-start.sh /usr/local/kafka/config/server.properties 

2.5.2、Start in the Background (daemon)

/usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
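
With -daemon, the broker's application log goes to /usr/local/kafka/logs/server.log by default (note that this is the same directory configured as log.dirs above, so data segments and application logs share it); tailing it is a quick way to confirm the broker started cleanly:

tail -n 20 /usr/local/kafka/logs/server.log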

2.5.3、Stop Kafka

# kafka-server-stop.sh takes no arguments; it finds and signals the running broker process
/usr/local/kafka/bin/kafka-server-stop.sh

2.5.4、Write the systemd Unit

cat <<'CAT_END' > /lib/systemd/system/kafka.service
[Unit]
Description=Apache kafka
After=network.target
[Service]
Type=forking
Environment=JAVA_HOME=/usr/local/java
ExecStart=/usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties 
ExecStop=/usr/local/kafka/bin/kafka-server-stop.sh
Restart=always
RestartSec=20
[Install]
WantedBy=multi-user.target
CAT_END
systemctl daemon-reload
systemctl start kafka
systemctl enable kafka

2.6、Verify the Result

kafka-01:~# ss -tunlp | grep 9092
tcp   LISTEN 0      50     [::ffff:192.168.10.30]:9092             *:*    users:(("java",pid=107842,fd=134)) 

kafka-02:~# ss -tunlp | grep 9092
tcp   LISTEN 0      50     [::ffff:192.168.10.31]:9092             *:*    users:(("java",pid=570054,fd=134)) 

kafka-03:~# ss -tunlp | grep 9092
tcp   LISTEN 0      50     [::ffff:192.168.10.32]:9092             *:*    users:(("java",pid=569711,fd=134)) 

kafka-01:~# jps
107842 Kafka
106869 QuorumPeerMain
107928 Jps

kafka-02:~# jps
569544 QuorumPeerMain
570148 Jps
570054 Kafka

kafka-03:~# jps
569816 Jps
569711 Kafka
569252 QuorumPeerMain
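
2.7、Functional Smoke Test (optional)

As a final end-to-end check (the topic name here is just an example), create a replicated topic, confirm its partitions are spread across all three brokers, then produce on one node and consume on another:

/usr/local/kafka/bin/kafka-topics.sh --bootstrap-server 192.168.10.30:9092 --create --topic smoke-test --partitions 3 --replication-factor 3
/usr/local/kafka/bin/kafka-topics.sh --bootstrap-server 192.168.10.30:9092 --describe --topic smoke-test

# On kafka-01: type a few lines, then Ctrl-C to exit
/usr/local/kafka/bin/kafka-console-producer.sh --bootstrap-server 192.168.10.30:9092 --topic smoke-test

# On kafka-02: the lines typed above should appear
/usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server 192.168.10.31:9092 --topic smoke-test --from-beginning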

 
