[root@k8s-master01 集群]# cat zk-cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: infra-zk-scripts
  namespace: infra
  labels:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/component: zookeeper
data:
  init-certs.sh: |-
    #!/bin/bash
  setup.sh: |-
    #!/bin/bash
    # Reuse an existing server ID if the data volume already has one;
    # otherwise derive it from the StatefulSet pod ordinal.
    if [[ -f "/bitnami/zookeeper/data/myid" ]]; then
      export ZOO_SERVER_ID="$(cat /bitnami/zookeeper/data/myid)"
    else
      HOSTNAME="$(hostname -s)"
      if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
        ORD=${BASH_REMATCH[2]}
        export ZOO_SERVER_ID="$((ORD + 1))"
      else
        echo "Failed to get index from hostname $HOSTNAME"
        exit 1
      fi
    fi
    exec /entrypoint.sh /run.sh
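The setup.sh script maps each StatefulSet pod's ordinal to a ZooKeeper server ID (zk-test-0 → 1, zk-test-1 → 2, zk-test-2 → 3), and falls back to an existing myid file on restarts. For context, below is a minimal sketch of how a StatefulSet could consume this ConfigMap by mounting the script and running it as the container command; it is not the full manifest used in this deployment, and details such as the image tag and mount path are assumptions.

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk-test
  namespace: infra
spec:
  serviceName: zk-headless          # headless Service referenced by zoo.cfg below
  replicas: 3
  selector:
    matchLabels:
      app.kubernetes.io/component: zookeeper
  template:
    metadata:
      labels:
        app.kubernetes.io/component: zookeeper
    spec:
      containers:
        - name: zookeeper
          image: bitnami/zookeeper:3.8          # assumed tag
          command: ["/scripts/setup.sh"]        # runs the ConfigMap script, which exec's the normal entrypoint
          volumeMounts:
            - name: scripts
              mountPath: /scripts/setup.sh      # assumed mount path
              subPath: setup.sh
      volumes:
        - name: scripts
          configMap:
            name: infra-zk-scripts
            defaultMode: 0755                   # script must be executable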
[root@k8s-master01 集群]# kubectl get po -n infra -l app.kubernetes.io/component=zookeeper -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
zk-test-0 1/1 Running 0 10m 10.244.195.58 k8s-master03 <none> <none>
zk-test-1 1/1 Running 0 10m 10.244.85.200 k8s-node01 <none> <none>
zk-test-2 1/1 Running 0 10m 10.244.58.196 k8s-node02 <none> <none>
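Optionally, you can confirm that each pod ended up with the expected server ID by reading the myid file that setup.sh checks for, assuming the Bitnami entrypoint has persisted it into the data directory:

kubectl exec -it zk-test-0 -n infra -- cat /bitnami/zookeeper/data/myid   # expected: 1
kubectl exec -it zk-test-1 -n infra -- cat /bitnami/zookeeper/data/myid   # expected: 2
kubectl exec -it zk-test-2 -n infra -- cat /bitnami/zookeeper/data/myid   # expected: 3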
1.6. View the ZooKeeper configuration
[root@k8s-master01 集群]# kubectl exec -it zk-test-0 -n infra -- cat /opt/bitnami/zookeeper/conf/zoo.cfg
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/bitnami/zookeeper/data
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
autopurge.purgeInterval=0
#
# Metrics Providers
#
# https://prometheus.io Metrics Exporter
#metricsProvider.className=org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider
#metricsProvider.httpHost=0.0.0.0
#metricsProvider.httpPort=7000
#metricsProvider.exportJvmInfo=true
preAllocSize=65536
snapCount=100000
maxCnxns=0
reconfigEnabled=false
quorumListenOnAllIPs=false
4lw.commands.whitelist=srvr, mntr, ruok
maxSessionTimeout=40000
admin.serverPort=8080
admin.enableServer=true
server.1=zk-test-0.zk-headless.infra.svc.cluster.local:2888:3888;2181
server.2=zk-test-1.zk-headless.infra.svc.cluster.local:2888:3888;2181
server.3=zk-test-2.zk-headless.infra.svc.cluster.local:2888:3888;2181
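The server.N entries resolve through the headless Service zk-headless in the infra namespace: 2888 is the follower (quorum) port, 3888 is the leader-election port, and the port after the semicolon (2181) is the client port. Below is a minimal sketch of such a headless Service; the port names and selector labels are assumptions rather than a copy of the actual manifest.

apiVersion: v1
kind: Service
metadata:
  name: zk-headless
  namespace: infra
spec:
  clusterIP: None                    # headless: gives each pod a stable DNS name
  publishNotReadyAddresses: true     # lets peers resolve each other before they are Ready (assumed)
  selector:
    app.kubernetes.io/component: zookeeper
  ports:
    - name: tcp-client
      port: 2181
    - name: tcp-follower
      port: 2888
    - name: tcp-election
      port: 3888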
1.7. Check the ZooKeeper cluster status
If the output below shows one pod in leader mode and the other two in follower mode, the cluster is working correctly and the deployment is complete.
[root@k8s-master01 集群]# kubectl exec -it zk-test-0 -n infra -- /opt/bitnami/zookeeper/bin/zkServer.sh status
/opt/bitnami/java/bin/java
ZooKeeper JMX enabled by default
Using config: /opt/bitnami/zookeeper/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower
[root@k8s-master01 集群]# kubectl exec -it zk-test-1 -n infra -- /opt/bitnami/zookeeper/bin/zkServer.sh status
/opt/bitnami/java/bin/java
ZooKeeper JMX enabled by default
Using config: /opt/bitnami/zookeeper/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: leader
[root@k8s-master01 集群]# kubectl exec -it zk-test-2 -n infra -- /opt/bitnami/zookeeper/bin/zkServer.sh status
/opt/bitnami/java/bin/java
ZooKeeper JMX enabled by default
Using config: /opt/bitnami/zookeeper/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower
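As an extra sanity check, you can write a znode on one member and read it back from another to confirm that data replicates across the quorum. The znode name below is only an example for this test; the zkCli.sh path matches the Bitnami image used above.

# create a test znode via zk-test-0
kubectl exec -it zk-test-0 -n infra -- /opt/bitnami/zookeeper/bin/zkCli.sh create /smoke-test "hello"
# read it back via zk-test-2; it should return "hello"
kubectl exec -it zk-test-2 -n infra -- /opt/bitnami/zookeeper/bin/zkCli.sh get /smoke-test
# clean up the test znode
kubectl exec -it zk-test-0 -n infra -- /opt/bitnami/zookeeper/bin/zkCli.sh delete /smoke-test

Since the config whitelists the srvr, mntr, and ruok four-letter-word commands and enables the AdminServer (admin.serverPort=8080), the same monitoring data is also available over HTTP at /commands/mntr inside each pod, for example with curl if it is installed in the image.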