k8s 部署zookeeper集群
ZooKeeper 资源清单（ConfigMap / Service / StatefulSet）
# ZooKeeper 3-node ensemble on Kubernetes:
# ConfigMap (zoo.cfg) + headless Service (peer discovery) + NodePort Service
# (external clients) + StatefulSet (stable pod identity) + per-pod PVC.
apiVersion: v1
kind: ConfigMap
metadata:
  name: zookeeper-config
  namespace: app
data:
  zoo.cfg: |
    tickTime=5000
    initLimit=10
    syncLimit=5
    dataDir=/data
    clientPort=2181
    autopurge.snapRetainCount=3
    #zookeeper.electionPortBindRetry=10
    autopurge.purgeInterval=1
    # The ordinal in server.N must match the content of /data/myid
    # written by the init container (a trailing comment on the server.N
    # line itself would corrupt the quorum address, so it lives here).
    server.0=zookeeper-cluster-0.zookeeper-headless.app.svc.cluster.local:2888:3888
    server.1=zookeeper-cluster-1.zookeeper-headless.app.svc.cluster.local:2888:3888
    server.2=zookeeper-cluster-2.zookeeper-headless.app.svc.cluster.local:2888:3888
---
# Headless service: gives each StatefulSet pod a stable DNS name
# (zookeeper-cluster-N.zookeeper-headless.app.svc.cluster.local)
# used by the server.N entries above.
apiVersion: v1
kind: Service
metadata:
  name: zookeeper-headless
  namespace: app
  labels:
    app: zookeeper
spec:
  clusterIP: None
  ports:
    - name: client
      port: 2181
      targetPort: 2181
    - name: quorum
      port: 2888
      targetPort: 2888
    - name: leader-election
      port: 3888
      targetPort: 3888
  selector:
    app: zookeeper
---
# NodePort service for clients outside the cluster.
kind: Service
apiVersion: v1
metadata:
  name: zk-external
  namespace: app
  labels:
    app: zookeeper
spec:
  type: NodePort
  ports:
    - name: http
      port: 2181
      protocol: TCP
      targetPort: 2181
      # nodePort intentionally left empty: Kubernetes assigns a random
      # port from the node-port range (30000-32767 by default).
      nodePort:
  selector:
    app: zookeeper
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zookeeper-cluster
  namespace: app
  labels:
    app: zookeeper
spec:
  serviceName: zookeeper-headless
  replicas: 3
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
    spec:
      initContainers:
        # Writes /data/myid before ZooKeeper starts. The StatefulSet
        # ordinal (zookeeper-cluster-N -> N) must match server.N in zoo.cfg.
        - name: init-myid
          image: xx:10006/k8s/busybox:1.28
          imagePullPolicy: IfNotPresent
          command:
            - sh
            - -c
            - |
              # Strip everything up to the last '-' from the pod hostname;
              # robust even if the StatefulSet name itself contains dashes.
              hn="$(hostname)"
              echo "${hn##*-}" > /data/myid
          volumeMounts:
            - name: data
              mountPath: /data   # same PVC as the main container, so myid survives
      containers:
        - name: zookeeper-cluster
          # NOTE(review): pinning a fixed tag (e.g. zookeeper:3.9.3) instead of
          # :latest is recommended for reproducible rollouts — confirm desired version.
          image: zookeeper:latest
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 2181
              name: client
            - containerPort: 2888
              name: quorum
            - containerPort: 3888
              name: leader-election
          volumeMounts:
            - name: data
              mountPath: /data   # shared with the init container (myid + snapshots)
            - name: config
              mountPath: /conf   # the official image reads ZOOCFGDIR=/conf
            #- name: config
            #  mountPath: /apache-zookeeper-3.9.3-bin/conf
      volumes:
        - name: config
          configMap:
            name: zookeeper-config
  # One 1Gi PVC per pod, named data-zookeeper-cluster-N; retained across restarts.
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 1Gi
部署后查看各节点的 myid 与集群角色状态
root@zookeeper-cluster-0:/apache-zookeeper-3.9.3-bin/bin# cat /data/myid
0
]# kubectl exec -it -n app zookeeper-cluster-1 -- cat /data/myid
1
]# kubectl exec -it -n app zookeeper-cluster-2 -- cat /data/myid
2
]# kubectl exec -it -n app zookeeper-cluster-2 -- /apache-zookeeper-3.9.3-bin/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: leader
]# kubectl exec -it -n app zookeeper-cluster-1 -- /apache-zookeeper-3.9.3-bin/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower
]# kubectl exec -it -n app zookeeper-cluster-0 -- /apache-zookeeper-3.9.3-bin/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower
查看pod,svc
]# kubectl get pod,svc -n app
NAME                           READY   STATUS    RESTARTS   AGE
pod/backend-84885dc4fb-pzkjr   1/1     Running   5          40h
pod/zookeeper-cluster-0        1/1     Running   0          5m16s
pod/zookeeper-cluster-1        1/1     Running   0          5m10s
pod/zookeeper-cluster-2        1/1     Running   0          5m5s

NAME                         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)                      AGE
service/backend-svc          NodePort    10.0.87.216   <none>        80:38248/TCP,443:39271/TCP   41h
service/zk-external          NodePort    10.0.7.66     <none>        2181:37659/TCP               7s
service/zookeeper-headless   ClusterIP   None          <none>        2181/TCP,2888/TCP,3888/TCP   5m16s
客户端连接
越学越感到自己的无知