K8s Storage: Using PV and PVC to Persist ZooKeeper Cluster Data
Build a ZooKeeper cluster that uses PVs and PVCs as its backing storage.
Download the JDK base image
]# docker pull elevy/slim_java:8
Build the ZooKeeper image
Official download site: https://downloads.apache.org/zookeeper/
Dockerfile
#FROM harbor-linux38.local.com/linux38/slim_java:8
FROM elevy/slim_java:8

ENV ZK_VERSION 3.4.14

RUN apk add --no-cache --virtual .build-deps \
      ca-certificates \
      gnupg \
      tar \
      wget && \
    #
    # Install dependencies
    apk add --no-cache \
      bash && \
    #
    # Download Zookeeper
    wget -nv -O /tmp/zk.tgz "https://www.apache.org/dyn/closer.cgi?action=download&filename=zookeeper/zookeeper-${ZK_VERSION}/zookeeper-${ZK_VERSION}.tar.gz" && \
    wget -nv -O /tmp/zk.tgz.asc "https://www.apache.org/dist/zookeeper/zookeeper-${ZK_VERSION}/zookeeper-${ZK_VERSION}.tar.gz.asc" && \
    wget -nv -O /tmp/KEYS https://dist.apache.org/repos/dist/release/zookeeper/KEYS && \
    #
    # Verify the signature
    export GNUPGHOME="$(mktemp -d)" && \
    gpg -q --batch --import /tmp/KEYS && \
    gpg -q --batch --no-auto-key-retrieve --verify /tmp/zk.tgz.asc /tmp/zk.tgz && \
    #
    # Set up directories
    #
    mkdir -p /zookeeper/data /zookeeper/wal /zookeeper/log && \
    #
    # Install
    tar -x -C /zookeeper --strip-components=1 --no-same-owner -f /tmp/zk.tgz && \
    #
    # Slim down
    cd /zookeeper && \
    cp dist-maven/zookeeper-${ZK_VERSION}.jar . && \
    rm -rf \
      *.txt \
      *.xml \
      bin/README.txt \
      bin/*.cmd \
      conf/* \
      contrib \
      dist-maven \
      docs \
      lib/*.txt \
      lib/cobertura \
      lib/jdiff \
      recipes \
      src \
      zookeeper-*.asc \
      zookeeper-*.md5 \
      zookeeper-*.sha1 && \
    #
    # Clean up
    apk del .build-deps && \
    rm -rf /tmp/* "$GNUPGHOME"

COPY conf /zookeeper/conf/
COPY bin/zkReady.sh /zookeeper/bin/
COPY entrypoint.sh /

ENV PATH=/zookeeper/bin:${PATH} \
    ZOO_LOG_DIR=/zookeeper/log \
    ZOO_LOG4J_PROP="INFO, CONSOLE, ROLLINGFILE" \
    JMXPORT=9010

ENTRYPOINT [ "/entrypoint.sh" ]

CMD [ "zkServer.sh", "start-foreground" ]

EXPOSE 2181 2888 3888 9010
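The Dockerfile copies in a conf/ directory, bin/zkReady.sh, and entrypoint.sh that are not reproduced here. As a minimal sketch (not the course's actual script), the entrypoint has to turn the MYID and SERVERS environment variables set by the Deployments further below into a myid file and a zoo.cfg before handing off to zkServer.sh:

#!/bin/bash
# entrypoint.sh (sketch): render ZooKeeper config from MYID/SERVERS, then exec the CMD.
echo "${MYID:-1}" > /zookeeper/data/myid

cat > /zookeeper/conf/zoo.cfg <<EOF
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/zookeeper/data
clientPort=2181
EOF

# SERVERS is a comma-separated host list; server N uses quorum ports 2888/3888.
IFS=',' read -ra HOSTS <<< "${SERVERS}"
for i in "${!HOSTS[@]}"; do
  ID=$((i + 1))
  if [ "${ID}" = "${MYID}" ]; then
    # a server must bind its quorum ports locally, not on its Service IP
    echo "server.${ID}=0.0.0.0:2888:3888" >> /zookeeper/conf/zoo.cfg
  else
    echo "server.${ID}=${HOSTS[$i]}:2888:3888" >> /zookeeper/conf/zoo.cfg
  fi
done

exec "$@"    # CMD: zkServer.sh start-foreground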
Build the image
]# cat build-command.sh
#!/bin/bash
TAG=$1
docker build -t harbor.magedu.local/magedu/zookeeper:${TAG} .
sleep 1
docker push harbor.magedu.local/magedu/zookeeper:${TAG}
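The script takes the image tag as its only argument, so a build for a hypothetical tag v1 is invoked like this:

]# bash build-command.sh v1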
Create the storage
]# mkdir -p /data/k8sdata/magedu/zookeeper-datadir-{1,2,3}
]# cat /etc/exports
# /root/nfs *(rw,sync,no_root_squash)
/root/data/nfs1 *(rw,sync,no_root_squash)
/root/data/nfs2 *(rw,sync,no_root_squash)
/data/k8sdata/magedu *(rw,sync,no_root_squash)
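After editing /etc/exports, the new export has to be re-published before clients can mount it. Assuming the standard nfs-utils tooling on the NFS server (192.168.64.110):

]# exportfs -r                     # re-read /etc/exports
]# showmount -e 192.168.64.110     # the list should now include /data/k8sdata/magedu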
Create the PVs
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-1
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteOnce        # read-write mount by a single node (several pods on that node may share it)
  persistentVolumeReclaimPolicy: Retain   # keep the volume and its data after the claim is released
  nfs:
    server: 192.168.64.110
    path: /data/k8sdata/magedu/zookeeper-datadir-1
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-2
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.64.110
    path: /data/k8sdata/magedu/zookeeper-datadir-2
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-3
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.64.110
    path: /data/k8sdata/magedu/zookeeper-datadir-3
Create the PVCs
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-1
  namespace: linux40
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  volumeName: zookeeper-datadir-pv-1   # bind the claim explicitly to its matching PV
  resources:
    requests:
      storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-2
  namespace: linux40
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  volumeName: zookeeper-datadir-pv-2
  resources:
    requests:
      storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-3
  namespace: linux40
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  volumeName: zookeeper-datadir-pv-3
  resources:
    requests:
      storage: 1Gi
Verify the PVs and PVCs
[root@master pv]# kubectl apply -f zookeeper-persistentvolume.yaml
persistentvolume/zookeeper-datadir-pv-1 created
persistentvolume/zookeeper-datadir-pv-2 created
persistentvolume/zookeeper-datadir-pv-3 created
[root@master pv]# kubectl get pv
NAME                     CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
zookeeper-datadir-pv-1   2Gi        RWO            Retain           Available                                   2s
zookeeper-datadir-pv-2   2Gi        RWO            Retain           Available                                   2s
zookeeper-datadir-pv-3   2Gi        RWO            Retain           Available                                   2s
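The claims are applied the same way (assuming the PVC manifest above was saved as zookeeper-persistentvolumeclaim.yaml):

]# kubectl apply -f zookeeper-persistentvolumeclaim.yaml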
]# kubectl get pvc -A
NAMESPACE   NAME                      STATUS   VOLUME                   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
linux40     zookeeper-datadir-pvc-1   Bound    zookeeper-datadir-pv-1   2Gi        RWO                           14s
linux40     zookeeper-datadir-pvc-2   Bound    zookeeper-datadir-pv-2   2Gi        RWO                           14s
linux40     zookeeper-datadir-pvc-3   Bound    zookeeper-datadir-pv-3   2Gi        RWO                           14s
Run the ZooKeeper cluster
apiVersion: v1
kind: Service
metadata:
  name: zookeeper
  namespace: linux40
spec:
  ports:
    - name: client
      port: 2181
  selector:
    app: zookeeper
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper1
  namespace: linux40
spec:
  type: NodePort
  ports:
    - name: client
      port: 2181
      nodePort: 32181
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "1"
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper2
  namespace: linux40
spec:
  type: NodePort
  ports:
    - name: client
      port: 2181
      nodePort: 32182
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "2"
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper3
  namespace: linux40
spec:
  type: NodePort
  ports:
    - name: client
      port: 2181
      nodePort: 32183
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "3"
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: zookeeper1
  namespace: linux40
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "1"
    spec:
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir:
            medium: Memory
        - name: zookeeper-datadir-pvc-1
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-1
      containers:
        - name: server
          image: zookeeper-3.4.14:v2
          imagePullPolicy: IfNotPresent
          env:
            - name: MYID
              value: "1"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
            - mountPath: "/zookeeper/data"
              name: zookeeper-datadir-pvc-1
---
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  name: zookeeper2
  namespace: linux40
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "2"
    spec:
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir:
            medium: Memory
        - name: zookeeper-datadir-pvc-2
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-2
      containers:
        - name: server
          image: zookeeper-3.4.14:v2
          imagePullPolicy: IfNotPresent
          env:
            - name: MYID
              value: "2"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
            - mountPath: "/zookeeper/data"
              name: zookeeper-datadir-pvc-2
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: zookeeper3
  namespace: linux40
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "3"
    spec:
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir:
            medium: Memory
        - name: zookeeper-datadir-pvc-3
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-3
      containers:
        - name: server
          image: zookeeper-3.4.14:v2
          imagePullPolicy: IfNotPresent
          env:
            - name: MYID
              value: "3"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
            - mountPath: "/zookeeper/data"
              name: zookeeper-datadir-pvc-3
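Assuming the manifest above is saved as zookeeper.yaml, the cluster is created and checked with:

]# kubectl apply -f zookeeper.yaml
]# kubectl get pods -n linux40 -o wide    # wait until all three pods are Running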
Verify the ZooKeeper cluster
]# kubectl exec -it -n linux40 zookeeper1-59d75b7999-mr8p7 sh
/ # /zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
ZooKeeper remote JMX Port set to 9010
ZooKeeper remote JMX authenticate set to false
ZooKeeper remote JMX ssl set to false
ZooKeeper remote JMX log4j set to true
Using config: /zookeeper/bin/../conf/zoo.cfg
Mode: follower
]# kubectl exec -it -n linux40 zookeeper2-76bc99fd5b-pmnx6 sh
/ # /zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
ZooKeeper remote JMX Port set to 9010
ZooKeeper remote JMX authenticate set to false
ZooKeeper remote JMX ssl set to false
ZooKeeper remote JMX log4j set to true
Using config: /zookeeper/bin/../conf/zoo.cfg
Mode: follower
]# kubectl exec -it -n linux40 zookeeper3-6856589fc-6wvgq sh
/ # /zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
ZooKeeper remote JMX Port set to 9010
ZooKeeper remote JMX authenticate set to false
ZooKeeper remote JMX ssl set to false
ZooKeeper remote JMX log4j set to true
Using config: /zookeeper/bin/../conf/zoo.cfg
Mode: leader
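Because each server is also exposed through a NodePort Service, the same check can be run from outside the cluster with ZooKeeper's four-letter srvr command (whitelisted by default in 3.4.x), assuming nc is available; <node-ip> is a placeholder for any worker node's address:

]# echo srvr | nc <node-ip> 32181 | grep Mode
]# echo srvr | nc <node-ip> 32182 | grep Mode
]# echo srvr | nc <node-ip> 32183 | grep Mode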
Delete one of the pods (the leader, for example) and verify that the remaining two pods elect a new leader on their own; then verify that the recreated pod rejoins the ZooKeeper cluster as a follower.
]# kubectl delete pod -n linux40 zookeeper2-76bc99fd5b-gzs8x
pod "zookeeper2-76bc99fd5b-gzs8x" deleted
]# kubectl exec -it -n linux40 zookeeper3-6856589fc-np6bv sh
/ # /zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
ZooKeeper remote JMX Port set to 9010
ZooKeeper remote JMX authenticate set to false
ZooKeeper remote JMX ssl set to false
ZooKeeper remote JMX log4j set to true
Using config: /zookeeper/bin/../conf/zoo.cfg
Mode: leader
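The Deployment recreates the deleted pod automatically. To finish the check, query the status of the new zookeeper2 pod (its name will differ; shown here as a placeholder) and, on the NFS server, confirm the data survived the restart:

]# kubectl get pods -n linux40                       # zookeeper2 comes back with a new pod name
]# kubectl exec -it -n linux40 <new-zookeeper2-pod> -- /zookeeper/bin/zkServer.sh status
]# ls /data/k8sdata/magedu/zookeeper-datadir-2/      # the myid file and version-2 snapshot dir should still be there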
The more I learn, the more I realize my own ignorance.