k8s使用service名称搭建zookeeper集群+pv+pvc+nfs

环境说明:192.168.211.151-153 为 k8s 节点,192.168.211.154 为 harbor(兼 NFS 服务端)
cd /opt && tar xvf k8s-data-chuan.tar.gz   #解压后得到 /opt/k8s-data 目录(待确认原始命令)
root@ubuntu20:/opt/k8s-data/dockerfile/web/chuan/zookeeper#
#拉取jdk镜像制作zookeeper镜像
root@ubuntu20:~# docker pull elevy/slim_java:8
root@ubuntu20:~# docker tag elevy/slim_java:8 harbor.chuan.net/baseimages/slim_java:8
root@ubuntu20:~# docker push harbor.chuan.net/baseimages/slim_java:8

root@ubuntu20:/opt/k8s-data/dockerfile/web/chuan/zookeeper# ls
Dockerfile
KEYS
bin
build-command.sh
conf
entrypoint.sh
repositories
zookeeper-3.12-Dockerfile.tar.gz
zookeeper-3.4.14.tar.gz
zookeeper-3.4.14.tar.gz.asc

# ./build-command.sh zk1001
docker tag harbor.chuan.net/chuan/zookeeper:zk1001 harbor.chuan.net/baseimages/zookeeper:zk1001
docker push harbor.chuan.net/baseimages/zookeeper:zk1001
docker run -it --rm harbor.chuan.net/baseimages/zookeeper:zk1001

#配置nfs. 154服务端
#1.sudo apt-get install portmap
#2.sudo apt-get install nfs-kernel-server
#3.sudo apt-get install nfs-common
systemctl enable nfs-kernel-server
root@154:/data# cat /etc/exports
/data/k8sdata *(rw,no_root_squash)
root@154:/data# /etc/init.d/nfs-kernel-server restart
root@154:/data# showmount -e
Export list for ubuntu20.04.3.example.com:
/data/chuan *
#配置pv
root@ubuntu20:/opt/k8s-data/yaml/chuan/zookeeper/pv# cat zookeeper-persistentvolume.yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-1
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 192.168.211.154
    path: /data/k8sdata/chuan/zookeeper-datadir-1
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-2
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 192.168.211.154
    path: /data/k8sdata/chuan/zookeeper-datadir-2
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-3
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 192.168.211.154
    path: /data/k8sdata/chuan/zookeeper-datadir-3
root@ubuntu20:/opt/k8s-data/yaml/chuan/zookeeper/pv# kubectl apply -f zookeeper-persistentvolume.yaml
persistentvolume/zookeeper-datadir-pv-1 created
persistentvolume/zookeeper-datadir-pv-2 created
persistentvolume/zookeeper-datadir-pv-3 created
root@ubuntu20:/opt/k8s-data/yaml/chuan/zookeeper/pv# kubectl get pv
NAME                     CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
zookeeper-datadir-pv-1   5Gi        RWO            Retain           Available                                   6s
zookeeper-datadir-pv-2   5Gi        RWO            Retain           Available                                   5s
zookeeper-datadir-pv-3   5Gi        RWO            Retain           Available                                   5s
#配置pvc
root@ubuntu20:/opt/k8s-data/yaml/chuan/zookeeper/pv# cat zookeeper-persistentvolumeclaim.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-1
  namespace: chuan
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-pv-1
  resources:
    requests:
      storage: 5Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-2
  namespace: chuan
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-pv-2
  resources:
    requests:
      storage: 5Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-3
  namespace: chuan
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-pv-3
  resources:
    requests:
      storage: 5Gi
root@ubuntu20:/opt/k8s-data/yaml/chuan/zookeeper/pv# kubectl get pvc -A
NAMESPACE   NAME                      STATUS   VOLUME                   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
chuan       zookeeper-datadir-pvc-1   Bound    zookeeper-datadir-pv-1   5Gi        RWO                           28s
chuan       zookeeper-datadir-pvc-2   Bound    zookeeper-datadir-pv-2   5Gi        RWO                           27s
chuan       zookeeper-datadir-pvc-3   Bound    zookeeper-datadir-pv-3   5Gi        RWO                           27s
#Dockerfile内容,使用域名搭建zookeeper集群
root@ubuntu20:/opt/k8s-data/dockerfile/web/chuan/zookeeper/conf# cat zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/zookeeper/data
dataLogDir=/zookeeper/wal
#snapCount=100000
autopurge.purgeInterval=1
clientPort=2181
root@ubuntu20:/opt/k8s-data/dockerfile/web/chuan/zookeeper# cat entrypoint.sh
#!/bin/bash
echo ${MYID:-1} > /zookeeper/data/myid
if [ -n "$SERVERS" ]; then
  IFS=\, read -a servers <<<"$SERVERS"
  for i in "${!servers[@]}"; do
    printf "\nserver.%i=%s:2888:3888" "$((1 + $i))" "${servers[$i]}" >> /zookeeper/conf/zoo.cfg
  done
fi
cd /zookeeper
exec "$@"

#配置zookeeper yaml文件
root@ubuntu20:/opt/k8s-data/yaml/chuan/zookeeper# cat zookeeper.yaml
apiVersion: v1
kind: Service
metadata:
  name: zookeeper
  namespace: chuan
spec:
  ports:
    - name: client
      port: 2181
  selector:
    app: zookeeper
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper1
  namespace: chuan
spec:
  type: NodePort
  ports:
    - name: client
      port: 2181
      nodePort: 42181
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "1"
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper2
  namespace: chuan
spec:
  type: NodePort
  ports:
    - name: client
      port: 2181
      nodePort: 42182
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "2"
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper3
  namespace: chuan
spec:
  type: NodePort
  ports:
    - name: client
      port: 2181
      nodePort: 42183
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "3"
---
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  name: zookeeper1
  namespace: chuan
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "1"
    spec:
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir:
            medium: Memory
      containers:
        - name: server
          image: harbor.chuan.net/baseimages/zookeeper:zk1001
          imagePullPolicy: Always
          env:
            - name: MYID
              value: "1"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
            - mountPath: "/zookeeper/data"
              name: zookeeper-datadir-pvc-1
      # NOTE(review): 原文此处第二次出现 volumes 键(与上方 data/wal 重复),按原样保留,待确认是否应合并为一个列表
      volumes:
        - name: zookeeper-datadir-pvc-1
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-1
---
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  name: zookeeper2
  namespace: chuan
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "2"
    spec:
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir:
            medium: Memory
      containers:
        - name: server
          image: harbor.chuan.net/baseimages/zookeeper:zk1001
          imagePullPolicy: Always
          env:
            - name: MYID
              value: "2"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
            - mountPath: "/zookeeper/data"
              name: zookeeper-datadir-pvc-2
      volumes:
        - name: zookeeper-datadir-pvc-2
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-2
---
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  name: zookeeper3
  namespace: chuan
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "3"
    spec:
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir:
            medium: Memory
      containers:
        - name: server
          image: harbor.chuan.net/baseimages/zookeeper:zk1001
          imagePullPolicy: Always
          env:
            - name: MYID
              value: "3"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
            - mountPath: "/zookeeper/data"
              name: zookeeper-datadir-pvc-3
      volumes:
        - name: zookeeper-datadir-pvc-3
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-3
#查看是否搭建成功
/ # /zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
ZooKeeper remote JMX Port set to 9010
ZooKeeper remote JMX authenticate set to false
ZooKeeper remote JMX ssl set to false
ZooKeeper remote JMX log4j set to true
Using config: /zookeeper/bin/../conf/zoo.cfg
Mode: follower
root@ubuntu20:~# kubectl exec -it zookeeper3-84f844f7c-d4l64 sh -n chuan
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # /zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
ZooKeeper remote JMX Port set to 9010
ZooKeeper remote JMX authenticate set to false
ZooKeeper remote JMX ssl set to false
ZooKeeper remote JMX log4j set to true
Using config: /zookeeper/bin/../conf/zoo.cfg
Mode: leader

 

posted @ 2021-11-28 20:12  gg888666  阅读(173)  评论(0编辑  收藏  举报