Deploying a ZooKeeper Cluster on Kubernetes

1. ZooKeeper Cluster Deployment Overview

  • Deploying a ZooKeeper cluster (3 nodes) on Kubernetes provides high availability, consistency, fault tolerance, scalability, and distributed coordination.

2. Deployment Environment

IP            Node      OS           k8s Version   ZooKeeper Version   Docker Version   ZooKeeper Role
172.16.4.85   master1   CentOS 7.8   1.23.17       -                   20.10.9          -
172.16.4.86   node1     CentOS 7.8   1.23.17       3.8.0               20.10.9          follower (auto-elected)
172.16.4.87   node2     CentOS 7.8   1.23.17       3.8.0               20.10.9          follower (auto-elected)
172.16.4.89   node3     CentOS 7.8   1.23.17       3.8.0               20.10.9          leader (auto-elected)

3. Kubernetes Environment

  • A Kubernetes cluster with the Calico network plugin is assumed to already be installed; otherwise refer to https://www.cnblogs.com/Leonardo-li/p/18648449 to set it up.

4. Deploying the ZooKeeper Cluster

4.1 NFS Setup

  • Install NFS on CentOS 7
yum install -y nfs-utils
  • Create the NFS shared directories
mkdir -p /nfs_share/k8s/zookeeper/pv{1..3}
  • Edit the NFS exports file
[root@localhost ~]# cat /etc/exports
/nfs_share/k8s/zookeeper/pv1 *(rw,sync,no_subtree_check,no_root_squash)
/nfs_share/k8s/zookeeper/pv2 *(rw,sync,no_subtree_check,no_root_squash)
/nfs_share/k8s/zookeeper/pv3 *(rw,sync,no_subtree_check,no_root_squash)
  • Start the NFS service
# Start the NFS service
systemctl start nfs-server

# Enable the NFS service to start on boot
systemctl enable nfs-server
  • Reload the exports and print them (a client-side check from the Kubernetes nodes follows the output below)
[root@localhost ~]# exportfs -r
[root@localhost ~]# exportfs -v
/nfs_share/k8s/zookeeper/pv1
		<world>(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
/nfs_share/k8s/zookeeper/pv2
		<world>(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
/nfs_share/k8s/zookeeper/pv3
		<world>(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
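
  • Each Kubernetes node should be able to see and mount these exports before the PVs are created. A quick client-side check; run it on node1/node2/node3, using the NFS server address that appears in the PV manifests below:
# Install the NFS client on the node if it is missing
yum install -y nfs-utils

# List the exports published by the NFS server
showmount -e 172.16.4.60

# Optionally test-mount one export and unmount it again
mount -t nfs 172.16.4.60:/nfs_share/k8s/zookeeper/pv1 /mnt && umount /mnt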

4.2 Create the Namespace

kubectl create ns zk

4.3 ZooKeeper PV Deployment

apiVersion: v1
kind: PersistentVolume
metadata:
  name: zk-nfs-pv-0
spec:
  capacity:
    storage: 1Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: zk-nfs-storage
  nfs:
    path: /nfs_share/k8s/zookeeper/pv1  # storage path for the first ZooKeeper Pod
    server: 172.16.4.60                 # NFS server address
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zk-nfs-pv-1
spec:
  capacity:
    storage: 1Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: zk-nfs-storage
  nfs:
    path: /nfs_share/k8s/zookeeper/pv2  # storage path for the second ZooKeeper Pod
    server: 172.16.4.60                 # NFS server address
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zk-nfs-pv-2
spec:
  capacity:
    storage: 1Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: zk-nfs-storage
  nfs:
    path: /nfs_share/k8s/zookeeper/pv3  # storage path for the third ZooKeeper Pod
    server: 172.16.4.60                 # NFS server address
kubectl apply -f zk-pv.yaml
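
These PVs are statically provisioned: zk-nfs-storage acts only as a matching label here (no StorageClass object needs to exist), and the PVCs created by the StatefulSet's volumeClaimTemplates bind to them because the storageClassName, access mode, and requested size all match. A quick check that the three PVs registered correctly before the StatefulSet is created:
# All three should show STATUS Available and STORAGECLASS zk-nfs-storage
kubectl get pv | grep zk-nfs-pv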

4.4 ZooKeeper ConfigMap Deployment

apiVersion: v1
kind: ConfigMap
metadata:
  name: zk-scripts
  namespace: zk
  labels:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/component: zookeeper
data:
  init-certs.sh: |-
    #!/bin/bash
  setup.sh: |-
    #!/bin/bash
    HOSTNAME="$(hostname -s)"
    echo "HOSTNAME  $HOSTNAME"
    if [[ -f "/bitnami/zookeeper/data/myid" ]]; then
        export ZOO_SERVER_ID="$(cat /bitnami/zookeeper/data/myid)"
    else
        if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
            ORD=${BASH_REMATCH[2]}
            export ZOO_SERVER_ID="$((ORD + 1 ))"
        else
            echo "Failed to get index from hostname $HOSTNAME"
            exit 1
        fi
    fi
    echo "ZOO_SERVER_ID  $ZOO_SERVER_ID"
    exec /entrypoint.sh /run.sh
kubectl apply -f zk-cm.yaml
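
setup.sh derives each member's myid from the StatefulSet Pod ordinal (the numeric suffix of the Pod name, plus one), so zk-test-0 becomes server 1, zk-test-1 server 2, and so on; on restarts the existing /bitnami/zookeeper/data/myid is reused. A small standalone sketch of the same regex logic, runnable in any bash shell:
# Simulate the ID derivation for each Pod name
for HOSTNAME in zk-test-0 zk-test-1 zk-test-2; do
    if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
        echo "$HOSTNAME -> ZOO_SERVER_ID=$(( BASH_REMATCH[2] + 1 ))"
    fi
done
# Prints: zk-test-0 -> ZOO_SERVER_ID=1, zk-test-1 -> 2, zk-test-2 -> 3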

4.5 ZooKeeper StatefulSet Deployment

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk-test
  namespace: zk
  labels:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/component: zookeeper
    role: zookeeper
spec:
  replicas: 3
  podManagementPolicy: Parallel
  selector:
    matchLabels:
      app.kubernetes.io/name: zookeeper
      app.kubernetes.io/component: zookeeper
  serviceName: zk-headless
  updateStrategy:
    rollingUpdate: {}
    type: RollingUpdate
  template:
    metadata:
      annotations:
      labels:
        app.kubernetes.io/name: zookeeper
        app.kubernetes.io/component: zookeeper
    spec:
      serviceAccountName: default
      affinity:
        podAntiAffinity:                                    # Pod anti-affinity
          preferredDuringSchedulingIgnoredDuringExecution:  # soft rule: spread Pods across different nodes
          - weight: 49                                      # weight; with multiple rules, scheduling is balanced by weight
            podAffinityTerm:
              topologyKey: app.kubernetes.io/name           # use app.kubernetes.io/name as the topology domain
              labelSelector:
                matchExpressions:
                - key: app.kubernetes.io/component
                  operator: In
                  values:
                  - zookeeper
      securityContext:
        fsGroup: 1001
      initContainers:
      containers:
        - name: zookeeper
          image: 172.16.4.17:8090/ltzx/bitnami/zookeeper:3.8.0-debian-10-r0
          imagePullPolicy: "IfNotPresent"
          securityContext:
            runAsNonRoot: true
            runAsUser: 1001
          command:
            - /scripts/setup.sh
          resources:                                       # requests == limits gives the Guaranteed QoS class
            limits:
              cpu: 500m
              memory: 500Mi
            requests:
              cpu: 500m
              memory: 500Mi
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: ZOO_DATA_LOG_DIR
              value: ""
            - name: ZOO_PORT_NUMBER
              value: "32181"
            - name: ZOO_TICK_TIME
              value: "2000"
            - name: ZOO_INIT_LIMIT
              value: "10"
            - name: ZOO_SYNC_LIMIT
              value: "5"
            - name: ZOO_PRE_ALLOC_SIZE
              value: "65536"
            - name: ZOO_SNAPCOUNT
              value: "100000"
            - name: ZOO_MAX_CLIENT_CNXNS
              value: "60"
            - name: ZOO_4LW_COMMANDS_WHITELIST
              value: "srvr, mntr, ruok"
            - name: ZOO_LISTEN_ALLIPS_ENABLED
              value: "no"
            - name: ZOO_AUTOPURGE_INTERVAL
              value: "0"
            - name: ZOO_AUTOPURGE_RETAIN_COUNT
              value: "3"
            - name: ZOO_MAX_SESSION_TIMEOUT
              value: "40000"
            - name: ZOO_CFG_EXTRA
              value: "quorumListenOnAllIPs=true"
            - name: ZOO_SERVERS
              value: "zk-test-0.zk-headless.zk.svc.cluster.local:32888:33888::1,zk-test-1.zk-headless.zk.svc.cluster.local:32888:33888::2,zk-test-2.zk-headless.zk.svc.cluster.local:32888:33888::3"
            - name: ZOO_ENABLE_AUTH
              value: "no"
            - name: ZOO_HEAP_SIZE
              value: "1024"
            - name: ZOO_LOG_LEVEL
              value: "ERROR"
            - name: ALLOW_ANONYMOUS_LOGIN
              value: "yes"
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
          ports:
            - name: client
              containerPort: 32181
            - name: follower
              containerPort: 32888
            - name: election
              containerPort: 33888
          livenessProbe:
            failureThreshold: 6
            initialDelaySeconds: 30
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
            exec:
              command: ['/bin/bash', '-c', 'echo "ruok" | timeout 2 nc -w 2 localhost 32181 | grep imok']
          readinessProbe:
            failureThreshold: 6
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
            exec:
              command: ['/bin/bash', '-c', 'echo "ruok" | timeout 2 nc -w 2 localhost 32181 | grep imok']
          volumeMounts:
            - name: scripts
              mountPath: /scripts/setup.sh
              subPath: setup.sh
            - name: zookeeper-data
              mountPath: /bitnami/zookeeper
      volumes:
        - name: scripts
          configMap:
            name: zk-scripts
            defaultMode: 0755
  volumeClaimTemplates:
  - metadata:
      name: zookeeper-data
    spec:
      storageClassName: zk-nfs-storage
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
kubectl apply -f zk-ss.yaml
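
Because podManagementPolicy is Parallel, all three Pods are created at once, and the ensemble forms as soon as two members can reach each other (quorum of 2 out of 3). A simple way to wait for the rollout and watch the Pods come up:
# Block until all 3 replicas are Ready
kubectl rollout status statefulset/zk-test -n zk

# Watch the Pods (Ctrl+C to stop)
kubectl get pods -n zk -l app.kubernetes.io/name=zookeeper -w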

4.6 ZooKeeper Service Deployment

  • Deploy the headless Service, used for communication between the ZooKeeper ensemble members
apiVersion: v1
kind: Service
metadata:
  name: zk-headless
  namespace: zk
  labels:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/component: zookeeper
spec:
  type: ClusterIP
  clusterIP: None
  publishNotReadyAddresses: true
  ports:
    - name: tcp-client
      port: 32181
      targetPort: client
    - name: tcp-follower
      port: 32888
      targetPort: follower
    - name: tcp-election
      port: 33888
      targetPort: election
  selector:
    app.kubernetes.io/name: zookeeper
kubectl apply -f zk-svc-headless.yaml 
  • Deploy the client-facing Service (type ClusterIP) that applications in the cluster use to reach ZooKeeper; a DNS sanity check follows both manifests
apiVersion: v1
kind: Service
metadata:
  name: zk-service
  namespace: zk
  labels:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/component: zookeeper
spec:
  type: ClusterIP
  sessionAffinity: None
  ports:
    - name: tcp-client
      port: 32181
      targetPort: client
      nodePort: null
    - name: tcp-follower
      port: 32888
      targetPort: follower
    - name: tcp-election
      port: 33888
      targetPort: election
  selector:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/component: zookeeper
kubectl apply -f zk-svc.yaml 
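  • Through the headless Service each Pod gets a stable DNS name of the form <pod>.zk-headless.zk.svc.cluster.local, which is exactly what ZOO_SERVERS references, while zk-service is the single entry point for clients. A quick DNS sanity check from a throwaway Pod (the busybox image is an assumption; any image with nslookup works):
# Resolve one per-Pod name and the client Service name, then delete the test Pod
kubectl run dns-test --rm -it --restart=Never -n zk --image=busybox:1.36 -- \
  sh -c "nslookup zk-test-0.zk-headless.zk.svc.cluster.local; nslookup zk-service.zk.svc.cluster.local"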

4.7 Check Status After Deployment

[root@master1 zk-n7]# kubectl get pv | grep zookeeper
zk-nfs-pv-0             1Gi        RWO            Retain           Bound    zk/zookeeper-data-zk-test-0              zk-nfs-storage            79m
zk-nfs-pv-1             1Gi        RWO            Retain           Bound    zk/zookeeper-data-zk-test-1              zk-nfs-storage            79m
zk-nfs-pv-2             1Gi        RWO            Retain           Bound    zk/zookeeper-data-zk-test-2              zk-nfs-storage            79m
[root@master1 zk-n7]# kubectl get pvc -nzk
NAME                       STATUS   VOLUME        CAPACITY   ACCESS MODES   STORAGECLASS     AGE
zookeeper-data-zk-test-0   Bound    zk-nfs-pv-0   1Gi        RWO            zk-nfs-storage   79m
zookeeper-data-zk-test-1   Bound    zk-nfs-pv-1   1Gi        RWO            zk-nfs-storage   79m
zookeeper-data-zk-test-2   Bound    zk-nfs-pv-2   1Gi        RWO            zk-nfs-storage   79m
[root@master1 zk-n7]# kubectl get statefulset -nzk
NAME      READY   AGE
zk-test   3/3     65m
[root@master1 zk-n7]# kubectl get pods -nzk
NAME        READY   STATUS    RESTARTS   AGE
zk-test-0   1/1     Running   0          65m
zk-test-1   1/1     Running   0          65m
zk-test-2   1/1     Running   0          65m
[root@master1 zk-n7]# kubectl get pods -nzk -o wide
NAME        READY   STATUS    RESTARTS   AGE   IP               NODE    NOMINATED NODE   READINESS GATES
zk-test-0   1/1     Running   0          66m   10.244.135.20    node3   <none>           <none>
zk-test-1   1/1     Running   0          66m   10.244.104.25    node2   <none>           <none>
zk-test-2   1/1     Running   0          66m   10.244.166.154   node1   <none>           <none>
[root@master1 zk-n7]# kubectl get svc -nzk 
NAME          TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                         AGE
zk-headless   ClusterIP   None            <none>        32181/TCP,32888/TCP,33888/TCP   5h39m
zk-service    ClusterIP   10.105.143.79   <none>        32181/TCP,32888/TCP,33888/TCP   5h38m

5. ZooKeeper Cluster Verification

  • The Mode field shows one leader and two followers
[root@master1 zk-n7]# kubectl exec -it zk-test-0 -n zk -- /bin/bash
I have no name!@zk-test-0:/$ zkServer.sh status
/opt/bitnami/java/bin/java
ZooKeeper JMX enabled by default
Using config: /opt/bitnami/zookeeper/bin/../conf/zoo.cfg
Client port found: 32181. Client address: localhost. Client SSL: false.
Mode: follower

[root@master1 zk-n7]# kubectl exec -it zk-test-1 -n zk -- /bin/bash
I have no name!@zk-test-1:/$ zkServer.sh status
/opt/bitnami/java/bin/java
ZooKeeper JMX enabled by default
Using config: /opt/bitnami/zookeeper/bin/../conf/zoo.cfg
Client port found: 32181. Client address: localhost. Client SSL: false.
Mode: follower

[root@master1 zk-n7]# kubectl exec -it zk-test-2 -n zk -- /bin/bash
I have no name!@zk-test-2:/$ zkServer.sh status
/opt/bitnami/java/bin/java
ZooKeeper JMX enabled by default
Using config: /opt/bitnami/zookeeper/bin/../conf/zoo.cfg
Client port found: 32181. Client address: localhost. Client SSL: false.
Mode: leader
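
  • Writing a znode on one member and reading it from another confirms replication end to end; a minimal sketch using the zkCli.sh shipped in the image (the znode name /smoke-test is just an example):
# Create a znode via zk-test-0
kubectl exec -n zk zk-test-0 -- zkCli.sh -server localhost:32181 create /smoke-test "hello"

# Read it back via zk-test-2 (the current leader)
kubectl exec -n zk zk-test-2 -- zkCli.sh -server localhost:32181 get /smoke-test

# Clean up
kubectl exec -n zk zk-test-0 -- zkCli.sh -server localhost:32181 delete /smoke-test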

6. In-Cluster ZooKeeper Addresses

Format 1:
zk-service.zk.svc.cluster.local:32181

Format 2:
zk-service.zk:32181
(1) Format breakdown
zk-service: the name of the ZooKeeper Service in Kubernetes, defined in the Service manifest (zk-svc.yaml) created above.
.zk: the namespace the ZooKeeper Service runs in.
:32181: the port the ZooKeeper Service listens on.
(2) Usage
This form is likewise used by clients inside the Kubernetes cluster to connect to ZooKeeper. In practice the .svc.cluster.local suffix can usually be omitted: when a client uses a name like zk-service.zk, the cluster DNS resolves it to the Service address automatically, so the shorter form works in most situations.
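
A quick way to confirm, from any other namespace, that the short form resolves and that ZooKeeper answers (the busybox image is an assumption; ruok is allowed by the ZOO_4LW_COMMANDS_WHITELIST set above, and a healthy server replies imok):
# Four-letter-word health check against the Service address
kubectl run zk-ping --rm -it --restart=Never --image=busybox:1.36 -- \
  sh -c 'echo ruok | nc -w 2 zk-service.zk 32181'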

7. zkui Deployment

7.1 Download zkui and Build the Image with a Dockerfile
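
As a rough sketch (the repository URL, Maven build, and Dockerfile location are assumptions; adjust them to your environment), zkui can be cloned from GitHub, packaged with Maven, built into an image, and pushed to the private registry referenced in the Deployment below:
# Clone and build zkui (produces a runnable jar under target/)
git clone https://github.com/DeemOpen/zkui.git && cd zkui
mvn clean package -DskipTests

# Build the image and push it to the private registry (assumes a Dockerfile at the repo root; adjust -f if it lives elsewhere)
docker build -t 172.16.4.17:8090/ltzx/zkui:2.0 .
docker push 172.16.4.17:8090/ltzx/zkui:2.0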

7.2 Modify the zkui config.cfg

[root@master1 zkui]# cat config.cfg 
#Server Port
serverPort=9090
#Comma seperated list of all the zookeeper servers
zkServer=zk-service.zk.svc.cluster.local:32181
#Http path of the repository. Ignore if you dont intent to upload files from repository.
scmRepo=http://myserver.com/@rev1=
#Path appended to the repo url. Ignore if you dont intent to upload files from repository.
scmRepoPath=//appconfig.txt
#if set to true then userSet is used for authentication, else ldap authentication is used.
ldapAuth=false
ldapDomain=mycompany,mydomain
#ldap authentication url. Ignore if using file based authentication.
ldapUrl=ldap://<ldap_host>:<ldap_port>/dc=mycom,dc=com
#Specific roles for ldap authenticated users. Ignore if using file based authentication.
ldapRoleSet={"users": [{ "username":"domain\\user1" , "role": "ADMIN" }]}
userSet = {"users": [{ "username":"admin" , "password":"123456","role": "ADMIN" },{ "username":"appconfig" , "password":"123456","role": "USER" }]}
#Set to prod in production and dev in local. Setting to dev will clear history each time.
env=prod
jdbcClass=org.h2.Driver
jdbcUrl=jdbc:h2:zkui
jdbcUser=root
jdbcPwd=manager
#If you want to use mysql db to store history then comment the h2 db section.
#jdbcClass=com.mysql.jdbc.Driver
#jdbcUrl=jdbc:mysql://localhost:3306/zkui
#jdbcUser=root
#jdbcPwd=manager
loginMessage=Please login using admin/manager or appconfig/appconfig.
#session timeout 5 mins/300 secs.
sessionTimeout=300
#Default 5 seconds to keep short lived zk sessions. If you have large data then the read will take more than 30 seconds so increase this accordingly. 
#A bigger zkSessionTimeout means the connection will be held longer and resource consumption will be high.
zkSessionTimeout=5
#Block PWD exposure over rest call.
blockPwdOverRest=false
#ignore rest of the props below if https=false.
https=false
keystoreFile=/home/user/keystore.jks
keystorePwd=password
keystoreManagerPwd=password
# The default ACL to use for all creation of nodes. If left blank, then all nodes will be universally accessible
# Permissions are based on single character flags: c (Create), r (read), w (write), d (delete), a (admin), * (all)
# For example defaultAcl={"acls": [{"scheme":"ip", "id":"192.168.1.192", "perms":"*"}, {"scheme":"ip", id":"192.168.1.0/24", "perms":"r"}]
defaultAcl=
# Set X-Forwarded-For to true if zkui is behind a proxy
X-Forwarded-For=false

7.3 zkui ConfigMap

kubectl create configmap zkui-cm --from-file=config.cfg -n zk
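
Before wiring it into the Deployment, it is worth confirming the file landed in the ConfigMap under the expected key:
# The data section should contain a config.cfg key with the contents shown above
kubectl get configmap zkui-cm -n zk -o yaml | head -n 20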

7.4 zkui Deployment

apiVersion: apps/v1
kind: Deployment
metadata:
  name: zkui
  namespace: zk
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zkui
  template:
    metadata:
      labels:
        app: zkui
    spec:
      containers:
        - name: zkui
          image: 172.16.4.17:8090/ltzx/zkui:2.0
          ports:
            - containerPort: 9090
          env:
            - name: ZKUI_ZK_SERVERS
              value: "zk-service.zk.svc.cluster.local:32181"
          volumeMounts:
            - name: config-volume
              mountPath: /var/app/config.cfg  # mount path inside the container
              subPath: config.cfg            # file name in the ConfigMap
      volumes:
        - name: config-volume
          configMap:
            name: zkui-cm
            items:
              - key: config.cfg
                path: config.cfg
      restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
  name: zkui
  namespace: zk
spec:
  selector:
    app: zkui
  ports:
    - protocol: TCP
      port: 9090
      targetPort: 9090
      nodePort: 32766
  type: NodePort
kubectl apply -f zkui.yaml
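
A quick sanity check after the apply, before opening the UI in a browser:
# Wait for the Deployment to become Ready, then probe the NodePort from any node (replace <node-ip>)
kubectl rollout status deployment/zkui -n zk
curl -I http://<node-ip>:32766/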

7.5 Check zkui Status

[root@master1 zkui]# kubectl get pods -n zk |grep zkui
zkui-5b448c88d9-wst55   1/1     Running   0          12m
Access the zkui web UI at http://masterip:32766/ (the NodePort is reachable via any node's IP).

8. References

https://www.cnblogs.com/llds/p/17198487.html

 
