Testing GlusterFS storage with Kubernetes (k8s)

I. Basic environment

  • Alibaba Cloud servers
  • CentOS 7.9
[root@master ~]# cat /etc/hosts
::1	localhost	localhost.localdomain	localhost6	localhost6.localdomain6
127.0.0.1	localhost	localhost.localdomain	localhost4	localhost4.localdomain4

192.168.11.61 master
192.168.11.62 node1 # storage node
192.168.11.63 node2 # storage node
192.168.11.64 node3 # storage node

II. GlusterFS configuration deployed with Heketi

1. Cluster and disk topology registered in Heketi

[root@node1 ~]# heketi-cli --server http://localhost:8080 --user admin --secret admin@key topology info

Cluster Id: 55c5c02540458dae0414e622ba063f0d

    File:  true
    Block: true

    Volumes:


    Nodes:

	Node Id: 51456a927d994b71d0aa20d3eceed117
	State: online
	Cluster Id: 55c5c02540458dae0414e622ba063f0d
	Zone: 2
	Management Hostnames: 192.168.11.63
	Storage Hostnames: 192.168.11.63
	Devices:
		Id:37fd5355b3bd1d51520e180600d0a2ad   Name:/dev/vdb            State:online    Size (GiB):19      Used (GiB):0       Free (GiB):19      
			Bricks:

	Node Id: 706006259ae1e5b1de5e162339dfaddd
	State: online
	Cluster Id: 55c5c02540458dae0414e622ba063f0d
	Zone: 3
	Management Hostnames: 192.168.11.64
	Storage Hostnames: 192.168.11.64
	Devices:
		Id:326cd6fa573e44e04421588654311f7e   Name:/dev/vdb            State:online    Size (GiB):19      Used (GiB):0       Free (GiB):19      
			Bricks:

	Node Id: 828affec2ec522d9a99a2f2bd2f34761
	State: online
	Cluster Id: 55c5c02540458dae0414e622ba063f0d
	Zone: 1
	Management Hostnames: 192.168.11.62
	Storage Hostnames: 192.168.11.62
	Devices:
		Id:3632131cfe647d5d3b216bc36b883cfc   Name:/dev/vdb            State:online    Size (GiB):19      Used (GiB):0       Free (GiB):19      
			Bricks:

2. Kubernetes dynamic provisioning (StorageClass) for GlusterFS

[root@master ~]# cat gluster-heketi-storageclass.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: glusterfs
provisioner: kubernetes.io/glusterfs
allowVolumeExpansion: true
reclaimPolicy: Delete
parameters:
  resturl: "http://192.168.11.62:8080"
  restauthenabled: "true"
  restuser: "admin"
  secretNamespace: "default"
  secretName: "heketi-secret"
  volumetype: "replicate:3" # three replicas, one copy per storage server
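The StorageClass references a Secret named heketi-secret holding the Heketi admin key; it is not shown in the original transcript. A minimal sketch of how it could be created (assuming the admin@key value used with heketi-cli above, and the kubernetes.io/glusterfs secret type with a data key named key that the in-tree provisioner expects):

kubectl create secret generic heketi-secret \
  --namespace default \
  --type="kubernetes.io/glusterfs" \
  --from-literal=key='admin@key'   # on master; value must match the Heketi admin key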

[root@master ~]# kubectl get sc 
NAME        PROVISIONER               RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
glusterfs   kubernetes.io/glusterfs   Delete          Immediate           true                   62s

III. PVC and Pod manifests used for testing

  • test-pvc.yaml
[root@master ~]# cat test-pvc.yaml 
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-glusterfs
  annotations:
    volume.beta.kubernetes.io/storage-class: "glusterfs"
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
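Note that volume.beta.kubernetes.io/storage-class is the legacy beta annotation; an equivalent claim using the spec.storageClassName field (a sketch, not the manifest actually used in this test) would look like:

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-glusterfs
spec:
  storageClassName: glusterfs   # replaces the beta annotation
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi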
  • pv-pod.yaml
[root@master ~]# cat pv-pod.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: task-pv-pod
spec:
  volumes:
    - name: task-pv-storage
      persistentVolumeClaim:
        claimName: test-glusterfs
  containers:
    - name: task-pv-container
      image: nginx:alpine
      ports:
        - containerPort: 80
          name: "http-server"
      volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: task-pv-storage
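Both manifests are applied on the master before the check below, along these lines:

kubectl apply -f test-pvc.yaml     # the PVC should become Bound once Heketi provisions the volume
kubectl apply -f pv-pod.yaml
kubectl get pvc test-glusterfs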
  • Verify the Pod and the mount
[root@master ~]# kubectl get pod 
NAME          READY   STATUS    RESTARTS   AGE
task-pv-pod   1/1     Running   0          35s
[root@master ~]# kubectl exec -it task-pv-pod  sh 
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # df -h
Filesystem                Size      Used Available Use% Mounted on
overlay                  39.2G      3.8G     33.6G  10% /
tmpfs                    64.0M         0     64.0M   0% /dev
tmpfs                     1.8G         0      1.8G   0% /sys/fs/cgroup
/dev/vda1                39.2G      3.8G     33.6G  10% /dev/termination-log
/dev/vda1                39.2G      3.8G     33.6G  10% /etc/resolv.conf
/dev/vda1                39.2G      3.8G     33.6G  10% /etc/hostname
/dev/vda1                39.2G      3.8G     33.6G  10% /etc/hosts
shm                      64.0M         0     64.0M   0% /dev/shm
192.168.11.64:vol_0c01bc93655b4ec989ee2372ca1dbbcf
                       1014.0M     42.8M    971.2M   4% /usr/share/nginx/html
tmpfs                     1.8G     12.0K      1.8G   0% /run/secrets/kubernetes.io/serviceaccount
tmpfs                     1.8G         0      1.8G   0% /proc/acpi
tmpfs                    64.0M         0     64.0M   0% /proc/kcore
tmpfs                    64.0M         0     64.0M   0% /proc/keys
tmpfs                    64.0M         0     64.0M   0% /proc/timer_list
tmpfs                    64.0M         0     64.0M   0% /proc/timer_stats
tmpfs                    64.0M         0     64.0M   0% /proc/sched_debug
tmpfs                     1.8G         0      1.8G   0% /proc/scsi
tmpfs                     1.8G         0      1.8G   0% /sys/firmware
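The df output shows the claim is backed by GlusterFS volume vol_0c01bc93655b4ec989ee2372ca1dbbcf served from 192.168.11.64. To cross-check the provisioned objects, commands along these lines could be used (not part of the original transcript):

kubectl get pv                                                # on master: the PV bound to test-glusterfs
heketi-cli --server http://localhost:8080 --user admin --secret admin@key volume list   # on node1
gluster volume info vol_0c01bc93655b4ec989ee2372ca1dbbcf      # on any storage node: replica 3 brick layout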

IV. Testing GlusterFS

1. Write from the Pod and check the data on all GlusterFS nodes

  • Inside the Pod
/usr/share/nginx/html # seq 10 > index.html
  • Check the data on the storage servers

[root@node1 ~]# ll /var/lib/heketi/mounts/vg_3632131cfe647d5d3b216bc36b883cfc/brick_6a7b9cc35798803c2ea0d7529992fdde/brick
total 4
-rw-r--r-- 2 root 2000 21 Jun 11 10:30 index.html
[root@node2 ~]# ll /var/lib/heketi/mounts/vg_37fd5355b3bd1d51520e180600d0a2ad/brick_f03e793ffbcc6b2db4283d03df4dcc26/brick
total 4
-rw-r--r-- 2 root 2000 21 Jun 11 10:30 index.html
[root@node3 ~]# ll /var/lib/heketi/mounts/vg_326cd6fa573e44e04421588654311f7e/brick_7fedf3cf5601f1331b6786772a680f74/brick
total 4
-rw-r--r-- 2 root 2000 21 Jun 11 10:30 index.html
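Each brick holds an identical copy of index.html. A quick consistency check (a sketch, run on each storage node using the brick paths shown above) could be:

md5sum /var/lib/heketi/mounts/vg_*/brick_*/brick/index.html   # checksums should match on node1, node2 and node3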

2. Stop node3 and test writes

  • After stopping node3 from the cloud console, inside the Pod:
/usr/share/nginx/html # echo down >node3.txt # while node3 is being stopped the write hangs for a while, then completes
  • Check the data on node1 and node2 (writes still succeed)
[root@node1 ~]# ll /var/lib/heketi/mounts/vg_3632131cfe647d5d3b216bc36b883cfc/brick_6a7b9cc35798803c2ea0d7529992fdde/brick
total 8
-rw-r--r-- 2 root 2000 21 Jun 11 10:30 index.html
-rw-r--r-- 2 root 2000  5 Jun 11 10:33 node3.txt
[root@node2 ~]# ll /var/lib/heketi/mounts/vg_37fd5355b3bd1d51520e180600d0a2ad/brick_f03e793ffbcc6b2db4283d03df4dcc26/brick
total 8
-rw-r--r-- 2 root 2000 21 Jun 11 10:30 index.html
-rw-r--r-- 2 root 2000  5 Jun 11 10:33 node3.txt
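From a surviving node, the degraded state could be confirmed with standard Gluster commands (a sketch, not from the original run); the peer 192.168.11.64 (node3) would be reported as disconnected and its brick as offline:

gluster peer status                                           # on node1
gluster volume status vol_0c01bc93655b4ec989ee2372ca1dbbcf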

3. With node3 still stopped, test creating a new PVC

  • Create a new PVC
  • Conclusion: with the replica count set to 3 and only 3 storage servers, a new PVC cannot be provisioned while one server is down; it stays Pending (see the sketch after the output below)
[root@master ~]# cat  test1-pvc.yaml 
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test1
  annotations:
    volume.beta.kubernetes.io/storage-class: "glusterfs"
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi

[root@master ~]# kubectl apply -f test1-pvc.yaml 
persistentvolumeclaim/test1 created
[root@master ~]# kubectl get pvc
NAME             STATUS    VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
test-glusterfs   Bound     pvc-8552824c-0632-4ccd-a89c-22d0bcf37146   1Gi        RWX            glusterfs      19m
test1            Pending     
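To see why the claim stays Pending, the PVC events can be inspected (a sketch; the events would typically show the provisioner failing because Heketi cannot place three replicas with only two nodes reachable):

kubectl describe pvc test1   # the Events section records the provisioning failures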

4. Start node3 again and check whether the data synchronizes

  • After starting node3, check the files on node3
  • Conclusion: after a GlusterFS data node comes back, the data is healed automatically
[root@node3 ~]# ll /var/lib/heketi/mounts/vg_326cd6fa573e44e04421588654311f7e/brick_7fedf3cf5601f1331b6786772a680f74/brick
total 8
-rw-r--r-- 2 root 2000 21 Jun 11 10:30 index.html
-rw-r--r-- 2 root 2000  5 Jun 11 10:33 node3.txt
[root@node3 ~]# cat  /var/lib/heketi/mounts/vg_326cd6fa573e44e04421588654311f7e/brick_7fedf3cf5601f1331b6786772a680f74/brick/node3.txt 
down
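Self-heal progress can be watched with the heal command (a sketch, not from the original transcript); once the listed entries drop to zero, the brick on node3 has fully caught up:

gluster volume heal vol_0c01bc93655b4ec989ee2372ca1dbbcf info   # on any storage node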

5. Write data in the Pod and check it on node3

  • Inside the Pod
  • Conclusion: new data is replicated to node3 normally
[root@master ~]# kubectl exec -it task-pv-pod  sh 
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # cd /usr/share/nginx/html
/usr/share/nginx/html # seq 5 > node3_test.txt
  • Check on node3
[root@node3 ~]# ll /var/lib/heketi/mounts/vg_326cd6fa573e44e04421588654311f7e/brick_7fedf3cf5601f1331b6786772a680f74/brick/node3_test.txt 
-rw-r--r-- 2 root 2000 10 Jun 11 10:43 /var/lib/heketi/mounts/vg_326cd6fa573e44e04421588654311f7e/brick_7fedf3cf5601f1331b6786772a680f74/brick/node3_test.txt

6. After starting node3, check the status of the previously Pending PVC

  • Check the PVCs
  • Conclusion: the previously Pending PVC becomes Bound automatically
[root@master ~]# kubectl get pvc
NAME             STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
test-glusterfs   Bound    pvc-8552824c-0632-4ccd-a89c-22d0bcf37146   1Gi        RWX            glusterfs      27m
test1            Bound    pvc-37906249-0572-459c-bed1-36eee19d7c25   1Gi        RWX            glusterfs      8m49s

7. With node3 back online, test creating a PVC

  • Create a new PVC
  • Conclusion: the PVC is created and bound normally
[root@master ~]# cp test1-pvc.yaml  test2-pvc.yaml 
[root@master ~]# sed -i "s#test1#test2#g" test2-pvc.yaml 
[root@master ~]# kubectl  apply -f test2-pvc.yaml 
persistentvolumeclaim/test2 created
[root@master ~]# kubectl get pvc 
NAME             STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
test-glusterfs   Bound    pvc-8552824c-0632-4ccd-a89c-22d0bcf37146   1Gi        RWX            glusterfs      29m
test1            Bound    pvc-37906249-0572-459c-bed1-36eee19d7c25   1Gi        RWX            glusterfs      10m
test2            Bound    pvc-d33240a5-4688-483c-adf4-79628675229e   1Gi        RWX            glusterfs      14s

8. Stop node2 and node3 and test writes

  • After stopping both node2 and node3, inside the Pod
  • Conclusion: with two of the three servers down, the GlusterFS volume becomes unavailable (see the quorum note after the output below)
[root@master ~]# kubectl exec -it task-pv-pod  sh 
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # cd /usr/share/nginx/html  # with node2 and node3 stopped, operations on the mounted directory hang
sh: cd: can't cd to /usr/share/nginx/html: Socket not connected  # after a short wait the error appears; the mount can no longer be accessed
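This matches GlusterFS client-quorum behaviour: for a replica 3 volume, quorum is normally auto, so a majority of bricks (2 of 3) must be reachable, and with two bricks down the client returns errors such as "Socket not connected". The effective settings could be checked with (a sketch):

gluster volume get vol_0c01bc93655b4ec989ee2372ca1dbbcf cluster.quorum-type    # on a storage node
gluster volume get vol_0c01bc93655b4ec989ee2372ca1dbbcf cluster.quorum-count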

9. Start node2 and node3, then create a file from inside the Pod

  • After node2 and node3 are back, inside the Pod
[root@master ~]# kubectl exec -it task-pv-pod  sh 
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # cd /usr/share/nginx/html
/usr/share/nginx/html # echo   node2 node3 > up.txt

  • Check the data on node2 and node3
[root@node2 ~]# ll /var/lib/heketi/mounts/vg_37fd5355b3bd1d51520e180600d0a2ad/brick_f03e793ffbcc6b2db4283d03df4dcc26/brick/
total 16
-rw-r--r-- 2 root 2000 21 Jun 11 10:30 index.html
-rw-r--r-- 2 root 2000 10 Jun 11 10:43 node3_test.txt
-rw-r--r-- 2 root 2000  5 Jun 11 10:33 node3.txt
-rw-r--r-- 2 root 2000 12 Jun 11 10:52 up.txt
[root@node2 ~]# cat  /var/lib/heketi/mounts/vg_37fd5355b3bd1d51520e180600d0a2ad/brick_f03e793ffbcc6b2db4283d03df4dcc26/brick/up.txt 
node2 node3

[root@node3 ~]# ll /var/lib/heketi/mounts/vg_326cd6fa573e44e04421588654311f7e/brick_7fedf3cf5601f1331b6786772a680f74/brick
total 16
-rw-r--r-- 2 root 2000 21 Jun 11 10:30 index.html
-rw-r--r-- 2 root 2000 10 Jun 11 10:43 node3_test.txt
-rw-r--r-- 2 root 2000  5 Jun 11 10:33 node3.txt
-rw-r--r-- 2 root 2000 12 Jun 11 10:52 up.txt
[root@node3 ~]# cat  /var/lib/heketi/mounts/vg_326cd6fa573e44e04421588654311f7e/brick_7fedf3cf5601f1331b6786772a680f74/brick/up.txt 
node2 node3

10. With node2 and node3 back, test creating a PVC

  • Conclusion: once the servers are back, new PVCs are created and bound normally
[root@master ~]# cp test2-pvc.yaml test4-pvc.yaml 
[root@master ~]# sed -i "s#test2#test4#g" test4-pvc.yaml 
[root@master ~]# kubectl  apply -f test4-pvc.yaml 
persistentvolumeclaim/test4 created
[root@master ~]# kubectl get pvc 
NAME             STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
test-glusterfs   Bound    pvc-8552824c-0632-4ccd-a89c-22d0bcf37146   1Gi        RWX            glusterfs      37m
test1            Bound    pvc-37906249-0572-459c-bed1-36eee19d7c25   1Gi        RWX            glusterfs      18m
test2            Bound    pvc-d33240a5-4688-483c-adf4-79628675229e   1Gi        RWX            glusterfs      8m51s
test3            Bound    pvc-cfa37d9b-9079-43eb-a474-88d04ecc41b3   1Gi        RWX            glusterfs      4m45s
test4            Bound    pvc-1af68f5b-e00a-445b-8146-e7e8d6741653   1Gi        RWX            glusterfs      9s