实现基于NFS的PV动态创建

一、准备nfs-server环境

[root@cyh-dell-rocky-8-6 ~]# virt-clone -o ubuntu20.04-template -n easzlab-k8s-nfs-01 -f /var/lib/libvirt/images/easzlab-k8s-nfs-01.qcow2 #克隆新的虚机
Allocating 'easzlab-k8s-nfs-01.qcow2'                                                                                                                                |  50 GB  00:01:18     
Clone 'easzlab-k8s-nfs-01' created successfully.
[root@cyh-dell-rocky-8-6 ~]# qemu-img create -f qcow2 -o preallocation=metadata /var/lib/libvirt/images/easzlab-k8s-nfs-01-1.qcow2  200g #创建200G数据盘
Formatting '/var/lib/libvirt/images/easzlab-k8s-nfs-01-1.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off preallocation=metadata compression_type=zlib size=214748364800 lazy_refcounts=off refcount_bits=16
[root@cyh-dell-rocky-8-6 ~]# qemu-img info /var/lib/libvirt/images/easzlab-k8s-nfs-01-1.qcow2 #查看磁盘信息
image: /var/lib/libvirt/images/easzlab-k8s-nfs-01-1.qcow2
file format: qcow2
virtual size: 200 GiB (214748364800 bytes)
disk size: 407 MiB
cluster_size: 65536
Format specific information:
    compat: 1.1
    compression type: zlib
    lazy refcounts: false
    refcount bits: 16
    corrupt: false
    extended l2: false
[root@cyh-dell-rocky-8-6 ~]# 
[root@cyh-dell-rocky-8-6 ~]# virsh start easzlab-k8s-nfs-01 #启动虚机
Domain 'easzlab-k8s-nfs-01' started
[root@cyh-dell-rocky-8-6 ~]# virsh attach-disk easzlab-k8s-nfs-01 /var/lib/libvirt/images/easzlab-k8s-nfs-01-1.qcow2 vdb --config --live --persistent #将磁盘挂载到虚机
Disk attached successfully
[root@cyh-dell-rocky-8-6 ~]# 
[root@ubuntu-20-04 ~]# lsblk
NAME                      MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
loop0                       7:0    0 63.2M  1 loop /snap/core20/1623
loop1                       7:1    0 67.2M  1 loop /snap/lxd/21835
loop2                       7:2    0 67.8M  1 loop /snap/lxd/22753
loop3                       7:3    0   48M  1 loop /snap/snapd/17029
loop4                       7:4    0   62M  1 loop /snap/core20/1587
loop6                       7:6    0   48M  1 loop /snap/snapd/17336
vda                       252:0    0   50G  0 disk 
├─vda1                    252:1    0    1M  0 part 
├─vda2                    252:2    0  1.5G  0 part /boot
└─vda3                    252:3    0 48.5G  0 part 
  └─ubuntu--vg-ubuntu--lv 253:0    0 48.5G  0 lvm  /
vdb                       252:16   0  200G  0 disk 
[root@ubuntu-20-04 ~]# 
[root@ubuntu-20-04 ~]# parted /dev/vdb #创建磁盘分区
GNU Parted 3.3
Using /dev/vdb
Welcome to GNU Parted! Type 'help' to view a list of commands.
(parted) mklabel gpt
(parted) mkpart primary 0% 100%                                           
(parted) q                                                                
Information: You may need to update /etc/fstab.

[root@ubuntu-20-04 ~]# mkfs.xfs /dev/vdb1  #格式化磁盘
meta-data=/dev/vdb1              isize=512    agcount=4, agsize=13109120 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=1, rmapbt=0
         =                       reflink=1
data     =                       bsize=4096   blocks=52436480, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
log      =internal log           bsize=4096   blocks=25603, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
[root@ubuntu-20-04 ~]# mkdir /data
[root@ubuntu-20-04 ~]# echo "/dev/vdb1 /data xfs defaults 0 0" >> /etc/fstab
[root@ubuntu-20-04 ~]# mount -a
[root@ubuntu-20-04 ~]# df -h
Filesystem                         Size  Used Avail Use% Mounted on
udev                               1.9G     0  1.9G   0% /dev
tmpfs                              394M  1.1M  393M   1% /run
/dev/mapper/ubuntu--vg-ubuntu--lv   48G  6.7G   39G  15% /
tmpfs                              2.0G     0  2.0G   0% /dev/shm
tmpfs                              5.0M     0  5.0M   0% /run/lock
tmpfs                              2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/loop0                          64M   64M     0 100% /snap/core20/1623
/dev/vda2                          1.5G  106M  1.3G   8% /boot
/dev/loop1                          68M   68M     0 100% /snap/lxd/21835
/dev/loop2                          68M   68M     0 100% /snap/lxd/22753
/dev/loop4                          62M   62M     0 100% /snap/core20/1587
/dev/loop3                          48M   48M     0 100% /snap/snapd/17029
tmpfs                              394M     0  394M   0% /run/user/0
/dev/loop6                          48M   48M     0 100% /snap/snapd/17336
/dev/vdb1                          200G  1.5G  199G   1% /data
[root@ubuntu-20-04 ~]# 

二、安装nfs服务

安装nfs-server

apt install nfs-kernel-server -y
echo "/data 172.16.88.0/24(rw,sync,no_root_squash,no_subtree_check)" >> /etc/exports
exportfs -r

安装nfs-client

root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc# ansible 'vm' -m shell -a "apt-get install nfs-common -y"

三、创建nfs-deployment部署文件

root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc# cat nfs-deployment.yaml

# ServiceAccount the provisioner Pod runs as; granted the permissions
# below via the ClusterRoleBinding.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  namespace: kube-system

---
# Minimum RBAC permissions for an external provisioner: watch PVCs,
# create/delete PVs, read StorageClasses, and record Events.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints","services"]
    verbs: ["get", "list", "watch","create","update", "patch"]

---
# Bind the ClusterRole above to the provisioner's ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: kube-system 
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io

---
# The nfs-subdir-external-provisioner Deployment. It mounts the NFS
# export itself (volume below) and creates one subdirectory per PV.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-provisioner-01
  namespace: kube-system
spec:
  replicas: 1
  strategy:
    # Recreate: never run two provisioner replicas at once during updates.
    type: Recreate
  selector:
    matchLabels:
      app: nfs-provisioner-01
  template:
    metadata:
      labels:
        app: nfs-provisioner-01
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.cn-qingdao.aliyuncs.com/zhangshijie/nfs-subdir-external-provisioner:v4.0.2 
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              # Provisioner name; must match the 'provisioner' field of the
              # StorageClass below so claims are routed to this deployment.
              value: nfs-provisioner-01
            - name: NFS_SERVER
              value: 172.16.88.169
            - name: NFS_PATH
              value: /data
      volumes:
        # The NFS export prepared in sections 1-2 (172.16.88.169:/data).
        - name: nfs-client-root
          nfs:
            server: 172.16.88.169
            path: /data

---
# StorageClass that PVCs reference (storageClassName: nfs-dynamic-class).
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-dynamic-class
  annotations:
    # Not the cluster default; PVCs must name this class explicitly.
    storageclass.kubernetes.io/is-default-class: "false"
provisioner: nfs-provisioner-01
parameters:
  # "false": deleting the PVC removes the backing directory instead of
  # archiving it under an "archived-*" name on the NFS share.
  archiveOnDelete: "false"

创建pvc部署文件及测试Pod文件（PV无需手动定义，由provisioner根据PVC自动创建）

root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc# vi nfs-pvc-create.yaml
root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc# vi nfs-pv-create.yaml
root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc# 
root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc# 
root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc# cat nfs-pvc-create.yaml 
# PVC that triggers dynamic provisioning: referencing 'nfs-dynamic-class'
# makes nfs-provisioner-01 create a matching PV backed by a subdirectory
# of the NFS export.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nfs-pvc-test-claim
spec:
  storageClassName: nfs-dynamic-class 
  accessModes:
    - ReadWriteMany   # RWX: NFS volumes can be mounted by multiple nodes
  resources:
    requests:
      storage: 20G   # NOTE(review): decimal '20G'; Kubernetes convention is binary '20Gi' — confirm intent
root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc# 
root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc# cat nfs-pv-create.yaml 
# Smoke-test Pod: mounts the PVC above at /mnt and writes a marker file.
# With restartPolicy Never it runs once and shows STATUS=Completed when
# the dynamically provisioned NFS volume is writable.
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
  - name: test-pod
    image: busybox:stable
    command:
      - "/bin/sh"
    args:
      - "-c"
      - "touch /mnt/SUCCESS && exit 0 || exit 1"
    volumeMounts:
      - name: nfs-pvc
        mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: nfs-pvc-test-claim
root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc# 

四、验证storageClass、pvc、pv创建

root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc# kubectl apply -f nfs-deployment.yaml 
serviceaccount/nfs-client-provisioner created
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
deployment.apps/nfs-provisioner-01 created
storageclass.storage.k8s.io/nfs-dynamic-class created
root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc# 
root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc# kubectl get pod -n kube-system
NAME                                       READY   STATUS    RESTARTS      AGE
calico-kube-controllers-5c8bb696bb-5nrbl   1/1     Running   3 (27h ago)   2d8h
calico-node-68k76                          1/1     Running   2 (27h ago)   2d6h
calico-node-bz8pp                          1/1     Running   2 (27h ago)   2d6h
calico-node-cfl2g                          1/1     Running   2 (27h ago)   2d6h
calico-node-hsrfh                          1/1     Running   2 (27h ago)   2d6h
calico-node-j8kgf                          1/1     Running   2 (27h ago)   2d6h
calico-node-kdb5j                          1/1     Running   2 (27h ago)   2d6h
calico-node-mqmhp                          1/1     Running   2 (27h ago)   2d6h
calico-node-x6ctf                          1/1     Running   2 (27h ago)   2d6h
calico-node-xh79g                          1/1     Running   2             2d6h
coredns-69548bdd5f-9md6c                   1/1     Running   2 (27h ago)   2d7h
coredns-69548bdd5f-n6rvg                   1/1     Running   2 (27h ago)   2d7h
kuboard-7dc6ffdd7c-njhnb                   1/1     Running   2 (27h ago)   2d7h
metrics-server-686ff776cf-dbgq6            1/1     Running   4 (26h ago)   2d2h
nfs-provisioner-01-599b7bfc9d-bpq24        1/1     Running   0             17s
root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc# 
root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc# kubectl apply -f nfs-pvc-create.yaml 
persistentvolumeclaim/nfs-pvc-test-claim created
root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc# 
root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc# kubectl get pvc -A
NAMESPACE   NAME                 STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS             AGE
default     mysql-data-pvc       Bound    pvc-63eb1701-349d-4eb4-a9ab-2b7476885b43   20Gi       RWO            ceph-storage-class-k8s   2d2h
default     nfs-pvc-test-claim   Bound    pvc-7d4ba048-d9ca-402f-8907-9a30b12752cf   20G        RWX            nfs-dynamic-class        13s
root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc# 
root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc# kubectl apply -f nfs-pv-create.yaml 
pod/test-pod created
root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc# kubectl get pv -A
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                        STORAGECLASS             REASON   AGE
pvc-63eb1701-349d-4eb4-a9ab-2b7476885b43   20Gi       RWO            Delete           Bound    default/mysql-data-pvc       ceph-storage-class-k8s            2d2h
pvc-7d4ba048-d9ca-402f-8907-9a30b12752cf   20G        RWX            Delete           Bound    default/nfs-pvc-test-claim   nfs-dynamic-class                 13m
root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc# 
root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc# kubectl get pod
NAME                     READY   STATUS      RESTARTS      AGE
mysql-77d55bfdd8-cbtcz   1/1     Running     2 (27h ago)   2d2h
test-pod                 0/1     Completed   0             55s
root@easzlab-deploy:~/jiege-k8s/nfs-pv-pvc#

 

posted @ 2022-10-21 01:23  cyh00001  阅读(187)  评论(0编辑  收藏  举报