Kubernetes部署动态存储
1:环境
Kubernetes:1.23.1
NFS:v4
Harbor:2.0
Docker:20.10.12
2:配置NFS服务器
[root@k8s-nfs ~]# yum install -y nfs-utils rpcbind
[root@k8s-nfs ~]# mkdir /data
[root@k8s-nfs ~]# cat /etc/exports
/data 10.0.0.0/8(rw,sync,no_root_squash)
[root@k8s-nfs ~]# showmount -e
Export list for k8s-nfs:
/data 10.0.0.0/8
[root@k8s-nfs ~]# systemctl enable nfs-server rpcbind --now
3:创建动态存储Pod
[root@k8s-master ~]# mkdir nfs-client
[root@k8s-master ~]# cd nfs-client/
# RBAC配置
[root@k8s-master nfs-client]# cat nfs-rbac.yaml
kind: ServiceAccount
apiVersion: v1
metadata:
  name: nfs-client-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
# 创建Pod配置
[root@k8s-master nfs-client]# cat nfs-client.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.kubernetes.com/library/nfs-subdir-external-provisioner:4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 10.0.0.14
            - name: NFS_PATH
              value: /data
      volumes:
        - name: nfs-client-root
          nfs:
            server: 10.0.0.14
            path: /data
# 此处需要注意镜像地址与NFS地址(我这里镜像使用的是私服)。
# 另外注意:nfs-rbac.yaml 与本文件都定义了同名的 ServiceAccount nfs-client-provisioner,
# apply 时其中一处会显示 unchanged,属于重复定义,保留其一即可。
# 创建SC文件
[root@k8s-master nfs-client]# cat sc.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs
parameters:
  archiveOnDelete: "true"
# 创建资源
[root@k8s-master nfs-client]# kubectl apply -f .
serviceaccount/nfs-client-provisioner created
deployment.apps/nfs-client-provisioner created
serviceaccount/nfs-client-provisioner unchanged
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
role.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
rolebinding.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
storageclass.storage.k8s.io/managed-nfs-storage created
# 查看创建状态
[root@k8s-master nfs-client]# kubectl get pod,sc
NAME READY STATUS RESTARTS AGE
pod/nfs-client-provisioner-59dd74d64f-cjqvw 1/1 Running 0 31s
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
storageclass.storage.k8s.io/managed-nfs-storage fuseim.pri/ifs Delete Immediate false 31s
4:使用SC挂载
# 这边选择使用nginx来做
# yaml如下
[root@k8s-master nginx]# cat nginx.yaml
kind: Namespace
apiVersion: v1
metadata:
  name: nginx
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nginx
  namespace: nginx
spec:
  storageClassName: "managed-nfs-storage"
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  namespace: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: registry.kubernetes.com/library/nginx:alpine
          ports:
            - containerPort: 80
          volumeMounts:
            - name: nginx
              mountPath: /usr/share/nginx/html
      volumes:
        - name: nginx
          persistentVolumeClaim:
            claimName: nginx
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
  namespace: nginx
  labels:
    app: nginx
spec:
  type: NodePort
  ports:
    - port: 80
      targetPort: 80
  selector:
    app: nginx
# 这里我选择了用pvc来限制大小,也可以直接使用sc
[root@k8s-master nginx]# kubectl get pod,svc,sc,pvc -n nginx
NAME READY STATUS RESTARTS AGE
pod/nginx-7d78995fd-2hx4p 1/1 Running 0 56s
pod/nginx-7d78995fd-g68t7 1/1 Running 0 56s
pod/nginx-7d78995fd-gwg74 1/1 Running 0 56s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/nginx NodePort 172.1.228.74 <none> 80:32257/TCP 56s
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
storageclass.storage.k8s.io/managed-nfs-storage fuseim.pri/ifs Delete Immediate false 37m
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
persistentvolumeclaim/nginx Bound pvc-976633fe-ee6b-4e57-9ae8-0f34020f3b7b 1Gi RWX managed-nfs-storage 56s
5:测试
[root@k8s-master nginx]# curl 172.1.228.74
<html>
<head><title>403 Forbidden</title></head>
<body>
<center><h1>403 Forbidden</h1></center>
<hr><center>nginx/1.21.5</center>
</body>
</html>
# 返回403是因为动态创建出的NFS目录还是空的,挂载到/usr/share/nginx/html后没有index.html,nginx找不到默认首页
[root@k8s-nfs nginx-nginx-pvc-976633fe-ee6b-4e57-9ae8-0f34020f3b7b]# echo "https://www.cnblogs.com/devoppsdu"> index.html
[root@k8s-nfs nginx-nginx-pvc-976633fe-ee6b-4e57-9ae8-0f34020f3b7b]# ls
index.html
# 写入文件
# 再次测试
[root@k8s-master nginx]# curl 172.1.228.74
https://www.cnblogs.com/devoppsdu
# 这里可以说一下:由于StorageClass中设置了 archiveOnDelete: "true",删除PVC之后数据目录不会被清除,
# 而是在NFS存储上被重命名保留,目录名前会加上 "archived-" 前缀(见下方验证)
[root@k8s-master nginx]# kubectl delete -f nginx.yaml
namespace "nginx" deleted
persistentvolumeclaim "nginx" deleted
deployment.apps "nginx" deleted
service "nginx" deleted
[root@k8s-nfs archived-nginx-nginx-pvc-976633fe-ee6b-4e57-9ae8-0f34020f3b7b]# ll
total 4
-rw-r--r-- 1 root root 34 Dec 29 11:19 index.html