Using PVC, PV, and SC in k8s with a Ceph Cluster
I. Connecting to the Ceph cluster with PV and PVC
1. Create RBD images on the Ceph cluster
[root@ceph141 ~]# rbd create -s 2G yinzhengjie-k8s/rbd-pv01 --image-feature layering,exclusive-lock
[root@ceph141 ~]#
[root@ceph141 ~]# rbd create -s 4G yinzhengjie-k8s/rbd-pv02 --image-feature layering,exclusive-lock
[root@ceph141 ~]#
[root@ceph141 ~]# rbd ls -p yinzhengjie-k8s | grep rbd
rbd-pv01
rbd-pv02
[root@ceph141 ~]#
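Optionally, verify the size and enabled features of the new images before handing them to k8s:
rbd info yinzhengjie-k8s/rbd-pv01
rbd info yinzhengjie-k8s/rbd-pv02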
2. Look up the Ceph cluster's admin key and base64-encode it
[root@ceph141 ~]# grep key /etc/ceph/ceph.client.admin.keyring | awk '{printf "%s", $NF}' | base64
QVFEakZycGx5dkZDRGhBQXBKZzExMVlNSUdRNi9GL3gvWStxcFE9PQ==
[root@ceph141 ~]#
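The same key can also be pulled straight from the cluster instead of the keyring file; an equivalent one-liner, assuming the client.admin entity:
ceph auth get-key client.admin | base64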
3. Delete the default SC so it does not interfere with the experiment
[root@master231 pv-pvc]# kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
yinzhengjie-nfs-sc (default) yinzhengjie/linux Delete Immediate false 15d
[root@master231 pv-pvc]#
[root@master231 pv-pvc]# kubectl delete sc --all
storageclass.storage.k8s.io "yinzhengjie-nfs-sc" deleted
[root@master231 pv-pvc]#
[root@master231 pv-pvc]# kubectl get sc
No resources found
[root@master231 pv-pvc]#
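Deleting every SC is the quickest fix for a lab; a gentler alternative is to strip only the default flag via the standard is-default-class annotation, e.g.:
kubectl patch storageclass yinzhengjie-nfs-sc \
  -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "false"}}}'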
4. Write the resource manifest
[root@master231 pv-pvc]# cat 01-pv-pvc-deploy-svc-ing-ceph.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-admin-secret
type: "kubernetes.io/rbd"
data:
  # The Ceph admin key, base64-encoded. Change this to match your cluster!
  key: QVFEakZycGx5dkZDRGhBQXBKZzExMVlNSUdRNi9GL3gvWStxcFE9PQ==
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: yinzhengjie-ceph-01
  labels:
    school: yinzhengjie
spec:
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  rbd:
    image: rbd-pv01
    monitors:
    - 10.0.0.141:6789
    - 10.0.0.142:6789
    - 10.0.0.143:6789
    pool: yinzhengjie-k8s
    secretRef:
      name: ceph-admin-secret
    user: admin
  capacity:
    storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: yinzhengjie-ceph-02
  labels:
    school: yinzhengjie
spec:
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  rbd:
    image: rbd-pv02
    monitors:
    - 10.0.0.141:6789
    - 10.0.0.142:6789
    - 10.0.0.143:6789
    pool: yinzhengjie-k8s
    secretRef:
      name: ceph-admin-secret
    user: admin
  capacity:
    storage: 3Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: yinzhengjie-ceph-pvc-01
spec:
  # Bind this claim to the specific PV named here
  volumeName: yinzhengjie-ceph-01
  accessModes:
  - ReadWriteMany
  resources:
    limits:
      storage: 2Gi
    requests:
      storage: 1Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-volume-pvc
spec:
  replicas: 1
  selector:
    matchLabels:
      apps: ceph-pvc
  template:
    metadata:
      labels:
        apps: ceph-pvc
    spec:
      volumes:
      - name: data
        persistentVolumeClaim:
          claimName: yinzhengjie-ceph-pvc-01
      containers:
      - name: c1
        image: registry.cn-hangzhou.aliyuncs.com/yinzhengjie-k8s/apps:v1
        volumeMounts:
        - name: data
          mountPath: /yinzhengjie-data
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: svc-ceph-pvc
spec:
  selector:
    apps: ceph-pvc
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-ceph-pvc
  annotations:
    # Specify the type of the Ingress controller
    kubernetes.io/ingress.class: traefik
spec:
  # Specify the name of the IngressClass
  ingressClassName: mytraefik
  rules:
  - host: v1.yinzhengjie.com
    http:
      paths:
      - backend:
          service:
            name: svc-ceph-pvc
            port:
              number: 80
        path: /
        pathType: ImplementationSpecific
[root@master231 pv-pvc]#
5. Create the resources
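Apply the manifest from step 4, then inspect everything in one shot:
kubectl apply -f 01-pv-pvc-deploy-svc-ing-ceph.yaml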
[root@master231 pv-pvc]# kubectl get po,svc,ing,pv,pvc
NAME READY STATUS RESTARTS AGE
pod/deploy-volume-pvc-5db454bd94-7l7c8 1/1 Running 0 23s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 10.200.0.1 <none> 443/TCP 4h14m
service/svc-ceph-pvc ClusterIP 10.200.23.116 <none> 80/TCP 23s
NAME CLASS HOSTS ADDRESS PORTS AGE
ingress.networking.k8s.io/ingress-ceph-pvc <none> v1.yinzhengjie.com 80 23s
ingress.networking.k8s.io/yinzhengjie-traefik-apps mytraefik v1.yinzhengjie.com 80 11d
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
persistentvolume/yinzhengjie-ceph-01 2Gi RWX Retain Bound default/yinzhengjie-ceph-pvc-01 23s
persistentvolume/yinzhengjie-ceph-02 3Gi RWX Retain Available 23s
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
persistentvolumeclaim/yinzhengjie-ceph-pvc-01 Bound yinzhengjie-ceph-01 2Gi RWX 23s
[root@master231 pv-pvc]#
[root@master231 pv-pvc]# kubectl describe ingress.networking.k8s.io/ingress-ceph-pvc
Name: ingress-ceph-pvc
Labels: <none>
Namespace: default
Address:
Default backend: default-http-backend:80 (<error: endpoints "default-http-backend" not found>)
Rules:
Host Path Backends
---- ---- --------
v1.yinzhengjie.com
/ svc-ceph-pvc:80 (10.100.2.43:80)
Annotations: kubernetes.io/ingress.class: traefik
Events: <none>
[root@master231 pv-pvc]#
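To confirm the RBD-backed volume is really mounted inside the container, a quick check (assuming the deployment name above):
kubectl exec deploy/deploy-volume-pvc -- df -h /yinzhengjie-data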
6. Access test
http://v1.yinzhengjie.com/
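This assumes v1.yinzhengjie.com resolves to the Traefik ingress controller, for example via an /etc/hosts entry on the client; a simple smoke test is then:
curl http://v1.yinzhengjie.com/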
II. Using a dynamic RBD storage class to connect k8s to the Ceph cluster
Recommended reading:
https://github.com/ceph/ceph-csi/tree/release-v3.7/deploy/rbd/kubernetes
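The deploy/ directory used in step 4 below comes from that release branch; one way to fetch it:
git clone -b release-v3.7 https://github.com/ceph/ceph-csi.git
# the manifests applied below live under ceph-csi/deploy/rbd/kubernetes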
1. The directory structure is as follows
[root@master231 rbd]# ll
total 24
-rw-r--r-- 1 root root 454 Nov 15 16:17 ceph-config-map.yaml
-rw-r--r-- 1 root root 392 Nov 15 16:18 csi-config-map.yaml
-rw-r--r-- 1 root root 358 Nov 15 16:20 csi-kms-config-map.yaml
-rw-r--r-- 1 root root 370 Nov 15 16:22 csi-rbd-secret.yaml
drwxr-xr-x 3 root root 17 Nov 15 15:22 deploy
-rw-r--r-- 1 root root 389 Nov 15 16:22 pvc.yaml
-rw-r--r-- 1 root root 833 Nov 15 16:22 storageclass.yaml
[root@master231 rbd]#
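For orientation, a minimal sketch of what csi-config-map.yaml typically contains for ceph-csi; the clusterID placeholder is the output of `ceph fsid`, and the monitor list must match your own cluster:
apiVersion: v1
kind: ConfigMap
metadata:
  name: ceph-csi-config
data:
  config.json: |-
    [
      {
        "clusterID": "<ceph fsid>",
        "monitors": [
          "10.0.0.141:6789",
          "10.0.0.142:6789",
          "10.0.0.143:6789"
        ]
      }
    ]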
2. The full set of manifests is available here
https://gitee.com/jasonyin2020/cloud-computing-stack/tree/master/kubernetes/projects/ceph-cluster/sc
Tip:
    Adjust the cluster-specific settings in these files to match your own Ceph environment.
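The two files that usually need editing are the secret and the storage class. A hedged sketch of both, following the ceph-csi v3.7 examples (note the secret takes the plaintext key via stringData, not a base64 value):
apiVersion: v1
kind: Secret
metadata:
  name: csi-rbd-secret
stringData:
  userID: admin
  userKey: <plaintext output of `ceph auth get-key client.admin`>
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd-sc
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: <ceph fsid>
  pool: yinzhengjie-k8s
  imageFeatures: layering
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: default
  csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: default
reclaimPolicy: Delete
allowVolumeExpansion: true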
3. Wipe all resources in the default namespace (optional; it just keeps the output tidy. Never do this in production, but it is fine in a test environment)
kubectl delete all --all
4. Install the RBD SC
[root@master231 rbd]# pwd
/yinzhengjie/manifests/cloud-computing-stack/linux89/manifests/23-projects/06-ceph/sc/rbd
[root@master231 rbd]#
[root@master231 rbd]# kubectl apply -f deploy/rbd/kubernetes/
configmap/ceph-csi-config created
serviceaccount/rbd-csi-nodeplugin created
clusterrole.rbac.authorization.k8s.io/rbd-csi-nodeplugin created
clusterrolebinding.rbac.authorization.k8s.io/rbd-csi-nodeplugin created
serviceaccount/rbd-csi-provisioner created
clusterrole.rbac.authorization.k8s.io/rbd-external-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/rbd-csi-provisioner-role created
role.rbac.authorization.k8s.io/rbd-external-provisioner-cfg created
rolebinding.rbac.authorization.k8s.io/rbd-csi-provisioner-role-cfg created
service/csi-rbdplugin-provisioner created
deployment.apps/csi-rbdplugin-provisioner created
daemonset.apps/csi-rbdplugin created
service/csi-metrics-rbdplugin created
csidriver.storage.k8s.io/rbd.csi.ceph.com created
[root@master231 rbd]#
[root@master231 rbd]#
[root@master231 rbd]# kubectl apply -f .
configmap/ceph-config created
configmap/ceph-csi-config configured
configmap/ceph-csi-encryption-kms-config created
secret/csi-rbd-secret created
persistentvolumeclaim/rbd-pvc01 created
persistentvolumeclaim/rbd-pvc02 created
storageclass.storage.k8s.io/csi-rbd-sc created
[root@master231 rbd]#
5. Check the results
[root@master231 rbd]# kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
csi-rbd-sc rbd.csi.ceph.com Delete Immediate true 4m55s
[root@master231 rbd]#
[root@master231 rbd]# kubectl get po,pv,pvc
NAME READY STATUS RESTARTS AGE
pod/csi-rbdplugin-lqsxt 3/3 Running 0 50s
pod/csi-rbdplugin-provisioner-5dfcf67885-5m9sj 7/7 Running 0 50s
pod/csi-rbdplugin-provisioner-5dfcf67885-92djh 7/7 Running 0 50s
pod/csi-rbdplugin-wmnm6 3/3 Running 0 50s
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
persistentvolume/pvc-754a5284-5239-43be-99dd-03a5068f4b27 4Gi RWO Delete Bound default/rbd-pvc02 csi-rbd-sc 12s
persistentvolume/pvc-e6266518-9a17-42ac-aa60-e3c1f18f4696 2Gi RWO Delete Bound default/rbd-pvc01 csi-rbd-sc 12s
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
persistentvolumeclaim/rbd-pvc01 Bound pvc-e6266518-9a17-42ac-aa60-e3c1f18f4696 2Gi RWO csi-rbd-sc 35s
persistentvolumeclaim/rbd-pvc02 Bound pvc-754a5284-5239-43be-99dd-03a5068f4b27 4Gi RWO csi-rbd-sc 35s
[root@master231 rbd]#
6. Verify that the Ceph cluster dynamically created the PVs and the corresponding RBD images
[root@ceph141 ~]# rbd ls -p yinzhengjie-k8s
csi-vol-027f1235-c26b-11ee-b24f-f65a0eab89b3
csi-vol-027f580e-c26b-11ee-b24f-f65a0eab89b3
nginx-web
rbd-pv01
rbd-pv02
[root@ceph141 ~]#
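The two csi-vol-* entries are the images provisioned for rbd-pvc01 and rbd-pvc02. The PV-to-image mapping can be confirmed from the k8s side, since ceph-csi records the backing image name in each PV's CSI volume attributes:
kubectl get pv -o jsonpath='{range .items[*]}{.metadata.name}{" -> "}{.spec.csi.volumeAttributes.imageName}{"\n"}{end}'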