The flow for dynamically provisioning a PV with a Kubernetes StorageClass:

First create a StorageClass --> a PVC then requests that SC, and the SC automatically creates a matching PV --> this is how a StorageClass dynamically provisions a PV.

Testing a pod that mounts a Ceph RBD directly

# Run on master1-admin
scp /etc/yum.repos.d/ceph.repo test-k8s-master1:/etc/yum.repos.d/
scp /etc/yum.repos.d/ceph.repo test-k8s-master2:/etc/yum.repos.d/
scp /etc/yum.repos.d/ceph.repo test-k8s-master3:/etc/yum.repos.d/
# Run on every k8s node
yum -y install ceph-common
# Run on master1-admin
scp -r /etc/ceph test-k8s-master1:/etc/
scp -r /etc/ceph test-k8s-master2:/etc/
scp -r /etc/ceph test-k8s-master3:/etc/
# Test a pod mounting a Ceph volume directly; run on master1-admin
ceph osd pool create k8spool 256
rbd create rbda -s 1024 -p k8spool
rbd feature disable k8spool/rbda object-map fast-diff deep-flatten
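These features are disabled because older kernel RBD clients (for example the 3.10 kernel shipped with CentOS 7) cannot map images that have object-map, fast-diff or deep-flatten enabled. As an optional check not in the original steps, confirm that only layering remains before mounting:

# features should list only: layering
rbd info k8spool/rbda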
# Test a pod that mounts the newly created rbda directly
[root@test-k8s-master1 scripts]# cat test.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: testrbd
spec:
  containers:
    - image: nginx
      name: nginx
      volumeMounts:
      - name: testrbd
        mountPath: /mnt
  volumes:
    - name: testrbd
      rbd:
        monitors:
        - '192.168.1.167:6789'
        pool: k8spool
        image: rbda
        fsType: xfs
        readOnly: false
        user: admin
        keyring: /etc/ceph/ceph.client.admin.keyring

[root@test-k8s-master1 scripts]# kubectl apply -f test.yaml
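As an optional verification (assuming the pod name testrbd from the manifest above), confirm the pod is Running and the RBD image is mounted on /mnt:

kubectl get pod testrbd
kubectl exec testrbd -- df -h /mnt   # should show an xfs filesystem on /mnt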

Creating a PV and PVC backed by Ceph RBD

# Create the ceph-secret Secret object; the k8s volume plugin uses it to authenticate to the Ceph cluster
# Get the client.admin keyring value and base64-encode it; run on master1-admin
[root@master1-admin firstrbd]# ceph auth get-key client.admin | base64
QVFDZXFWZGovdnJrREJBQW1FRXFSOWI1d2MycE5mUlJPaEF6dWc9PQ==

# Create the Ceph secret
[root@test-k8s-master1 scripts]# cat ceph-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
data:
  key: QVFDZXFWZGovdnJrREJBQW1FRXFSOWI1d2MycE5mUlJPaEF6dWc9PQ==
[root@test-k8s-master1 scripts]# kubectl apply -f ceph-secret.yaml
secret/ceph-secret created
# Create the pool; run on master1-admin
ceph osd pool create k8spool2 256
rbd create rbda -s 1024 -p k8spool2
rbd feature disable k8spool2/rbda object-map fast-diff deep-flatten
# Create the PV
[root@test-k8s-master1 scripts]# cat pv.yaml 
apiVersion: v1
kind: PersistentVolume
metadata:
  name: ceph-pv
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  rbd:
    monitors:
      - 192.168.1.167:6789
    pool: k8spool2
    image: rbda
    user: admin
    secretRef:
      name: ceph-secret
    fsType: xfs
    readOnly: false
  persistentVolumeReclaimPolicy: Retain   # Recycle is only supported for NFS/hostPath, not RBD
[root@test-k8s-master1 scripts]# kubectl apply -f pv.yaml
persistentvolume/ceph-pv created
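Optionally confirm the PV was registered; until a claim binds it, its STATUS should be Available:

kubectl get pv ceph-pv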
[root@test-k8s-master1 scripts]# cat pvc.yaml 
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: ceph-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
[root@test-k8s-master1 scripts]# kubectl apply -f pvc.yaml 
persistentvolumeclaim/ceph-pvc created
[root@test-k8s-master1 scripts]# kubectl get pvc
NAME       STATUS   VOLUME    CAPACITY   ACCESS MODES   STORAGECLASS   AGE
ceph-pvc   Bound    ceph-pv   1Gi        RWO
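The claim is bound, but nothing consumes it yet. A minimal sketch of a pod that mounts this statically provisioned PVC (the pod name and mount path are illustrative, not from the original):

apiVersion: v1
kind: Pod
metadata:
  name: ceph-rbd-pod
spec:
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: ceph-vol
      mountPath: /usr/share/nginx/html
  volumes:
  - name: ceph-vol
    persistentVolumeClaim:
      claimName: ceph-pvc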

Dynamically provisioning PVs with a StorageClass for persistent pod storage
# Create the pool on master1-admin (skip if k8spool already exists from the direct-mount test above)
[root@master1-admin ~]# ceph osd pool create k8spool 256
[root@test-k8s-master1 ceph-storageclass]# cat rbd-provisioner.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rbd-provisioner
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rbd-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
      - name: rbd-provisioner
        image: "quay.io/external_storage/rbd-provisioner:latest"
        env:
        - name: PROVISIONER_NAME
          value: ceph.com/rbd
      serviceAccountName: persistent-volume-binder
[root@test-k8s-master1 ceph-storageclass]# kubectl apply -f rbd-provisioner.yaml 
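Note: the Deployment above runs under the ServiceAccount persistent-volume-binder, which is never created in this walkthrough; without the account and suitable RBAC, the provisioner pod cannot start or create PVs. A minimal sketch, adapted from the external-storage rbd-provisioner examples (the ClusterRole/ClusterRoleBinding names are illustrative):

apiVersion: v1
kind: ServiceAccount
metadata:
  name: persistent-volume-binder
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: rbd-provisioner
rules:
# Core objects the provisioner watches and manages
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "update", "patch"]
# Needed to read the admin/user secrets referenced by the StorageClass
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get"]
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "list", "watch", "create", "update", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: rbd-provisioner
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: rbd-provisioner
subjects:
- kind: ServiceAccount
  name: persistent-volume-binder
  namespace: kube-system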
[root@test-k8s-master1 ceph-storageclass]# cat ceph-sc.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: ceph-storageclass-secret
  namespace: kube-system
data:
  key: QVFDZXFWZGovdnJrREJBQW1FRXFSOWI1d2MycE5mUlJPaEF6dWc9PQ==
type: kubernetes.io/rbd
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-sc
  annotations:
    storageclass.kubernetes.io/is-default-class: "false"
provisioner: ceph.com/rbd
parameters:
  monitors: 192.168.1.167:6789
  adminId: admin
  adminSecretName: ceph-storageclass-secret
  adminSecretNamespace: kube-system
  pool: k8spool
  userId: admin
  userSecretName: ceph-storageclass-secret
  userSecretNamespace: kube-system
  imageFormat: "2"
  imageFeatures: "layering"
allowVolumeExpansion: true
[root@test-k8s-master1 ceph-storageclass]# kubectl apply -f ceph-sc.yaml 
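Optionally confirm the StorageClass was registered:

kubectl get storageclass ceph-sc

The test pod and PVC below live in the ops namespace; create it first if it does not already exist:

kubectl create namespace ops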
[root@test-k8s-master1 ceph-storageclass]# cat test-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: ceph-pod1
  namespace: ops
spec:
  containers:
  - name: ceph-busybox
    image: busybox
    command: ["sleep", "60000"]
    volumeMounts:
    - name: ceph-vol1
      mountPath: /usr/share/busybox
      readOnly: false
  volumes:
  - name: ceph-vol1
    persistentVolumeClaim:
      claimName: ceph-pvc-test1

---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: ceph-pvc-test1
  namespace: ops
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: ceph-sc
[root@test-k8s-master1 ceph-storageclass]# kubectl apply -f test-pod.yaml
pod/ceph-pod1 created
persistentvolumeclaim/ceph-pvc-test1 created
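Before checking on the Ceph side, you can confirm the dynamic provisioning from Kubernetes (optional check; the external provisioner names its PVs pvc-<uid>):

kubectl get pvc ceph-pvc-test1 -n ops   # STATUS should be Bound
kubectl get pv                          # shows the dynamically created pvc-... volume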
# List the RBD images in the pool; run on master1-admin
[root@master1-admin ~]# rbd ls --pool k8spool
kubernetes-dynamic-pvc-af98f5ba-5668-11ed-8f1a-568633827378