Hands-on: using Ceph RBD and CephFS storage in Kubernetes

1. Configure the storage pool, add ceph hosts entries on the k8s cluster, and install the ceph-common client

#Create the storage pool
ceph osd pool create shijie-rbd-pool1 32 32
cephadmin@ceph-deploy:~/ceph-cluster$ ceph osd pool create shijie-rbd-pool1 32 32
pool 'shijie-rbd-pool1' created

#Verify the pool exists
cephadmin@ceph-deploy:/etc/ceph$ ceph osd pool ls |grep shijie-rbd-pool1
shijie-rbd-pool1


#Enable the RBD application on the pool
ceph osd pool application enable shijie-rbd-pool1 rbd
cephadmin@ceph-deploy:~/ceph-cluster$ ceph osd pool application enable shijie-rbd-pool1 rbd
enabled application 'rbd' on pool 'shijie-rbd-pool1'


#Initialize the pool for RBD
rbd pool init -p shijie-rbd-pool1

#Create an image for k8s to mount
rbd create shijie-img-img1 --size 3G --pool shijie-rbd-pool1 --image-format 2 --image-feature layering
shijie-img-img1: image name
--size 3G: image size
--pool shijie-rbd-pool1: the pool to create the image in
--image-format 2: image format
--image-feature layering: enabled image features; only layering here, so older node kernels can map the image (see the note after this list)
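If an image was created with extra features (object-map, fast-diff, deep-flatten, exclusive-lock) that the node kernel cannot handle, mapping fails with "image uses unsupported features"; the features can be disabled afterwards. A sketch, run from the ceph admin node (object-map and fast-diff have to go before exclusive-lock):
rbd feature disable shijie-rbd-pool1/shijie-img-img1 object-map fast-diff deep-flatten
rbd feature disable shijie-rbd-pool1/shijie-img-img1 exclusive-lock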

#Verify the image exists
rbd ls --pool shijie-rbd-pool1
cephadmin@ceph-deploy:~/ceph-cluster$ rbd ls --pool shijie-rbd-pool1
shijie-img-img1

#Show the image details
rbd --image shijie-img-img1 --pool shijie-rbd-pool1 info
cephadmin@ceph-deploy:~$ rbd --image shijie-img-img1 --pool shijie-rbd-pool1 info
rbd image 'shijie-img-img1':
    size 3 GiB in 768 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 123035a0fee6
    block_name_prefix: rbd_data.123035a0fee6
    format: 2
    features: layering
    op_features: 
    flags: 
    create_timestamp: Tue Jan 31 21:12:44 2023
    access_timestamp: Tue Jan 31 21:12:44 2023
    modify_timestamp: Tue Jan 31 21:12:44 2023


#On every k8s master and node, set up the apt sources and repo key so the ceph-common package can be installed
cat >/etc/apt/sources.list<<'EOF'
deb http://mirrors.ustc.edu.cn/ubuntu/ focal main restricted universe multiverse
deb-src http://mirrors.ustc.edu.cn/ubuntu/ focal main restricted universe multiverse

deb http://mirrors.ustc.edu.cn/ubuntu/ focal-security main restricted universe multiverse
deb-src http://mirrors.ustc.edu.cn/ubuntu/ focal-security main restricted universe multiverse

deb http://mirrors.ustc.edu.cn/ubuntu/ focal-updates main restricted universe multiverse
deb-src http://mirrors.ustc.edu.cn/ubuntu/ focal-updates main restricted universe multiverse

deb http://mirrors.ustc.edu.cn/ubuntu/ focal-backports main restricted universe multiverse
deb-src http://mirrors.ustc.edu.cn/ubuntu/ focal-backports main restricted universe multiverse

## Not recommended
# deb http://mirrors.ustc.edu.cn/ubuntu/ focal-proposed main restricted universe multiverse
# deb-src http://mirrors.ustc.edu.cn/ubuntu/ focal-proposed main restricted universe multiverse
EOF
#Add the ceph repo with its signing key, then refresh the package index
wget -q -O- 'http://mirrors.ustc.edu.cn/ceph/keys/release.asc' | sudo apt-key add -
echo deb http://mirrors.ustc.edu.cn/ceph/debian-pacific/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
apt update


#List the installable ceph-common versions
apt-cache madison ceph-common


#Install ceph-common on every k8s node; installing it is what creates the /etc/ceph directory
apt install ceph-common -y
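Optionally confirm the installed client release roughly matches the cluster (pacific here); a quick check:
#show the client version and where the package came from
ceph --version
apt-cache policy ceph-common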


#Create a user and grant caps, on the ceph-deploy node
ceph auth get-or-create client.shijie mon 'allow r' osd 'allow * pool=shijie-rbd-pool1'
cephadmin@ceph-deploy:/etc/ceph$ ceph auth get-or-create client.shijie mon 'allow r' osd 'allow * pool=shijie-rbd-pool1'
[client.shijie]
    key = AQAs69lj9A1zBhAAIqRnzmzjdmoLab1ZnSjHPA==



    

#Verify the user's caps
ceph auth get client.shijie    
cephadmin@ceph-deploy:/etc/ceph$ ceph auth get client.shijie 
[client.shijie]
    key = AQAs69lj9A1zBhAAIqRnzmzjdmoLab1ZnSjHPA==
    caps mon = "allow r"
    caps osd = "allow * pool=shijie-rbd-pool1"
exported keyring for client.shijie




#Export the user's keyring to a file
ceph auth get client.shijie -o ceph.client.shijie.keyring
cephadmin@ceph-deploy:/etc/ceph$ sudo ceph auth get client.shijie -o ceph.client.shijie.keyring
exported keyring for client.shijie

#Inspect the exported keyring
cephadmin@ceph-deploy:/etc/ceph$ cat ceph.client.shijie.keyring
[client.shijie]
    key = AQAs69lj9A1zBhAAIqRnzmzjdmoLab1ZnSjHPA==
    caps mon = "allow r"
    caps osd = "allow * pool=shijie-rbd-pool1"
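If the caps ever need adjusting later, ceph auth caps replaces them in place (it overwrites rather than appends); a sketch that tightens the osd cap to read/write/execute on the pool:
ceph auth caps client.shijie mon 'allow r' osd 'allow rwx pool=shijie-rbd-pool1'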


#Copy ceph.conf, the client keyring ceph.client.shijie.keyring, and the mds/mgr/osd/rgw bootstrap keyrings to every k8s cluster node (strictly, a client only needs ceph.conf and its own keyring; the bootstrap keyrings are copied along here)
cephadmin@ceph-deploy:/etc/ceph$ sudo scp /etc/ceph/ceph.conf ceph.client.shijie.keyring ceph.bootstrap* root@192.168.10.111:/etc/ceph
root@192.168.10.111's password: 
ceph.conf                     100%  492     1.0MB/s   00:00
ceph.client.shijie.keyring    100%  130   288.1KB/s   00:00
ceph.bootstrap-mds.keyring    100%  113   231.9KB/s   00:00
ceph.bootstrap-mgr.keyring    100%  113   285.2KB/s   00:00
ceph.bootstrap-osd.keyring    100%  113   280.7KB/s   00:00
ceph.bootstrap-rgw.keyring
sudo scp /etc/ceph/ceph.conf /etc/ceph/ceph.client.shijie.keyring /etc/ceph/ceph.bootstrap* root@192.168.10.112:/etc/ceph
sudo scp /etc/ceph/ceph.conf /etc/ceph/ceph.client.shijie.keyring /etc/ceph/ceph.bootstrap* root@192.168.10.113:/etc/ceph
sudo scp /etc/ceph/ceph.conf /etc/ceph/ceph.client.shijie.keyring /etc/ceph/ceph.bootstrap* root@192.168.10.114:/etc/ceph
sudo scp /etc/ceph/ceph.conf /etc/ceph/ceph.client.shijie.keyring /etc/ceph/ceph.bootstrap* root@192.168.10.115:/etc/ceph
sudo scp /etc/ceph/ceph.conf /etc/ceph/ceph.client.shijie.keyring /etc/ceph/ceph.bootstrap* root@192.168.10.116:/etc/ceph

#On a node, authenticate to ceph as the new user and check the overall cluster status
#--user shijie selects the non-admin user
ceph --user shijie -s
root@k8s-node1:~# ceph --user shijie -s

  cluster:
    id:     511a9f2f-c2eb-4461-9472-a2074f266bba
    health: HEALTH_WARN
            clock skew detected on mon.ceph-mon3-ceph-mds3-node3
 
  services:
    mon: 3 daemons, quorum ceph-mon1-ceph-mds1-node1,ceph-mon2-ceph-mds2-node2,ceph-mon3-ceph-mds3-node3 (age 47h)
    mgr: ceph-mgr1-ceph-rgw1(active, since 47h), standbys: ceph-mgr2-ceph-rgw2
    mds: 1/1 daemons up, 3 standby
    osd: 12 osds: 12 up (since 47h), 12 in (since 47h)
    rgw: 2 daemons active (2 hosts, 1 zones)
 
  data:
    volumes: 1/1 healthy
    pools:   8 pools, 257 pgs
    objects: 247 objects, 7.2 KiB
    usage:   665 MiB used, 479 GiB / 480 GiB avail
    pgs:     257 active+clean

#On the node, verify the image is visible
rbd --id shijie ls --pool=shijie-rbd-pool1
root@k8s-node1:~# rbd --id shijie ls --pool=shijie-rbd-pool1
shijie-img-img1

#Manually test mapping the image on the node
rbd --id shijie -p shijie-rbd-pool1 map shijie-img-img1
#Manually unmap it
rbd --id shijie -p shijie-rbd-pool1 unmap shijie-img-img1
#Delete the image (only if no longer needed)
rbd rm --pool shijie-rbd-pool1 --image shijie-img-img1
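A complete manual smoke test on one node before involving k8s at all; a sketch that assumes the image is brand new (mkfs.ext4 destroys any existing data):
DEV=$(rbd --id shijie -p shijie-rbd-pool1 map shijie-img-img1)   #prints the device path, e.g. /dev/rbd0
mkfs.ext4 "$DEV"
mount "$DEV" /mnt
echo smoke-test >/mnt/test.log && cat /mnt/test.log
umount /mnt
rbd --id shijie -p shijie-rbd-pool1 unmap shijie-img-img1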


#Add hosts entries for the ceph cluster on every k8s master and node
cat >>/etc/hosts<<'EOF'
192.168.10.100 ceph-deploy1
192.168.10.101 ceph-mgr1-ceph-rgw1
192.168.10.102 ceph-mgr2-ceph-rgw2
192.168.10.103 ceph-mon1-ceph-mds1-node1
192.168.10.104 ceph-mon2-ceph-mds2-node2
192.168.10.105 ceph-mon3-ceph-mds3-node3
EOF
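Optionally verify the new entries resolve on each node:
getent hosts ceph-mon1-ceph-mds1-node1
ping -c 1 ceph-mon1-ceph-mds1-node1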

 

2. Using Ceph RBD directly from the k8s cluster

#Annotated manifest
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - image: busybox 
    command:
      - sleep
      - "3600"
    imagePullPolicy: Always 
    name: busybox
    #restartPolicy: Always
    volumeMounts:                 #mount list
    - name: rbd-data1             #name of the volume to mount
      mountPath: /data            #mount destination inside the pod
  volumes:                        #volume list
    - name: rbd-data1             #volume name
      rbd:                        #volume type is rbd
        monitors:                 #list of mon servers
        - '172.31.6.101:6789'     #mon server ip:port
        - '172.31.6.102:6789'
        - '172.31.6.103:6789'
        pool: shijie-rbd-pool1    #pool name
        image: shijie-img-img1    #image in that pool
        fsType: ext4              #filesystem type, ext4 or xfs; kubelet formats the image automatically on first mount
        readOnly: false           #false = read-write; with true the pod cannot write to the volume
        user: magedu-shijie       #ceph user to authenticate as (admin can also be used)
        keyring: /etc/ceph/ceph.client.magedu-shijie.keyring #keyring path on the node host; without it authentication fails

#Hands-on: an RBD volume authenticated with a non-admin ceph user's keyring
cat >case1-busybox-keyring.yaml<<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - image: busybox 
    command:
      - sleep
      - "3600"
    imagePullPolicy: Always 
    name: busybox
    #restartPolicy: Always
    volumeMounts:
    - name: data1
      mountPath: /data
  volumes:
    - name: data1
      rbd:
        monitors:
        - '192.168.10.103:6789'
        - '192.168.10.104:6789'
        - '192.168.10.105:6789'
        pool: shijie-rbd-pool1
        image: shijie-img-img1
        fsType: ext4
        readOnly: false
        user: shijie
        keyring: /etc/ceph/ceph.client.shijie.keyring
EOF

#Create the pod
root@k8s-master1-etcd1-haproxy1:/tools# kubectl apply -f case1-busybox-keyring.yaml
pod/busybox created

#Check
root@k8s-master1-etcd1-haproxy1:/tools# kubectl get pod -o wide
NAME        READY   STATUS    RESTARTS   AGE    IP               NODE             NOMINATED NODE   READINESS GATES
busybox     1/1     Running   0          60m    10.200.36.122    192.168.10.114   <none>           <none>
net-test1   1/1     Running   0          174m   10.200.107.225   192.168.10.116   <none>           <none>
#Exec into the container and check
root@k8s-master1-etcd1-haproxy1:/tools# kubectl exec -it busybox /bin/sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # 
/ # df -h
Filesystem                Size      Used Available Use% Mounted on
overlay                  44.0G     10.7G     31.0G  26% /
tmpfs                    64.0M         0     64.0M   0% /dev
tmpfs                     1.9G         0      1.9G   0% /sys/fs/cgroup
/dev/rbd0                 2.9G     24.0K      2.9G   0% /data    #the mounted ceph RBD volume
/dev/mapper/ubuntu--vg-lv--0
                         44.0G     10.7G     31.0G  26% /dev/termination-log
/dev/mapper/ubuntu--vg-lv--0
                         44.0G     10.7G     31.0G  26% /etc/resolv.conf
/dev/mapper/ubuntu--vg-lv--0
                         44.0G     10.7G     31.0G  26% /etc/hostname
/dev/mapper/ubuntu--vg-lv--0
                         44.0G     10.7G     31.0G  26% /etc/hosts
shm                      64.0M         0     64.0M   0% /dev/shm
tmpfs                     1.9G     12.0K      1.9G   0% /var/run/secrets/kubernetes.io/serviceaccount
tmpfs                     1.9G         0      1.9G   0% /proc/acpi
tmpfs                    64.0M         0     64.0M   0% /proc/kcore
tmpfs                    64.0M         0     64.0M   0% /proc/keys
tmpfs                    64.0M         0     64.0M   0% /proc/timer_list
tmpfs                    64.0M         0     64.0M   0% /proc/sched_debug
tmpfs                     1.9G         0      1.9G   0% /proc/scsi
tmpfs                     1.9G         0      1.9G   0% /sys/firmware


#Write test data
/ # echo test11 >>/data/test1.log
/ # cat /data/test1.log
test11

#Check on node 192.168.10.114: the mount relies on the host kernel, so the image is first mapped on the host and then mounted into the pod
root@k8s-node1:/etc/ceph# lsblk 
NAME                 MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
fd0                    2:0    1    4K  0 disk 
loop0                  7:0    0 70.3M  1 loop /snap/lxd/21029
loop1                  7:1    0 49.8M  1 loop /snap/snapd/17950
loop3                  7:3    0 49.6M  1 loop /snap/snapd/17883
loop4                  7:4    0 63.3M  1 loop /snap/core20/1778
loop5                  7:5    0 55.6M  1 loop /snap/core18/2667
loop6                  7:6    0 91.9M  1 loop /snap/lxd/24061
loop7                  7:7    0 55.6M  1 loop /snap/core18/2679
sda                    8:0    0   50G  0 disk 
├─sda1                 8:1    0    1M  0 part 
├─sda2                 8:2    0    1G  0 part 
└─sda3                 8:3    0   49G  0 part 
  ├─ubuntu--vg-lv--0 253:0    0   45G  0 lvm  /
  └─ubuntu--vg-lv--1 253:1    0    4G  0 lvm  /boot
sr0                   11:0    1  1.2G  0 rom  
rbd0                 252:0    0    3G  0 disk /var/lib/kubelet/pods/6b5a324b-ac83-4827-8b94-b6fc73283b8c/volumes/kubernetes.io~rbd/data1
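The kernel mapping can also be listed directly, which helps when several images are mapped; on the node:
rbd showmapped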

#Delete
root@k8s-master1-etcd1-haproxy1:/tools# kubectl delete -f case1-busybox-keyring.yaml 
pod "busybox" deleted

#Test mounting from an nginx deployment
#Annotated manifest
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80

        volumeMounts:                 #mount list
        - name: rbd-data1             #name of the volume to mount
          mountPath: /data            #mount destination inside the pod
      volumes:                        #volume list
        - name: rbd-data1             #volume name
          rbd:                        #volume type is rbd
            monitors:                 #list of mon servers
            - '172.31.6.101:6789'     #mon server ip:port
            - '172.31.6.102:6789'
            - '172.31.6.103:6789'
            pool: shijie-rbd-pool1    #pool name
            image: shijie-img-img1    #image in that pool
            fsType: ext4              #filesystem type, ext4 or xfs
            readOnly: false           #false = read-write; with true the pod cannot write to the volume
            user: magedu-shijie       #ceph user to authenticate as (admin can also be used)
            keyring: /etc/ceph/ceph.client.magedu-shijie.keyring   #keyring path on the node host; without it authentication fails
            
#Write the manifest
cat >case2-nginx-keyring.yaml<<'EOF' 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80

        volumeMounts:
        - name: rbd-data1
          mountPath: /data
      volumes:
        - name: rbd-data1
          rbd:
            monitors:
            - '192.168.10.103:6789'
            - '192.168.10.104:6789'
            - '192.168.10.105:6789'
            pool: shijie-rbd-pool1
            image: shijie-img-img1
            fsType: ext4
            readOnly: false
            user: shijie
            keyring: /etc/ceph/ceph.client.shijie.keyring
EOF

#Create
root@k8s-master1-etcd1-haproxy1:/tools# kubectl apply -f case2-nginx-keyring.yaml 
deployment.apps/nginx-deployment created

#Check
root@k8s-master1-etcd1-haproxy1:/tools# kubectl get pod -o wide
NAME                                READY   STATUS    RESTARTS   AGE   IP               NODE             NOMINATED NODE   READINESS GATES
net-test1                           1/1     Running   0          8h    10.200.107.225   192.168.10.116   <none>           <none>
nginx-deployment-84778799bf-vs44t   1/1     Running   0          52m   10.200.36.123    192.168.10.114   <none>           <none>

##Exec in and look around; the volume is always mounted on the host first, then into the pod
root@k8s-master1-etcd1-haproxy1:/tools# kubectl exec -it nginx-deployment-84778799bf-vs44t /bin/sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
# df -h
Filesystem                    Size  Used Avail Use% Mounted on
overlay                        44G   11G   32G  26% /
tmpfs                          64M     0   64M   0% /dev
tmpfs                         2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/rbd0                     2.9G   28K  2.9G   1% /data  #mounted successfully; mapped on the node first, then mounted into the pod
/dev/mapper/ubuntu--vg-lv--0   44G   11G   32G  26% /etc/hosts
shm                            64M     0   64M   0% /dev/shm
tmpfs                         2.0G   12K  2.0G   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs                         2.0G     0  2.0G   0% /proc/acpi
tmpfs                         2.0G     0  2.0G   0% /proc/scsi
tmpfs                         2.0G     0  2.0G   0% /sys/firmware

#The data created earlier is still there
# ls -l /data       
total 20
drwx------ 2 root root 16384 Feb  1 09:08 lost+found
-rw-r--r-- 1 root root     7 Feb  1 09:35 test1.log
# cat /data/test1.log
test11
#Write more test data
# cat /data/test1.log
test11
# echo test22 >> /data/test1.log
# echo test33 >> /data/test1.log                      
# cat /data/test1.log
test11
test22
test33

#Delete
root@k8s-master1-etcd1-haproxy1:/tools# kubectl delete -f case2-nginx-keyring.yaml 
deployment.apps "nginx-deployment" deleted
 

 

3. Hands-on: mounting RBD with a Kubernetes Secret

Define and create the Secret
#Annotated manifest
apiVersion: v1
kind: Secret                               #object type is Secret
metadata:
  name: ceph-secret-magedu-shijie          #secret name
type: "kubernetes.io/rbd"                  #secret type for rbd
data:                                      #the data holds the key
  key: QVFDbm1HSmg2L0dCTGhBQWtXQlRUTmg2R1RHWGpreXFtdFo5RHc9PQo=    #the ceph user's key (a non-admin user here), base64-encoded
  
  
#Get the non-admin user's keyring
root@ceph-deploy:/etc/ceph# cat ceph.client.shijie.keyring
[client.shijie]
    key = AQAs69lj9A1zBhAAIqRnzmzjdmoLab1ZnSjHPA==
    caps mon = "allow r"
    caps osd = "allow * pool=shijie-rbd-pool1"

#Base64-encode the non-admin user's key (encoding, not encryption)
root@k8s-master1-etcd1-haproxy1:/tools# echo AQAs69lj9A1zBhAAIqRnzmzjdmoLab1ZnSjHPA== | base64
QVFBczY5bGo5QTF6QmhBQUlxUm56bXpqZG1vTGFiMVpuU2pIUEE9PQo=
#To decode it back
root@k8s-master1-etcd1-haproxy1:/tools# echo QVFBczY5bGo5QTF6QmhBQUlxUm56bXpqZG1vTGFiMVpuU2pIUEE9PQo= | base64 -d
AQAs69lj9A1zBhAAIqRnzmzjdmoLab1ZnSjHPA==
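Alternatively, kubectl can handle the encoding itself; a sketch that creates the same secret as the manifest below (use one or the other, not both):
kubectl create secret generic ceph-secret-magedu-shijie \
  --type="kubernetes.io/rbd" \
  --from-literal=key='AQAs69lj9A1zBhAAIqRnzmzjdmoLab1ZnSjHPA=='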

#Write the secret manifest
cat >case3-secret-client-shijie.yaml<<'EOF'
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-magedu-shijie
type: "kubernetes.io/rbd"
data:
  key: QVFBczY5bGo5QTF6QmhBQUlxUm56bXpqZG1vTGFiMVpuU2pIUEE9PQo=
EOF

#Create the secret
root@k8s-master1-etcd1-haproxy1:/tools# kubectl apply -f case3-secret-client-shijie.yaml 
secret/ceph-secret-magedu-shijie created

#Annotated manifest
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80

        volumeMounts:                 #mount list
        - name: rbd-data1             #name of the volume to mount
          mountPath: /data            #mount destination inside the pod
      volumes:                        #volume list
        - name: rbd-data1             #volume name
          rbd:                        #volume type is rbd
            monitors:                 #list of mon servers
            - '172.31.6.101:6789'     #mon server ip:port
            - '172.31.6.102:6789'
            - '172.31.6.103:6789'
            pool: shijie-rbd-pool1    #pool name
            image: shijie-img-img1    #image in that pool
            fsType: ext4              #filesystem type, ext4 or xfs
            readOnly: false           #false = read-write; with true the pod cannot write to the volume
            user: magedu-shijie       #ceph user to authenticate as (admin can also be used)
            secretRef:                #reference the secret instead of a keyring file on the node
              name: ceph-secret-magedu-shijie   #name of the secret to use

#Write the pod manifest
cat >case4-nginx-secret.yaml<<'EOF'
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80

        volumeMounts:
        - name: data1
          mountPath: /data
      volumes:
        - name: data1
          rbd:
            monitors:
            - '192.168.10.103:6789'
            - '192.168.10.104:6789'
            - '192.168.10.105:6789'
            pool: shijie-rbd-pool1
            image: shijie-img-img1
            fsType: ext4
            readOnly: false
            user: shijie
            secretRef:
              name: ceph-secret-magedu-shijie 
EOF

#Create
root@k8s-master1-etcd1-haproxy1:/tools# kubectl apply -f case4-nginx-secret.yaml 
deployment.apps/nginx-deployment created

#Created successfully
root@k8s-master1-etcd1-haproxy1:/tools# kubectl get pod -o wide
NAME                               READY   STATUS    RESTARTS   AGE    IP               NODE             NOMINATED NODE   READINESS GATES
net-test1                          1/1     Running   0          47h    10.200.107.225   192.168.10.116   <none>           <none>
nginx-deployment-9ccc44b5b-6jzhk   1/1     Running   0          104m   10.200.36.124    192.168.10.114   <none>           <none>


#Exec into the pod and check the data created earlier
kubectl exec -it nginx-deployment-9ccc44b5b-6jzhk /bin/sh
#Check the mount
# df -TH
Filesystem                   Type     Size  Used Avail Use% Mounted on
overlay                      overlay   48G   12G   33G  27% /
tmpfs                        tmpfs     68M     0   68M   0% /dev
tmpfs                        tmpfs    2.1G     0  2.1G   0% /sys/fs/cgroup
/dev/rbd0                    ext4     3.1G   29k  3.1G   1% /data   #the mounted RBD volume; mapped on the host first, then mounted into the pod
/dev/mapper/ubuntu--vg-lv--0 ext4      48G   12G   33G  27% /etc/hosts
shm                          tmpfs     68M     0   68M   0% /dev/shm
tmpfs                        tmpfs    2.1G   13k  2.1G   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs                        tmpfs    2.1G     0  2.1G   0% /proc/acpi
tmpfs                        tmpfs    2.1G     0  2.1G   0% /proc/scsi
tmpfs                        tmpfs    2.1G     0  2.1G   0% /sys/firmware


#The earlier test data is still there
# ls /data
lost+found  test1.log
# cat /data/test1.log
test11
test22
test33

#Delete
root@k8s-master1-etcd1-haproxy1:/tools# kubectl delete -f case4-nginx-secret.yaml 
deployment.apps "nginx-deployment" deleted

 

4. Dynamic provisioning of Ceph volumes from k8s

#Get the admin key, to be base64-encoded
root@ceph-deploy:/etc/ceph# cat ceph.client.admin.keyring
[client.admin]
    key = AQAsW9djkfL6FhAA1ttQXjCZW22LGmhQ3Mrhqg==
    caps mds = "allow *"
    caps mgr = "allow *"
    caps mon = "allow *"
    caps osd = "allow *"
    


#Base64-encode the admin key
root@ceph-deploy:/etc/ceph# echo AQAsW9djkfL6FhAA1ttQXjCZW22LGmhQ3Mrhqg== | base64
QVFBc1c5ZGprZkw2RmhBQTF0dFFYakNaVzIyTEdtaFEzTXJocWc9PQo=
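ceph can also print just the key, which avoids copy-paste mistakes and the trailing newline a plain echo appends:
ceph auth get-key client.admin | base64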

#Annotated manifest: case5-secret-admin.yaml
apiVersion: v1                             #api version v1
kind: Secret                               #object type is Secret
metadata:
  name: ceph-secret-admin                  #secret name
type: "kubernetes.io/rbd"                  #secret type for rbd
data:                                      #the data holds the key
  key: QVFBM2RoZGhNZC9VQUJBQXIyU05wSitoY0sxZEQ1bDJIajVYTWc9PQo=     #the admin user's key, base64-encoded
  
  
#Write the manifest
cat >case5-secret-admin.yaml<<'EOF'
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-admin
type: "kubernetes.io/rbd"
data:
  key: QVFBc1c5ZGprZkw2RmhBQTF0dFFYakNaVzIyTEdtaFEzTXJocWc9PQo=
EOF

#The non-admin user's secret the pods will mount with, from the earlier RBD exercise: case3-secret-client-shijie.yaml
root@k8s-master1-etcd1-haproxy1:/tools# cat case3-secret-client-shijie.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-magedu-shijie
type: "kubernetes.io/rbd"
data:
  key: QVFBczY5bGo5QTF6QmhBQUlxUm56bXpqZG1vTGFiMVpuU2pIUEE9PQo=  #the non-admin user shijie's key, base64-encoded

#Create the secret the pods mount with
root@k8s-master1-etcd1-haproxy1:/tools# kubectl apply -f case3-secret-client-shijie.yaml 
secret/ceph-secret-magedu-shijie created


#Create the admin secret
root@k8s-master1-etcd1-haproxy1:/tools# kubectl apply -f case5-secret-admin.yaml
secret/ceph-secret-admin created

#Check the secrets
root@k8s-master1-etcd1-haproxy1:/tools# kubectl get Secret
NAME                        TYPE                                  DATA   AGE
ceph-secret-admin           kubernetes.io/rbd                     1      3h36m


#Annotated storage class manifest: case6-ceph-storage-class.yaml
apiVersion: storage.k8s.io/v1             #api version storage.k8s.io/v1
kind: StorageClass                        #object type is StorageClass
metadata:
  name: ceph-storage-class-shijie         #storage class name
  annotations:
    storageclass.kubernetes.io/is-default-class: "true" #make this the default class; any PVC that names no class is then provisioned from ceph
provisioner: kubernetes.io/rbd                          #the in-tree rbd provisioner
parameters:
  monitors: 172.31.6.101:6789,172.31.6.102:6789,172.31.6.103:6789 #mon server addresses
  adminId: admin                                        #ceph user that creates the volumes
  adminSecretName: ceph-secret-admin            #secret holding the admin key; k8s authenticates to ceph with it to create PVs
  adminSecretNamespace: default                 #namespace the admin secret lives in; pods consuming the class look their user secret up here too
  pool: shijie-rbd-pool1                        #pool to provision in; the rbd images are created automatically
  userId: magedu-shijie                         #non-admin user the pods mount with
  userSecretName: ceph-secret-admin    #secret holding the mounting user's key (this example points at the admin secret; strictly it should be the non-admin user's secret)

#Annotated PVC manifest: case7-mysql-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim                     #object type is PersistentVolumeClaim
metadata:
  name: mysql-data-pvc                          #pvc name
spec:
  accessModes:                                  #access mode
    - ReadWriteOnce                             #RWO: mountable read-write by a single node
  storageClassName: ceph-storage-class-shijie   #class to provision from; k8s asks ceph to create the PV, creates the PVC, and binds the two so pods can use it
  resources:                                    #size request
    requests:                                   #default request section
      storage: '5Gi'                            #requested size; must not exceed the pool's capacity

#Write the storage class manifest
cat >case6-ceph-storage-class.yaml<<'EOF'
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-storage-class-shijie
  annotations:
    storageclass.kubernetes.io/is-default-class: "true" #set as the default storage class
provisioner: kubernetes.io/rbd
parameters:
  monitors: 192.168.10.103:6789,192.168.10.104:6789,192.168.10.105:6789
  adminId: admin
  adminSecretName: ceph-secret-admin
  adminSecretNamespace: default 
  pool: shijie-rbd-pool1
  userId: shijie
  userSecretName: ceph-secret-admin
EOF

#Create the storage class
root@k8s-master1-etcd1-haproxy1:/tools# kubectl apply -f case6-ceph-storage-class.yaml 
storageclass.storage.k8s.io/ceph-storage-class-shijie created

##Check; the non-admin user's secret is used for mounting, the admin secret for creating PVs
root@k8s-master1-etcd1-haproxy1:/tools# kubectl get storageclasses
NAME                                  PROVISIONER         RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
ceph-storage-class-shijie (default)   kubernetes.io/rbd   Delete          Immediate           false                  38s

#Template PVC manifest that provisions through the storage class; with a class in place there is no need to spell out the storage type, server addresses, or mount sources per volume
#The class already carries the ceph auth info (the admin secret) and the mon addresses
root@k8s-master1-etcd1-haproxy1:/tools# cat case7-mysql-pvc.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-data-pvc
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ceph-storage-class-shijie 
  resources:
    requests:
      storage: '5Gi'


#Create the PVC
root@k8s-master1-etcd1-haproxy1:/tools# kubectl apply -f case7-mysql-pvc.yaml 
persistentvolumeclaim/mysql-data-pvc created

#Check the PVC; the STATUS must be Bound
root@k8s-master1-etcd1-haproxy1:/tools# kubectl get pvc
NAME             STATUS    VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS                AGE
mysql-data-pvc   Bound                                      ceph-storage-class-shijie   36s
#List the pool's images; k8s automatically creates an image for the PVC
rbd ls --pool shijie-rbd-pool1
root@ceph-deploy:/home/cephadmin/ceph-cluster# rbd ls --pool shijie-rbd-pool1
shijie-img-img1
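To see exactly which rbd image a Bound PVC landed on, the PV spec records it; a sketch:
PV=$(kubectl get pvc mysql-data-pvc -o jsonpath='{.spec.volumeName}')
kubectl get pv "$PV" -o jsonpath='{.spec.rbd.image}{"\n"}'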

#Annotated manifest
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - image: harbor.magedu.net/magedu/mysql:5.6.46 
        name: mysql
        env:
          # Use secret in real usage
        - name: MYSQL_ROOT_PASSWORD          #the mysql container initializes a root password on first start, so this variable must supply it
          value: magedu123456
        ports:
        - containerPort: 3306
          name: mysql
        volumeMounts:                               #mount list
        - name: mysql-persistent-storage            #name of the volume to mount
          mountPath: /var/lib/mysql                 #mount destination inside the pod
      volumes:                                      #volume list
      - name: mysql-persistent-storage              #volume name
        persistentVolumeClaim:                      #volume type is pvc
          claimName: mysql-data-pvc                 #name of the PVC to bind

#Run a mysql pod that mounts the PVC just created
#The mysql image needs a password variable at startup to set the root password
root@k8s-master1-etcd1-haproxy1:/tools# cat case8-mysql-single.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - image: mysql:5.6.46 
        name: mysql
        env:
          # Use secret in real usage
        - name: MYSQL_ROOT_PASSWORD
          value: magedu123456
        ports:
        - containerPort: 3306
          name: mysql
        volumeMounts:
        - name: mysql-persistent-storage
          mountPath: /var/lib/mysql
      volumes:
      - name: mysql-persistent-storage
        persistentVolumeClaim:
          claimName: mysql-data-pvc 


---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: mysql-service-label 
  name: mysql-service
spec:
  type: NodePort
  ports:
  - name: http
    port: 3306
    protocol: TCP
    targetPort: 3306
    nodePort: 43306
  selector:
    app: mysql

#Create the pod and service
root@k8s-master1-etcd1-haproxy1:/tools# kubectl apply -f case8-mysql-single.yaml 
deployment.apps/mysql created
service/mysql-service created


#Check
root@k8s-master1-etcd1-haproxy1:/tools# kubectl get pod
NAME                                READY   STATUS    RESTARTS   AGE
mysql-7dd5b9b5c6-q5tsv              1/1     Running   0          10m
net-test1                           1/1     Running   1          4d11h


#Exec into the pod and check the mount
root@k8s-master1-etcd1-haproxy1:/tools# kubectl exec -it mysql-7dd5b9b5c6-q5tsv /bin/sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
# df -h
Filesystem                    Size  Used Avail Use% Mounted on
overlay                        44G   12G   31G  28% /
tmpfs                          64M     0   64M   0% /dev
tmpfs                         2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/mapper/ubuntu--vg-lv--0   44G   12G   31G  28% /etc/hosts
shm                            64M     0   64M   0% /dev/shm
/dev/rbd0                     4.9G  116M  4.8G   3% /var/lib/mysql   #the mounted rbd volume
tmpfs                         2.0G   12K  2.0G   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs                         2.0G     0  2.0G   0% /proc/acpi
tmpfs                         2.0G     0  2.0G   0% /proc/scsi
tmpfs                         2.0G     0  2.0G   0% /sys/firmware
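A quick persistence check, assuming a kubectl recent enough to resolve deploy/mysql to a pod (the persist_test database name is made up): create a database, let the Deployment replace the pod, and confirm the data survives:
kubectl exec deploy/mysql -- mysql -uroot -pmagedu123456 -e 'CREATE DATABASE persist_test;'
kubectl delete pod -l app=mysql     #the Deployment recreates the pod
kubectl exec deploy/mysql -- mysql -uroot -pmagedu123456 -e 'SHOW DATABASES;'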



#Check usage on the ceph cluster
root@ceph-deploy:/home/cephadmin/ceph-cluster# ceph df
--- RAW STORAGE ---
CLASS     SIZE    AVAIL     USED  RAW USED  %RAW USED
hdd    480 GiB  479 GiB  848 MiB   848 MiB       0.17
TOTAL  480 GiB  479 GiB  848 MiB   848 MiB       0.17
 
--- POOLS ---
POOL                   ID  PGS   STORED  OBJECTS     USED  %USED  MAX AVAIL
device_health_metrics   1    1      0 B        0      0 B      0    152 GiB
cephfs-metadata         2   32   18 KiB       22  139 KiB      0    152 GiB
cephfs-data             3   64      0 B        0      0 B      0    152 GiB
.rgw.root               4   32  1.3 KiB        4   48 KiB      0    152 GiB
default.rgw.log         5   32  3.6 KiB      209  408 KiB      0    152 GiB
default.rgw.control     6   32      0 B        8      0 B      0    152 GiB
default.rgw.meta        7   32      0 B        0      0 B      0    152 GiB
shijie-rbd-pool1        9   32  122 MiB       49  366 MiB   0.08    152 GiB


A storage class is normally used together with external storage such as NAS or a commercial array; if a service only needs a few PVs and PVCs, they can be created by hand instead of being provisioned automatically through a class.

Delete all test resources
kubectl delete -f .

 

5. Using CephFS from k8s

#Create the metadata pool and the data pool, then create the filesystem
ceph osd pool create cephfs-metadata 32 32
ceph osd pool create cephfs-data 64 64
ceph fs new mycephfs cephfs-metadata cephfs-data

#Verify cephFS
root@ceph-deploy:/home/cephadmin/ceph-cluster# ceph fs ls
name: mycephfs, metadata pool: cephfs-metadata, data pools: [cephfs-data ]
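The MDS side can be checked too; on this cluster it should report one active daemon plus standbys:
ceph mds stat
ceph fs status mycephfs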

#List the ceph cluster's pools, including the cephfs ones
ceph osd pool ls
root@ceph-deploy:/home/cephadmin/ceph-cluster# ceph osd pool ls
device_health_metrics
cephfs-metadata
cephfs-data
.rgw.root
default.rgw.log
default.rgw.control
default.rgw.meta
shijie-rbd-pool1

#Annotated manifest
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80

        volumeMounts:                            #mount list
        - name: magedu-staticdata-cephfs         #name of the volume to mount
          mountPath: /usr/share/nginx/html/      #mount destination inside the pod
      volumes:                                   #volume list
        - name: magedu-staticdata-cephfs         #volume name
          cephfs:                                #volume type is cephfs
            monitors:                            #list of mon servers
            - '172.31.6.101:6789'                #mon server ip:port
            - '172.31.6.102:6789'
            - '172.31.6.103:6789'
            path: /                              #directory inside cephfs to mount, similar to an nfs export path
            user: admin                          #ceph user for authentication; admin connects to the cluster and passes the permission check
            secretRef:                           #reference the secret
              name: ceph-secret-admin            #name of the secret to use

#Prepare the nodes
#Copy the admin user's keyring to the k8s node hosts (ceph.conf was already distributed earlier)
sudo scp  /etc/ceph/ceph.client.admin.keyring  root@192.168.10.114:/etc/ceph/
sudo scp  /etc/ceph/ceph.client.admin.keyring  root@192.168.10.115:/etc/ceph/
sudo scp  /etc/ceph/ceph.client.admin.keyring  root@192.168.10.116:/etc/ceph/

#Create the secret
root@k8s-master1-etcd1-haproxy1:/tools# kubectl apply -f case5-secret-admin.yaml 
secret/ceph-secret-admin created

#Write the pod manifest
cat >case9-nginx-cephfs.yaml<<'EOF'
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80

        volumeMounts:
        - name: magedu-staticdata-cephfs 
          mountPath: /usr/share/nginx/html/ 
      volumes:
        - name: magedu-staticdata-cephfs
          cephfs:
            monitors:
            - '192.168.10.103:6789'
            - '192.168.10.104:6789'
            - '192.168.10.105:6789'
            path: /
            user: admin
            secretRef:
              name: ceph-secret-admin
EOF

#The mount can be tested manually on a node first
mkdir -p /tools/test-cephFS
mount -t ceph 192.168.10.103:6789:/  /tools/test-cephFS -o name=admin,secret=AQAsW9djkfL6FhAA1ttQXjCZW22LGmhQ3Mrhqg==
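Passing the key on the command line leaks it into shell history; mount.ceph also accepts a secretfile, and the test mount should be unmounted afterwards. A sketch (the /etc/ceph/admin.secret path is an assumption):
ceph auth get-key client.admin > /etc/ceph/admin.secret
mount -t ceph 192.168.10.103:6789:/ /tools/test-cephFS -o name=admin,secretfile=/etc/ceph/admin.secret
umount /tools/test-cephFS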

#Create the test pods
root@k8s-master1-etcd1-haproxy1:/tools# kubectl apply -f case9-nginx-cephfs.yaml 
deployment.apps/nginx-deployment created

#Check
root@k8s-master1-etcd1-haproxy1:/tools# kubectl get pod
NAME                                READY   STATUS    RESTARTS   AGE
mysql-7dd5b9b5c6-q5tsv              1/1     Running   0          10m
net-test1                           1/1     Running   1          4d11h
nginx-deployment-777ccc6f96-6wlcm   1/1     Running   0          12m
nginx-deployment-777ccc6f96-pv7bl   1/1     Running   0          12m
nginx-deployment-777ccc6f96-qrcnj   1/1     Running   0          12m

#Exec into a pod and check the mount
root@k8s-master1-etcd1-haproxy1:/tools# kubectl exec -it nginx-deployment-777ccc6f96-6wlcm /bin/sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
# df -h
Filesystem                                                     Size  Used Avail Use% Mounted on
overlay                                                         44G   12G   31G  28% /
tmpfs                                                           64M     0   64M   0% /dev
tmpfs                                                          2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/mapper/ubuntu--vg-lv--0                                    44G   12G   31G  28% /etc/hosts
shm                                                             64M     0   64M   0% /dev/shm
192.168.10.103:6789,192.168.10.104:6789,192.168.10.105:6789:/  152G     0  152G   0% /usr/share/nginx/html #the mounted cephfs directory
tmpfs                                                          2.0G   12K  2.0G   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs                                                          2.0G     0  2.0G   0% /proc/acpi
tmpfs                                                          2.0G     0  2.0G   0% /proc/scsi
tmpfs                                                          2.0G     0  2.0G   0% /sys/firmware
#Create a new data file
# echo "vs3333333" >> /usr/share/nginx/html/index.html
# cat /usr/share/nginx/html/index.html
vs3333333
# curl http://127.0.0.1/index.html
vs3333333
##Test from another pod
root@k8s-master1-etcd1-haproxy1:~# kubectl exec -it nginx-deployment-777ccc6f96-pv7bl /bin/sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
# df -h
Filesystem                                                     Size  Used Avail Use% Mounted on
overlay                                                         44G   12G   31G  28% /
tmpfs                                                           64M     0   64M   0% /dev
tmpfs                                                          2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/mapper/ubuntu--vg-lv--0                                    44G   12G   31G  28% /etc/hosts
shm                                                             64M     0   64M   0% /dev/shm
192.168.10.103:6789,192.168.10.104:6789,192.168.10.105:6789:/  152G     0  152G   0% /usr/share/nginx/html
tmpfs                                                          2.0G   12K  2.0G   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs                                                          2.0G     0  2.0G   0% /proc/acpi
tmpfs                                                          2.0G     0  2.0G   0% /proc/scsi
tmpfs                                                          2.0G     0  2.0G   0% /sys/firmware
# cat /usr/share/nginx/html/index.html
vs3333333
#Append more data from this pod
 # echo "vs444444" >> /usr/share/nginx/html/index.html
# cat /usr/share/nginx/html/index.html
vs3333333
vs444444
#Check from the first pod, nginx-deployment-777ccc6f96-6wlcm
# cat /usr/share/nginx/html/index.html
vs3333333
vs444444

#To isolate data between projects on cephfs, mount a distinct subdirectory per project (see the sketch after this listing)
# mkdir /usr/share/nginx/n56
# mkdir /usr/share/nginx/n57
# ls -l /usr/share/nginx/
total 8
drwxr-xr-x 2 root root    1 Feb  5 17:46 html
drwxr-xr-x 2 root root 4096 Feb  5 17:51 n56
drwxr-xr-x 2 root root 4096 Feb  5 17:51 n57
# 
Each project's data is kept apart by its own directory.
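A sketch of a per-project mount; the file name case10-nginx-cephfs-subdir.yaml, the deployment name, and the n56 directory (created above) are illustrative assumptions, and only the path: field differs from case9:
cat >case10-nginx-cephfs-subdir.yaml<<'EOF'
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-n56
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-n56
  template:
    metadata:
      labels:
        app: ng-n56
    spec:
      containers:
      - name: ng-n56
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - name: n56-staticdata-cephfs
          mountPath: /usr/share/nginx/html/
      volumes:
        - name: n56-staticdata-cephfs
          cephfs:
            monitors:
            - '192.168.10.103:6789'
            - '192.168.10.104:6789'
            - '192.168.10.105:6789'
            path: /n56            #only this project's subdirectory is visible to the pod
            user: admin
            secretRef:
              name: ceph-secret-admin
EOF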

#Delete all test resources
kubectl delete -f .

 
