Part 5: Using Ceph RBD Storage

1. Introduction to RBD

Ceph can simultaneously provide object storage (RADOSGW), block storage (RBD), and file system storage (CephFS). RBD is short for RADOS Block Device and is one of the most commonly used storage types. An RBD block device can be mapped and mounted like an ordinary disk, and it supports snapshots, multiple replicas, cloning, and consistency. Its data is striped across multiple OSDs in the Ceph cluster.

Striping is a technique that automatically balances I/O load across multiple physical disks: a contiguous piece of data is split into many small chunks that are stored on different disks. This lets multiple processes access different parts of the data at the same time without disk contention, and sequential access can achieve maximum I/O parallelism, which yields very good performance.
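
As an aside, RBD exposes striping through the --stripe-unit and --stripe-count options of rbd create. The lines below are a hypothetical sketch only (the image name mystriped and the values are assumptions, and the myrbd1 pool is created in section 2.1 below):

# Hypothetical: split data into 1 MiB stripe units spread across 4 objects (striping v2)
rbd create mystriped --size 5G --pool myrbd1 --stripe-unit 1M --stripe-count 4
rbd --pool myrbd1 --image mystriped info   # stripe unit/count appear in the output when striping v2 is in use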

2. Configuring RBD on the Ceph Side

2.1 Create a pool for RBD

ceph osd pool create <pool_name> <pg_num> <pgp_num>
#pgp determines how PG data is combined and placed; pgp_num is normally set equal to pg_num

ceph osd pool create myrbd1 64 64
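
A quick verification of the pool and its PG settings (not part of the original run):

ceph osd lspools                     # the new pool myrbd1 should be listed
ceph osd pool get myrbd1 pg_num      # should return pg_num: 64
ceph osd pool get myrbd1 pgp_num     # should return pgp_num: 64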

2.2 Enable the rbd application on the new pool

ceph osd pool application enable myrbd1 rbd 

2.3 Initialize the pool with the rbd command

rbd pool init -p myrbd1

2.4 Create images (img)

An RBD pool cannot be used as a block device directly; you first create images (img) in it on demand and use those images as block devices. The rbd command is used to create, list, and delete block-device images, as well as to clone images, create snapshots, roll an image back to a snapshot, view snapshots, and perform other management operations. For example, the following command creates an image named mying1.

#Create a 5 GB RBD image named mying1 in the myrbd1 pool
rbd create mying1 --size 5G --pool myrbd1

#The later steps will use myimg2. Because older kernels (such as the CentOS default kernel) cannot map images with newer features, only part of the feature set is enabled; features other than layering require a newer kernel.
rbd create myimg2 --size 3G --pool myrbd1 --image-format 2 --image-feature layering

2.5 List all images in the pool

cephadmin@ceph-deploy:~/ceph-cluster$ rbd ls --pool myrbd1
mying1
mying2

2.6 Show information about a specific image

cephadmin@ceph-deploy:~/ceph-cluster$ rbd --image mying1 --pool myrbd1 info
rbd image 'mying1':
	size 5 GiB in 1280 objects
	order 22 (4 MiB objects)
	snapshot_count: 0
	id: 14be30954ea4
	block_name_prefix: rbd_data.14be30954ea4
	format: 2
	features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
	op_features: 
	flags: 
	create_timestamp: Sun Apr  3 22:54:06 2022
	access_timestamp: Sun Apr  3 22:54:06 2022
	modify_timestamp: Sun Apr  3 22:54:06 2022
cephadmin@ceph-deploy:~/ceph-cluster$ rbd --image myimg2 --pool myrbd1 info
rbd image 'myimg2':
	size 3 GiB in 768 objects
	order 22 (4 MiB objects)
	snapshot_count: 0
	id: 5f0f6fb9ea1d
	block_name_prefix: rbd_data.5f0f6fb9ea1d
	format: 2
	features: layering
	op_features: 
	flags: 
	create_timestamp: Sun Apr  3 22:54:11 2022
	access_timestamp: Sun Apr  3 22:54:11 2022
	modify_timestamp: Sun Apr  3 22:54:11 2022

3. Using RBD from a Client

3.1 Current Ceph cluster usage

cephadmin@ceph-deploy:~/ceph-cluster$ ceph df
--- RAW STORAGE ---
CLASS     SIZE    AVAIL     USED  RAW USED  %RAW USED
hdd    120 GiB  120 GiB  294 MiB   294 MiB       0.24
TOTAL  120 GiB  120 GiB  294 MiB   294 MiB       0.24
 
--- POOLS ---
POOL                   ID  PGS   STORED  OBJECTS     USED  %USED  MAX AVAIL
device_health_metrics   1    1      0 B        0      0 B      0     38 GiB
.rgw.root               2   32  1.3 KiB        4   48 KiB      0     38 GiB
default.rgw.log         3   32  3.6 KiB      177  408 KiB      0     38 GiB
default.rgw.control     4   32      0 B        8      0 B      0     38 GiB
default.rgw.meta        5    8      0 B        0      0 B      0     38 GiB
cephfs-metadata         6   32  4.8 KiB       60  240 KiB      0     38 GiB
cephfs-data             7   64      0 B        0      0 B      0     38 GiB
myrbd1                  8   64    405 B        7   48 KiB      0     38 GiB

3.2 Configure the Ceph APT repository on the client

wget -q -O- 'https://mirrors.tuna.tsinghua.edu.cn/ceph/keys/release.asc' | sudo apt-key add -
echo "deb https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific bionic main" >> /etc/apt/source.list
apt update

3.3 Install ceph-common

apt install -y ceph-common

3.4 Copy the admin credentials from the deploy node to the client's /etc/ceph directory

cephadmin@ceph-deploy:~/ceph-cluster$ sudo scp ceph.conf ceph.client.admin.keyring root@192.168.1.180:/etc/ceph/

3.5 Map the image on the client

root@ceph-client:~# rbd --pool myrbd1 map myimg2
/dev/rbd0
root@ceph-client:~# fdisk -l | grep rbd0
Disk /dev/rbd0: 3 GiB, 3221225472 bytes, 6291456 sectors

3.6 Format rbd0 on the client and mount it at /mnt

root@ceph-client:~# mkfs.xfs /dev/rbd0
meta-data=/dev/rbd0              isize=512    agcount=9, agsize=97280 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=0, rmapbt=0, reflink=0
data     =                       bsize=4096   blocks=786432, imaxpct=25
         =                       sunit=1024   swidth=1024 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=8 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
root@ceph-client:~# mount /dev/rbd0 /mnt
root@ceph-client:~# df -TH|grep rbd0
/dev/rbd0      xfs       3.3G   38M  3.2G   2% /mnt

3.7 Create a 300 MB test file

root@ceph-client:~# dd if=/dev/zero of=/mnt/rbd_test bs=1M count=300
300+0 records in
300+0 records out
314572800 bytes (315 MB, 300 MiB) copied, 4.83789 s, 65.0 MB/s
root@ceph-client:~# ll -h /mnt/rbd_test 
-rw-r--r-- 1 root root 300M Apr  3 23:04 /mnt/rbd_test

3.8 Verify the data usage from the ceph-deploy node

cephadmin@ceph-deploy:~/ceph-cluster$ ceph df
--- RAW STORAGE ---
CLASS     SIZE    AVAIL     USED  RAW USED  %RAW USED
hdd    120 GiB  119 GiB  1.2 GiB   1.2 GiB       0.98
TOTAL  120 GiB  119 GiB  1.2 GiB   1.2 GiB       0.98
 
--- POOLS ---
POOL                   ID  PGS   STORED  OBJECTS     USED  %USED  MAX AVAIL
device_health_metrics   1    1      0 B        0      0 B      0     37 GiB
.rgw.root               2   32  1.3 KiB        4   48 KiB      0     37 GiB
default.rgw.log         3   32  3.6 KiB      177  408 KiB      0     37 GiB
default.rgw.control     4   32      0 B        8      0 B      0     37 GiB
default.rgw.meta        5    8      0 B        0      0 B      0     37 GiB
cephfs-metadata         6   32  4.8 KiB       60  240 KiB      0     37 GiB
cephfs-data             7   64      0 B        0      0 B      0     37 GiB
myrbd1                  8   64  310 MiB       95  931 MiB   0.80     37 GiB

4. Mounting RBD with a Non-admin User

4.1 Create a regular account and capabilities on ceph-deploy

cephadmin@ceph-deploy:~/ceph-cluster$ ceph auth get-or-create client.jack mon 'allow r' osd 'allow rwx pool=myrbd1'
[client.jack]
	key = AQDdt0liUbpCHRAA6EaxRQ0b+1KKEMbxm7T7PA==
cephadmin@ceph-deploy:~/ceph-cluster$ ceph auth get client.jack
[client.jack]
	key = AQDdt0liUbpCHRAA6EaxRQ0b+1KKEMbxm7T7PA==
	caps mon = "allow r"
	caps osd = "allow rwx pool=myrbd1"
exported keyring for client.jack

4.2 Create a keyring file for client.jack on ceph-deploy

# Create an empty keyring file
cephadmin@ceph-deploy:~/ceph-cluster$ sudo ceph-authtool --create-keyring ceph.client.jack.keyring
creating ceph.client.jack.keyring

#Export the client.jack credentials into the keyring file
cephadmin@ceph-deploy:~/ceph-cluster$ sudo ceph auth get client.jack -o ceph.client.jack.keyring 
exported keyring for client.jack

cephadmin@ceph-deploy:~/ceph-cluster$ sudo ceph-authtool -l ceph.client.jack.keyring 
[client.jack]
	key = AQDdt0liUbpCHRAA6EaxRQ0b+1KKEMbxm7T7PA==
	caps mon = "allow r"
	caps osd = "allow rwx pool=myrbd1"

4.3 Install ceph-common on the client

#1. Configure the Ceph APT repository
root@client:~# wget -q -O- 'https://mirrors.tuna.tsinghua.edu.cn/ceph/keys/release.asc' | sudo apt-key add -
root@client:~# echo "deb https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific bionic main" >> /etc/apt/sources.list
root@client:~# apt update
#2. Install ceph-common
root@client:~# apt install ceph-common

4.4 Copy the regular user's credentials from ceph-deploy to the client

#Sync ceph.conf and ceph.client.jack.keyring to the client
cephadmin@ceph-deploy:/etc/ceph$ sudo scp ceph.conf ceph.client.jack.keyring 192.168.1.180:/etc/ceph/
#Verify on the client
root@client:/etc/ceph# ls
ceph.client.admin.keyring  ceph.client.jack.keyring ceph.conf  rbdmap

#Verify permissions using the jack user
root@client:/etc/ceph# ceph --user jack -s
  cluster:
    id:     f0e7c394-989b-4803-86c3-5557ae25e814
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph-mon01,ceph-mon02,ceph-mon03 (age 10h)
    mgr: ceph-mgr01(active, since 10h), standbys: ceph-mgr02
    osd: 16 osds: 11 up (since 10h), 11 in (since 10h)
 
  data:
    pools:   2 pools, 65 pgs
    objects: 94 objects, 310 MiB
    usage:   2.0 GiB used, 218 GiB / 220 GiB avail
    pgs:     65 active+clean

4.5 Map an RBD image as the regular user

root@client:/etc/ceph# rbd --user jack --pool myrbd1 map mying1
rbd: sysfs write failed
RBD image feature set mismatch. You can disable features unsupported by the kernel with "rbd feature disable myrbd1/mying1 object-map fast-diff deep-flatten".
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: (6) No such device or address
rbd: --user is deprecated, use --id
#The rbd map failed because the client kernel is too old and does not support the object-map, fast-diff and deep-flatten features.
#Ubuntu 20.04 supports these features.
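
As the error message itself suggests, an alternative to creating a new image is to disable the unsupported features on the existing one; a sketch, not part of the original run:

# On a node with write access (e.g. ceph-deploy): disable the features the old client kernel cannot handle
rbd feature disable myrbd1/mying1 object-map fast-diff deep-flatten
# Then retry the map on the client
rbd --user jack --pool myrbd1 map mying1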

On ceph-deploy, create an image mying3 without the object-map, fast-diff and deep-flatten features

#Create image mying3 with image-format 2 and only the layering feature enabled
cephadmin@ceph-deploy:/etc/ceph$ rbd create mying3 --size 3G --pool myrbd1 --image-format 2 --image-feature layering
cephadmin@ceph-deploy:/etc/ceph$ rbd ls --pool myrbd1
mying1
mying2
mying3

Map mying3 on the client

root@client:/etc/ceph# rbd --user jack --pool myrbd1 map mying3
/dev/rbd1
#Verify: rbd1 has been mapped successfully
root@client:/etc/ceph# fdisk -l /dev/rbd1
Disk /dev/rbd1: 3 GiB, 3221225472 bytes, 6291456 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes
#It can now be formatted and used

4.6 Format and use the RBD device

root@client:/etc/ceph# fdisk -l
Disk /dev/sda: 50 GiB, 53687091200 bytes, 104857600 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: dos
Disk identifier: 0x870c4380

Device     Boot Start       End   Sectors Size Id Type
/dev/sda1  *     2048 104855551 104853504  50G 83 Linux


Disk /dev/rbd1: 3 GiB, 3221225472 bytes, 6291456 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes

#Format the rbd1 device with ext4
root@client:/etc/ceph# mkfs.ext4 /dev/rbd1
mke2fs 1.44.1 (24-Mar-2018)
Discarding device blocks: done                            
Creating filesystem with 786432 4k blocks and 196608 inodes
Filesystem UUID: dae3f414-ceae-4535-97d4-c369820f3116
Superblock backups stored on blocks: 
    32768, 98304, 163840, 229376, 294912

Allocating group tables: done                            
Writing inode tables: done                            
Creating journal (16384 blocks): done
Writing superblocks and filesystem accounting information:      
done

#Mount /dev/rbd1 at /mnt
root@client:/etc/ceph# mount /dev/rbd1 /mnt
root@client:/etc/ceph# mount |grep /dev/rbd1
/dev/rbd1 on /mnt type ext4 (rw,relatime,stripe=1024,data=ordered)

#Create a 200 MB test file in /mnt
root@client:/etc/ceph# cd /mnt
root@client:/mnt# 
root@client:/mnt# 
root@client:/mnt# dd if=/dev/zero of=/mnt/rbd-test bs=1M count=200
200+0 records in
200+0 records out
209715200 bytes (210 MB, 200 MiB) copied, 0.205969 s, 1.0 GB/s

root@client:/mnt# ll -h
total 201M
drwxr-xr-x  3 root root 4.0K Aug 23 13:06 ./
drwxr-xr-x 22 root root  326 Aug 17 09:56 ../
drwx------  2 root root  16K Aug 23 13:04 lost+found/
-rw-r--r--  1 root root 200M Aug 23 13:06 rbd-test

4.7 After a successful RBD map, the system automatically loads the Ceph kernel modules

root@ceph-client:/etc/ceph# lsmod |grep ceph
libceph               315392  1 rbd
libcrc32c              16384  3 xfs,raid456,libceph

4.8 Resizing an RBD image

The image can be grown; shrinking it is not recommended.
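
If shrinking is ever required, rbd refuses unless told to explicitly; shown here only as a sketch (shrink the filesystem inside the image first, otherwise any data beyond the new size is lost):

rbd resize --pool myrbd1 --image mying3 --size 2G --allow-shrink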

#Current size of mying3
cephadmin@ceph-deploy:/etc/ceph$ rbd ls --pool myrbd1 -l
NAME    SIZE   PARENT  FMT  PROT  LOCK
mying1  5 GiB            2            
mying2  3 GiB            2            
mying3  3 GiB            2   
#Grow the mying3 image to 8G
cephadmin@ceph-deploy:/etc/ceph$ rbd resize --pool myrbd1 --image mying3 --size 8G
Resizing image: 100% complete...done.
cephadmin@ceph-deploy:/etc/ceph$ rbd ls --pool myrbd1 -l
NAME    SIZE   PARENT  FMT  PROT  LOCK
mying1  5 GiB            2            
mying2  3 GiB            2            
mying3  8 GiB            2  

#On the client, /dev/rbd1 is now 8G, but the filesystem is still 3G
root@client:/mnt# df -Th /mnt
Filesystem     Type  Size  Used Avail Use% Mounted on
/dev/rbd1      ext4  2.9G  209M  2.6G   8% /mnt

root@client:/mnt# df -Th /dev/rbd1
Filesystem     Type  Size  Used Avail Use% Mounted on
/dev/rbd1      ext4  2.9G  209M  2.6G   8% /mnt

#Grow the filesystem
#1. Unmount
root@client:~# umount /mnt
#2. Grow the ext4 filesystem on /dev/rbd1
root@client:~# resize2fs /dev/rbd1
#3. Remount
root@client:~# mount /dev/rbd1 /mnt

root@client:~# df -Th /dev/rbd1
Filesystem     Type  Size  Used Avail Use% Mounted on
/dev/rbd1      ext4  7.9G  214M  7.3G   3% /mnt
root@client:~# df -Th /mnt
Filesystem     Type  Size  Used Avail Use% Mounted on
/dev/rbd1      ext4  7.9G  214M  7.3G   3% /mnt
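
Unmounting is not strictly necessary for growing: ext4 can be grown online with resize2fs while mounted, and an xfs filesystem is grown with xfs_growfs on its mount point. A minimal sketch assuming the mounts used earlier:

root@client:~# resize2fs /dev/rbd1          # grow a mounted ext4 filesystem in place
root@ceph-client:~# xfs_growfs /mnt         # for the xfs example from section 3.6, where /dev/rbd0 is mounted at /mnt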

4.9 Map and mount automatically at boot

#On Ubuntu the file is /etc/rc.local (on CentOS it is /etc/rc.d/rc.local)
root@client:~# cat /etc/rc.local
rbd --user jack -p myrbd1 map mying3
mount /dev/rbd1 /mnt
root@client:~# chmod a+x /etc/rc.local
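
A more robust alternative to rc.local (sketched here from the standard rbdmap mechanism shipped with ceph-common, not from the original setup) is to list the image in /etc/ceph/rbdmap, add a matching noauto entry to /etc/fstab, and enable the rbdmap service, which maps the listed images at boot:

# /etc/ceph/rbdmap
myrbd1/mying3  id=jack,keyring=/etc/ceph/ceph.client.jack.keyring
# /etc/fstab
/dev/rbd/myrbd1/mying3  /mnt  ext4  defaults,noauto  0 0
# Enable the boot-time service
root@client:~# systemctl enable rbdmap.service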

5. RBD Image Information and Features

5.1 Show detailed image information

cephadmin@ceph-deploy:/etc/ceph$ rbd --pool myrbd1 --image mying1 info
rbd image 'mying1':
    size 5 GiB in 1280 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 144f88aecd24
    block_name_prefix: rbd_data.144f88aecd24
    format: 2
    features: layering, exclusive-lock, object-map, fast-diff, deep-flatten  #image features
    op_features: 
    flags: 
    create_timestamp: Fri Aug 20 22:08:32 2021
    access_timestamp: Fri Aug 20 22:08:32 2021
    modify_timestamp: Fri Aug 20 22:08:32 2021

cephadmin@ceph-deploy:/etc/ceph$ rbd --pool myrbd1 --image mying2 info
rbd image 'mying2':
    size 3 GiB in 768 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 1458dabfc2f1
    block_name_prefix: rbd_data.1458dabfc2f1
    format: 2
    features: layering
    op_features: 
    flags: 
    create_timestamp: Fri Aug 20 22:11:30 2021
    access_timestamp: Fri Aug 20 22:11:30 2021
    modify_timestamp: Fri Aug 20 22:11:30 2021

5.2 Show image information in JSON format

cephadmin@ceph-deploy:/etc/ceph$ rbd ls --pool myrbd1 -l --format json --pretty-format
[
    {
        "image": "mying1",
        "id": "144f88aecd24",
        "size": 5368709120,
        "format": 2
    },
    {
        "image": "mying2",
        "id": "1458dabfc2f1",
        "size": 3221225472,
        "format": 2
    },
    {
        "image": "mying3",
        "id": "1893f853e249",
        "size": 3221225472,
        "format": 2
    }
]

5.3 Image features

cephadmin@ceph-deploy:/etc/ceph$ rbd help feature enable 
usage: rbd feature enable [--pool <pool>] [--namespace <namespace>] 
                          [--image <image>] 
                          [--journal-splay-width <journal-splay-width>] 
                          [--journal-object-size <journal-object-size>] 
                          [--journal-pool <journal-pool>] 
                          <image-spec> <features> [<features> ...] 

Enable the specified image feature.

Positional arguments
  <image-spec>              image specification
                            (example: [<pool-name>/[<namespace>/]]<image-name>)
  <features>                image features
                            [exclusive-lock, object-map, journaling]

Optional arguments
  -p [ --pool ] arg         pool name
  --namespace arg           namespace name
  --image arg               image name
  --journal-splay-width arg number of active journal objects
  --journal-object-size arg size of journal objects [4K <= size <= 64M]
  --journal-pool arg        pool for journal objects

Feature overview (a creation example with explicit features follows the list):

(1) layering: supports layered snapshots, used for snapshots and copy-on-write. An image can be snapshotted and protected, and new images can be cloned from the snapshot; parent and child images share object data via COW.
(2) striping: supports striping v2, similar to RAID 0 except that in Ceph the data is spread across different objects; it can improve performance for workloads with a lot of sequential reads and writes.
(3) exclusive-lock: supports an exclusive lock, restricting an image to one client at a time.
(4) object-map: supports an object map (depends on exclusive-lock), which keeps a bitmap of all objects in the image marking whether each object actually exists; it speeds up data import/export, used-space accounting, and some I/O paths.
(5) fast-diff: fast computation of differences between an image and its snapshots (depends on object-map).
(6) deep-flatten: supports flattening snapshots, used to break snapshot dependencies during snapshot management.
(7) journaling: records all modifications to a journal so data can be recovered by replaying it (depends on exclusive-lock); enabling it increases disk I/O.
(8) Features enabled by default since Jewel: layering, exclusive-lock, object-map, fast-diff, deep-flatten.
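
As the illustration referenced above, features can also be chosen explicitly when an image is created; the image name mying4 below is hypothetical:

# Create an image with only the features an older kernel client can handle
rbd create mying4 --size 3G --pool myrbd1 --image-format 2 --image-feature layering,exclusive-lock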

5.4 Enabling image features

cephadmin@ceph-deploy:/etc/ceph$ rbd --pool myrbd1 --image mying2 info
rbd image 'mying2':
    size 3 GiB in 768 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 1458dabfc2f1
    block_name_prefix: rbd_data.1458dabfc2f1
    format: 2
    features: layering
    op_features: 
    flags: 
    create_timestamp: Fri Aug 20 22:11:30 2021
    access_timestamp: Fri Aug 20 22:11:30 2021
    modify_timestamp: Fri Aug 20 22:11:30 2021


#Enable features on a specific image in a specific pool:
cephadmin@ceph-deploy:/etc/ceph$ rbd feature enable exclusive-lock --pool myrbd1 --image mying2
cephadmin@ceph-deploy:/etc/ceph$ rbd feature enable object-map --pool myrbd1 --image mying2
cephadmin@ceph-deploy:/etc/ceph$ rbd feature enable fast-diff --pool myrbd1 --image mying2

#Verify
cephadmin@ceph-deploy:/etc/ceph$ rbd --pool myrbd1 --image mying2 info
rbd image 'mying2':
    size 3 GiB in 768 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 1458dabfc2f1
    block_name_prefix: rbd_data.1458dabfc2f1
    format: 2
    features: layering, exclusive-lock, object-map, fast-diff  #features are now enabled
    op_features: 
    flags: object map invalid, fast diff invalid
    create_timestamp: Fri Aug 20 22:11:30 2021
    access_timestamp: Fri Aug 20 22:11:30 2021
    modify_timestamp: Fri Aug 20 22:11:30 2021
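
The "object map invalid, fast diff invalid" flags appear because the features were enabled after data had already been written to the image; rebuilding the object map clears them (a sketch, not part of the original run):

cephadmin@ceph-deploy:/etc/ceph$ rbd object-map rebuild --pool myrbd1 --image mying2
cephadmin@ceph-deploy:/etc/ceph$ rbd --pool myrbd1 --image mying2 info   # the flags line should be empty again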

5.5 Disabling image features

#Disable a feature on a specific image in a specific pool
cephadmin@ceph-deploy:/etc/ceph$ rbd feature disable fast-diff --pool myrbd1 --image mying2

cephadmin@ceph-deploy:/etc/ceph$ rbd --pool myrbd1 --image mying2 info
rbd image 'mying2':
    size 3 GiB in 768 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 1458dabfc2f1
    block_name_prefix: rbd_data.1458dabfc2f1
    format: 2
    features: layering, exclusive-lock #the fast-diff feature has been disabled
    op_features: 
    flags: 
    create_timestamp: Fri Aug 20 22:11:30 2021
    access_timestamp: Fri Aug 20 22:11:30 2021
    modify_timestamp: Fri Aug 20 22:11:30 2021

6. Unmounting and Unmapping an RBD Image on the Client

root@ceph-client:/etc/ceph# umount /mnt
root@ceph-client:/etc/ceph# rbd --pool myrbd1 unmap mying3
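
To confirm that nothing remains mapped, the mapped-device list can be checked (not part of the original run):

root@ceph-client:/etc/ceph# rbd showmapped   # prints nothing once all images are unmapped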

7. Permanently Deleting an RBD Image

Once an image is deleted, its data is deleted as well and cannot be recovered, so be careful when performing the delete operation.

cephadmin@ceph-deploy:/etc/ceph$ rbd help rm
usage: rbd rm [--pool <pool>] [--namespace <namespace>] [--image <image>] 
              [--no-progress] 
              <image-spec> 

Delete an image.

Positional arguments
  <image-spec>         image specification
                       (example: [<pool-name>/[<namespace>/]]<image-name>)

Optional arguments
  -p [ --pool ] arg    pool name
  --namespace arg      namespace name
  --image arg          image name
  --no-progress        disable progress output
#Delete the mying1 image from the myrbd1 pool
cephadmin@ceph-deploy:/etc/ceph$ rbd rm --pool myrbd1 --image mying1
Removing image: 100% complete...done.
cephadmin@ceph-deploy:/etc/ceph$ rbd ls --pool myrbd1 
mying2
mying3

8. RBD Image Trash Mechanism

A deleted image cannot be recovered, but there is an alternative: move the image to the trash first, and only remove it from the trash later once you are sure it is no longer needed; until then it can still be restored.

cephadmin@ceph-deploy:/etc/ceph$ rbd trash --help
status                   Show the status of this image.
trash list (trash ls)    List trash images.
trash move (trash mv)    Move an image to the trash.
trash purge              Remove all expired images from trash.
trash remove (trash rm)  Remove an image from trash.
trash restore            Restore an image from trash.
#Check the status (watchers) of each image
cephadmin@ceph-deploy:/etc/ceph$ rbd status --pool=myrbd1 --image=mying3
Watchers:
    watcher=172.168.32.111:0/80535927 client.14665 cookie=18446462598732840962

cephadmin@ceph-deploy:/etc/ceph$ rbd status --pool=myrbd1 --image=mying2
Watchers:
    watcher=172.168.32.111:0/1284154910 client.24764 cookie=18446462598732840961

#Move mying2 to the trash
cephadmin@ceph-deploy:/etc/ceph$ rbd trash move --pool myrbd1 --image mying2
cephadmin@ceph-deploy:/etc/ceph$ rbd ls --pool myrbd1
mying3

#List images in the trash
cephadmin@ceph-deploy:/etc/ceph$ rbd trash list --pool myrbd1
1458dabfc2f1 mying2  #1458dabfc2f1 is the image ID, which is needed when restoring the image

#Restore the image from the trash
cephadmin@ceph-deploy:/etc/ceph$ rbd trash restore --pool myrbd1 --image mying2 --image-id 1458dabfc2f1
cephadmin@ceph-deploy:/etc/ceph$ rbd ls --pool myrbd1 -l
NAME    SIZE   PARENT  FMT  PROT  LOCK
mying2  3 GiB            2            
mying3  8 GiB            2 

#Permanently delete an image from the trash
#If the image is no longer needed, trash remove deletes it from the trash for good
cephadmin@ceph-deploy:/etc/ceph$ rbd trash remove --pool myrbd1 --image-id 1458dabfc2f1

9. RBD Image Snapshots

9.1 Snapshot subcommands

cephadmin@ceph-deploy:/etc/ceph$ rbd help snap
snap create (snap add)       #create a snapshot
snap limit clear             #clear the snapshot count limit on an image
snap limit set               #set an upper limit on the number of snapshots of an image
snap list (snap ls)          #list snapshots
snap protect                 #protect a snapshot from deletion (required before cloning; see the sketch after this list)
snap purge                   #delete all unprotected snapshots
snap remove (snap rm)        #delete a snapshot
snap rename                  #rename a snapshot
snap rollback (snap revert)  #roll the image back to a snapshot
snap unprotect               #allow a snapshot to be deleted again (remove protection)
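
As referenced in the list above, snap protect is mainly used together with cloning; a minimal sketch with a hypothetical clone name (the snapshot itself is only created in section 9.2 below):

rbd snap protect myrbd1/mying3@mying3-snap-20210823
rbd clone myrbd1/mying3@mying3-snap-20210823 myrbd1/mying3-clone
rbd flatten myrbd1/mying3-clone                        # detach the clone from its parent snapshot
rbd snap unprotect myrbd1/mying3@mying3-snap-20210823  # allowed once no clones depend on it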

9.2 Create a snapshot

#Check the current data on the client
root@client:~# ll -h /mnt
total 201M
drwxr-xr-x  3 root root 4.0K Aug 23 13:06 ./
drwxr-xr-x 22 root root  326 Aug 17 09:56 ../
drwx------  2 root root  16K Aug 23 13:04 lost+found/
-rw-r--r--  1 root root 200M Aug 23 13:06 rbd-test

#Create the snapshot on ceph-deploy
cephadmin@ceph-deploy:/etc/ceph$ rbd help snap create
usage: rbd snap create [--pool <pool>] [--namespace <namespace>] 
                       [--image <image>] [--snap <snap>] [--skip-quiesce] 
                       [--ignore-quiesce-error] [--no-progress] 
                       <snap-spec> 

cephadmin@ceph-deploy:/etc/ceph$ rbd snap create --pool myrbd1 --image mying3 --snap mying3-snap-20210823
Creating snap: 100% complete...done.
#Verify the snapshot
cephadmin@ceph-deploy:/etc/ceph$ rbd snap list --pool myrbd1 --image mying3
SNAPID  NAME                  SIZE   PROTECTED  TIMESTAMP               
     4  mying3-snap-20210823  8 GiB             Mon Aug 23 14:01:30 2021

9.3 Delete the data on the client and restore it from the snapshot

#Delete the data on the client, then unmount and unmap the RBD device
root@client:~# rm -rf /mnt/rbd-test 
root@client:~# ll /mnt
total 20
drwxr-xr-x  3 root root  4096 Aug 23 14:03 ./
drwxr-xr-x 22 root root   326 Aug 17 09:56 ../
drwx------  2 root root 16384 Aug 23 13:04 lost+found/
root@client:~# umount /mnt
root@client:~# rbd --pool myrbd1 unmap --image mying3

#Restore the data from the snapshot
root@client:~# rbd help snap rollback 
usage: rbd snap rollback [--pool <pool>] [--namespace <namespace>] 
                         [--image <image>] [--snap <snap>] [--no-progress] 
                         <snap-spec> 
#Roll back to the snapshot
cephadmin@ceph-deploy:/etc/ceph$ sudo rbd snap rollback --pool myrbd1 --image mying3 --snap mying3-snap-20210823
Rolling back to snapshot: 100% complete...done.
#Verify on the client
root@client:~# rbd --pool myrbd1 map mying3
/dev/rbd1
root@client:~# mount /dev/rbd1 /mnt
root@client:~# ll -h /mnt
total 201M
drwxr-xr-x  3 root root 4.0K Aug 23 13:06 ./
drwxr-xr-x 22 root root  326 Aug 17 09:56 ../
drwx------  2 root root  16K Aug 23 13:04 lost+found/
-rw-r--r--  1 root root 200M Aug 23 13:06 rbd-test
#Data restored successfully

9.4 Delete a specific snapshot

cephadmin@ceph-deploy:/etc/ceph$ rbd snap list --pool myrbd1 --image mying3
SNAPID  NAME                  SIZE   PROTECTED  TIMESTAMP               
     4  mying3-snap-20210823  8 GiB             Mon Aug 23 14:01:30 2021
     
cephadmin@ceph-deploy:/etc/ceph$ rbd snap rm --pool myrbd1 --image mying3 --snap mying3-snap-20210823
Removing snap: 100% complete...done.

cephadmin@ceph-deploy:/etc/ceph$ rbd snap list --pool myrbd1 --image mying3
cephadmin@ceph-deploy:/etc/ceph$

9.5 Limit the number of snapshots

#Set and modify the snapshot count limit
cephadmin@ceph-deploy:/etc/ceph$ rbd snap limit set --pool myrbd1 --image mying3 --limit 30
cephadmin@ceph-deploy:/etc/ceph$ rbd snap limit set --pool myrbd1 --image mying3 --limit 20
cephadmin@ceph-deploy:/etc/ceph$ rbd snap limit set --pool myrbd1 --image mying3 --limit 15
#Clear the snapshot count limit
cephadmin@ceph-deploy:/etc/ceph$ rbd snap limit clear --pool myrbd1 --image mying3

  

 
