Ceph - RBD Operations


Ceph learning series:

  1. Complete Ceph deployment (CentOS 7 + Luminous)
  2. Ceph - RBD Operations
  3. Ceph - Dashboard

Environment:

IP address          Hostname
192.168.118.14      ceph-node1
192.168.118.15      ceph-node2
192.168.118.16      ceph-node3
192.168.118.17      ceph-client

Note: update all nodes to the latest kernel version first!
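The stock CentOS 7 kernel (3.10) predates several RBD features. One common route to a newer kernel is the ELRepo mainline package; the following is a minimal sketch to run on every node (verify the repository URL and kernel package for your environment):

# Install the ELRepo repository and a mainline kernel
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum install -y https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
yum --enablerepo=elrepo-kernel install -y kernel-ml

# Boot into the new kernel by default
grub2-set-default 0
reboot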

Creating an RBD

Server-side operations

Create a pool

[root@ceph-node1 ~/mycluster]#ceph osd pool create rbd 64
pool 'rbd' created
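The 64 here is the pool's placement-group count (pg_num); pgp_num defaults to the same value. Both can be checked after creation:

# Verify the PG settings of the new pool
ceph osd pool get rbd pg_num
ceph osd pool get rbd pgp_num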

Create a client account

# Create the client user
[root@ceph-node1 ~/mycluster]#ceph auth get-or-create client.rbd mon 'allow r' osd 'allow class-read object_prefix rbd_children,allow rwx pool=rbd'

# View the user and its capabilities
[root@ceph-node1 ~/mycluster]#ceph auth get client.rbd
exported keyring for client.rbd
[client.rbd]
	key = AQB6OAhfMN4jFhAAPmO17m5Z5gP5YC11JOJcTA==
	caps mon = "allow r"
	caps osd = "allow class-read object_prefix rbd_children,allow rwx pool=rbd"

# Export the client keyring
[root@ceph-node1 ~/mycluster]#ceph auth get client.rbd -o ./ceph.client.rbd.keyring
exported keyring for client.rbd

Enable RBD on the pool

[root@ceph-node1 ~/mycluster]#ceph osd pool application enable rbd rbd 
enabled application 'rbd' on pool 'rbd'
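On Luminous and later, rbd pool init is an alternative that tags the pool with the 'rbd' application and initializes it in one step; a sketch, run with an admin keyring on the server node:

# Equivalent one-step initialization of the pool for RBD
rbd pool init rbd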

Client-side operations

Install ceph-common

[root@ceph-client ~]#yum install ceph-common -y

Copy ceph.conf and the auth keyring from ceph-node1. Note that the keyring file must be named ceph.client.rbd.keyring so that the client tools find it automatically when --user rbd is given.

[root@ceph-node1 ~/mycluster]#scp ceph.conf  ceph.client.rbd.keyring ceph-client:/etc/ceph/
[root@ceph-client ~]#ls /etc/ceph/
ceph.client.rbd.keyring  ceph.conf  rbdmap

# Check the cluster status with the newly created user rbd
[root@ceph-client ~]#ceph -s --user rbd
  cluster:
    id:     45757634-b5ec-4172-957d-80c5c9f76d52
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph-node1,ceph-node2,ceph-node3 (age 65m)
    mgr: no daemons active
    osd: 0 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:

Create an image

# Create images
[root@ceph-client ~]#rbd create rbd1 -p rbd --size 1G --user rbd
[root@ceph-client ~]#rbd create rbd/rbd2 --size 2G --user rbd

# List the created images
[root@ceph-client ~]#rbd ls -l --user rbd
NAME SIZE  PARENT FMT PROT LOCK
rbd1 1 GiB          2
rbd2 2 GiB          2

# List in JSON format
[root@ceph-client ~]#rbd ls -p rbd -l --format json --user rbd --pretty-format
[
    {
        "image": "rbd1",
        "size": 1073741824,
        "format": 2
    },
    {
        "image": "rbd2",
        "size": 2147483648,
        "format": 2
    }
]

# Show detailed image information
[root@ceph-client ~]#rbd info rbd1 --user rbd
rbd image 'rbd1':
	size 1 GiB in 256 objects
	order 22 (4 MiB objects)
	snapshot_count: 0
	id: 112fe2290ad6
	block_name_prefix: rbd_data.112fe2290ad6
	format: 2
	features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
	op_features:
	flags:
	create_timestamp: Sat Jul 11 09:14:18 2020
	access_timestamp: Sat Jul 11 09:14:18 2020
	modify_timestamp: Sat Jul 11 09:14:18 2020

Disable image features

By default an image has the following features:

features: layering, exclusive-lock, object-map, fast-diff, deep-flatten

When used as a kernel block device, an image generally needs only layering; the other features must all be disabled, because older kernel clients (such as the stock CentOS 7 kernel) do not support them and rbd map fails with an unsupported-features error.

# Disable image features
[root@ceph-client ~]#rbd feature disable rbd/rbd1 exclusive-lock, object-map, fast-diff, deep-flatten --user rbd
[root@ceph-client ~]#rbd feature disable rbd/rbd2 exclusive-lock, object-map, fast-diff, deep-flatten --user rbd

# View details
[root@ceph-client ~]#rbd info rbd/rbd1 --user rbd
rbd image 'rbd1':
	size 1 GiB in 256 objects
	order 22 (4 MiB objects)
	snapshot_count: 0
	id: 112fe2290ad6
	block_name_prefix: rbd_data.112fe2290ad6
	format: 2
	features: layering
	op_features:
	flags:
	create_timestamp: Sat Jul 11 09:14:18 2020
	access_timestamp: Sat Jul 11 09:14:18 2020
	modify_timestamp: Sat Jul 11 09:14:18 2020
[root@ceph-client ~]#rbd info rbd/rbd2 --user rbd
rbd image 'rbd2':
	size 2 GiB in 512 objects
	order 22 (4 MiB objects)
	snapshot_count: 0
	id: 11342244e27f
	block_name_prefix: rbd_data.11342244e27f
	format: 2
	features: layering
	op_features:
	flags:
	create_timestamp: Sat Jul 11 09:14:47 2020
	access_timestamp: Sat Jul 11 09:14:47 2020
	modify_timestamp: Sat Jul 11 09:14:47 2020
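To avoid disabling features after the fact, an image can also be created with only layering enabled from the start. A sketch (the image name rbd3 is purely illustrative):

# Create an image with only the layering feature
rbd create rbd/rbd3 --size 1G --image-feature layering --user rbd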

Map the image on the client

[root@ceph-client ~]#lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0              11:0    1  4.4G  0 rom  /mnt/centos7
vda             252:0    0  100G  0 disk
├─vda1          252:1    0    1G  0 part /boot
└─vda2          252:2    0   99G  0 part
  ├─centos-root 253:0    0   50G  0 lvm  /
  ├─centos-swap 253:1    0  7.9G  0 lvm  [SWAP]
  └─centos-home 253:2    0 41.1G  0 lvm  /home
[root@ceph-client ~]#rbd ls -l --user rbd
NAME SIZE  PARENT FMT PROT LOCK
rbd1 1 GiB          2
rbd2 2 GiB          2

# Map the RBD image to the client host
[root@ceph-client ~]#rbd map rbd/rbd1 --user rbd
/dev/rbd0
[root@ceph-client ~]#lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0              11:0    1  4.4G  0 rom  /mnt/centos7
vda             252:0    0  100G  0 disk
├─vda1          252:1    0    1G  0 part /boot
└─vda2          252:2    0   99G  0 part
  ├─centos-root 253:0    0   50G  0 lvm  /
  ├─centos-swap 253:1    0  7.9G  0 lvm  [SWAP]
  └─centos-home 253:2    0 41.1G  0 lvm  /home
rbd0            251:0    0    1G  0 disk

Initialize a filesystem

# Format the device
[root@ceph-client ~]#mkfs.xfs /dev/rbd0
meta-data=/dev/rbd0              isize=512    agcount=8, agsize=32768 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=262144, imaxpct=25
         =                       sunit=1024   swidth=1024 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=8 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
[root@ceph-client ~]#mkdir -pv /mnt/ceph-disk1
mkdir: created directory ‘/mnt/ceph-disk1’

# Mount the filesystem
[root@ceph-client ~]#mount /dev/rbd0 /mnt/ceph-disk1/
[root@ceph-client ~]#df -Th
Filesystem              Type      Size  Used Avail Use% Mounted on
devtmpfs                devtmpfs  3.9G     0  3.9G   0% /dev
tmpfs                   tmpfs     3.9G     0  3.9G   0% /dev/shm
tmpfs                   tmpfs     3.9G  8.6M  3.9G   1% /run
tmpfs                   tmpfs     3.9G     0  3.9G   0% /sys/fs/cgroup
/dev/mapper/centos-root xfs        50G  1.9G   49G   4% /
/dev/vda1               xfs      1014M  149M  866M  15% /boot
/dev/mapper/centos-home xfs        42G   33M   42G   1% /home
/dev/sr0                iso9660   4.4G  4.4G     0 100% /mnt/centos7
tmpfs                   tmpfs     783M     0  783M   0% /run/user/0
/dev/rbd0               xfs      1014M   33M  982M   4% /mnt/ceph-disk1
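The map and mount above do not survive a reboot. ceph-common ships an rbdmap service and the /etc/ceph/rbdmap file (seen in the ls output earlier) for persistent mappings; a sketch, assuming the keyring path used in this article:

# Register the image in /etc/ceph/rbdmap (format: pool/image id=...,keyring=...)
echo 'rbd/rbd1 id=rbd,keyring=/etc/ceph/ceph.client.rbd.keyring' >> /etc/ceph/rbdmap

# Map the registered images at boot
systemctl enable rbdmap

With the mapping in place, a noauto entry in /etc/fstab for the udev-created symlink (e.g. /dev/rbd/rbd/rbd1) lets rbdmap mount the filesystem after mapping; check the rbdmap man page for the exact behavior on your version.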

Unmount and unmap on the client

[root@ceph-client ~]#umount /dev/rbd0

# View local image mappings
[root@ceph-client ~]#rbd showmapped --user rbd
id pool namespace image snap device
0  rbd            rbd1  -    /dev/rbd0

# Unmap the image
[root@ceph-client ~]#rbd unmap rbd/rbd1 --user rbd
[root@ceph-client ~]#lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0              11:0    1  4.4G  0 rom  /mnt/centos7
vda             252:0    0  100G  0 disk
├─vda1          252:1    0    1G  0 part /boot
└─vda2          252:2    0   99G  0 part
  ├─centos-root 253:0    0   50G  0 lvm  /
  ├─centos-swap 253:1    0  7.9G  0 lvm  [SWAP]
  └─centos-home 253:2    0 41.1G  0 lvm  /home

Resize (grow) an image

[root@ceph-client ~]#rbd resize -s 5G rbd/rbd1 --user rbd
Resizing image: 100% complete...done.
[root@ceph-client ~]#rbd ls -l --user rbd
NAME SIZE  PARENT FMT PROT LOCK
rbd1 5 GiB          2
rbd2 2 GiB          2
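Growing the image does not grow the filesystem on it. The XFS filesystem created earlier must be expanded separately once the image is mapped and mounted again, e.g.:

# Expand a mounted XFS filesystem to fill the resized image
rbd map rbd/rbd1 --user rbd
mount /dev/rbd0 /mnt/ceph-disk1/
xfs_growfs /mnt/ceph-disk1/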

Delete an image

[root@ceph-client ~]#rbd ls -l --user rbd
NAME SIZE  PARENT FMT PROT LOCK
rbd1 5 GiB          2
rbd2 2 GiB          2

# Delete rbd2
[root@ceph-client ~]#rbd rm rbd2 --user rbd
Removing image: 100% complete...done.
[root@ceph-client ~]#rbd ls -l --user rbd
NAME SIZE  PARENT FMT PROT LOCK
rbd1 5 GiB          2

Move an image to the trash

[root@ceph-client ~]#rbd ls -l --user rbd
NAME SIZE  PARENT FMT PROT LOCK
rbd1 5 GiB          2

# Move rbd1 to the trash
[root@ceph-client ~]#rbd trash move rbd/rbd1 --user rbd
[root@ceph-client ~]#rbd ls -l --user rbd

# List the trash
[root@ceph-client ~]#rbd trash list -p rbd --user rbd
112fe2290ad6 rbd1

Restore an image from the trash

[root@ceph-client ~]#rbd trash list -p rbd --user rbd
112fe2290ad6 rbd1

# Restore rbd1
[root@ceph-client ~]#rbd trash restore -p rbd --image rbd1 --image-id 112fe2290ad6 --user rbd
[root@ceph-client ~]#rbd ls -l --user rbd
NAME SIZE  PARENT FMT PROT LOCK
rbd1 5 GiB          2
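Instead of being restored, an image sitting in the trash can also be deleted permanently; rbd trash remove takes the image id printed by rbd trash list. A sketch (the id is the one shown above, valid only while the image is still in the trash):

# Permanently delete a trashed image by its image id
rbd trash remove -p rbd 112fe2290ad6 --user rbd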

RBD snapshots

Preparation before taking the snapshot:

[root@ceph-client ~]#lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0              11:0    1  4.4G  0 rom  /mnt/centos7
vda             252:0    0  100G  0 disk
├─vda1          252:1    0    1G  0 part /boot
└─vda2          252:2    0   99G  0 part
  ├─centos-root 253:0    0   50G  0 lvm  /
  ├─centos-swap 253:1    0  7.9G  0 lvm  [SWAP]
  └─centos-home 253:2    0 41.1G  0 lvm  /home
[root@ceph-client ~]#rbd map rbd/rbd1 --user rbd
/dev/rbd0
[root@ceph-client ~]#lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0              11:0    1  4.4G  0 rom  /mnt/centos7
vda             252:0    0  100G  0 disk
├─vda1          252:1    0    1G  0 part /boot
└─vda2          252:2    0   99G  0 part
  ├─centos-root 253:0    0   50G  0 lvm  /
  ├─centos-swap 253:1    0  7.9G  0 lvm  [SWAP]
  └─centos-home 253:2    0 41.1G  0 lvm  /home
rbd0            251:0    0    5G  0 disk
[root@ceph-client ~]#mount /dev/rbd0 /mnt/ceph-disk1/
[root@ceph-client ~]#echo 'this test-1' >> /mnt/ceph-disk1/1.txt
[root@ceph-client ~]#echo 'this test-2' >> /mnt/ceph-disk1/2.txt
[root@ceph-client ~]#ls /mnt/ceph-disk1/
1.txt  2.txt

Create a snapshot

[root@ceph-client ~]#rbd snap create rbd/rbd1@snap1 --user rbd
[root@ceph-client ~]#rbd snap list rbd/rbd1 --user rbd
SNAPID NAME  SIZE  PROTECTED TIMESTAMP
     4 snap1 5 GiB           Sat Jul 11 09:41:19 2020
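A snapshot taken while the filesystem is mounted and being written is only crash-consistent. For a cleaner snapshot, I/O can be frozen around the snap create; a sketch using fsfreeze from util-linux (the name snap2 is illustrative):

# Quiesce the filesystem, snapshot, then thaw
fsfreeze -f /mnt/ceph-disk1
rbd snap create rbd/rbd1@snap2 --user rbd
fsfreeze -u /mnt/ceph-disk1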

Roll back a snapshot

[root@ceph-client ~]#ls /mnt/ceph-disk1/
1.txt  2.txt

# To verify data correctness after the rollback, delete the 2.txt file
[root@ceph-client ~]#rm -rf /mnt/ceph-disk1/2.txt
[root@ceph-client ~]#ls /mnt/ceph-disk1/
1.txt

# Unmount and unmap the image
[root@ceph-client ~]#umount /dev/rbd0
[root@ceph-client ~]#rbd unmap rbd/rbd1 --user rbd
[root@ceph-client ~]#lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0              11:0    1  4.4G  0 rom  /mnt/centos7
vda             252:0    0  100G  0 disk
├─vda1          252:1    0    1G  0 part /boot
└─vda2          252:2    0   99G  0 part
  ├─centos-root 253:0    0   50G  0 lvm  /
  ├─centos-swap 253:1    0  7.9G  0 lvm  [SWAP]
  └─centos-home 253:2    0 41.1G  0 lvm  /home
[root@ceph-client ~]#rbd snap list rbd/rbd1 --user rbd
SNAPID NAME  SIZE  PROTECTED TIMESTAMP
     4 snap1 5 GiB           Sat Jul 11 09:41:19 2020
     
# Roll back to the snapshot
[root@ceph-client ~]#rbd snap rollback rbd/rbd1@snap1 --user rbd
Rolling back to snapshot: 100% complete...done.

# Map the image again
[root@ceph-client ~]#rbd map rbd/rbd1 --user rbd
/dev/rbd0
[root@ceph-client ~]#lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0              11:0    1  4.4G  0 rom  /mnt/centos7
vda             252:0    0  100G  0 disk
├─vda1          252:1    0    1G  0 part /boot
└─vda2          252:2    0   99G  0 part
  ├─centos-root 253:0    0   50G  0 lvm  /
  ├─centos-swap 253:1    0  7.9G  0 lvm  [SWAP]
  └─centos-home 253:2    0 41.1G  0 lvm  /home
rbd0            251:0    0    5G  0 disk
[root@ceph-client ~]#mount /dev/rbd0 /mnt/ceph-disk1/

# The data is back to its state at snapshot time
[root@ceph-client ~]#ls /mnt/ceph-disk1/
1.txt  2.txt

Delete a snapshot

[root@ceph-client ~]#rbd snap list rbd/rbd1 --user rbd
SNAPID NAME  SIZE  PROTECTED TIMESTAMP
     4 snap1 5 GiB           Sat Jul 11 09:41:19 2020
[root@ceph-client ~]#rbd snap rm rbd/rbd1@snap1 --user rbd
Removing snap: 100% complete...done.
[root@ceph-client ~]#rbd snap list rbd/rbd1 --user rbd
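To delete every snapshot of an image at once, rbd also provides a purge subcommand:

# Remove all snapshots of rbd1 in one go
rbd snap purge rbd/rbd1 --user rbd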

Limit the number of snapshots

[root@ceph-client ~]#rbd snap limit set rbd/rbd1 --limit 10 --user rbd

Clear the snapshot limit

[root@ceph-client ~]#rbd snap limit clear rbd/rbd1 --user rbd

Clones

What we do here is snapshot-based cloning. Cloning uses COW, copy-on-write, or more precisely "copy only when written". A newly created clone only sets up a logical mapping back to its source; no real physical space has been allocated to it yet, and this is the key point to understand. Although the snapshot itself is read-only, a clone created from it is both readable and writable, and it can be used exactly like an ordinary image. Only when the cloned image is written does the system actually allocate physical space for it; that is what COW means. When the clone is read rather than written, the data is served from the snapshot it was cloned from.

It follows that an image cloned from a snapshot depends on that snapshot: once the snapshot is deleted, the clone is destroyed along with it, which is why the snapshot must be protected.

Create a clone

# Before cloning, the snapshot must be protected; the error below is what you get otherwise
[root@ceph-client ~]#rbd clone rbd/rbd1@snap1 rbd/rbd1-snap1-clone --user rbd
2020-07-11 10:05:48.783 7fb34f7fe700 -1 librbd::image::CloneRequest: 0x5647acbcf850 validate_parent: parent snapshot must be protected
rbd: clone error: (22) Invalid argument

# Protect the snapshot
[root@ceph-client ~]#rbd snap protect rbd/rbd1@snap1  --user rbd
[root@ceph-client ~]#rbd snap list rbd/rbd1 --user rbd
SNAPID NAME  SIZE  PROTECTED TIMESTAMP
     8 snap1 5 GiB yes       Sat Jul 11 10:04:34 2020
     
# Create the clone
[root@ceph-client ~]#rbd clone rbd/rbd1@snap1 rbd/rbd1-snap1-clone --user rbd

# View the clone
[root@ceph-client ~]#rbd ls -l --user rbd
NAME             SIZE  PARENT         FMT PROT LOCK
rbd1             5 GiB                  2
rbd1@snap1       5 GiB                  2 yes
rbd1-snap1-clone 5 GiB rbd/rbd1@snap1   2

# View the clone's details
[root@ceph-client ~]#rbd info rbd1-snap1-clone --user rbd
rbd image 'rbd1-snap1-clone':
	size 5 GiB in 1280 objects
	order 22 (4 MiB objects)
	snapshot_count: 0
	id: 12613ea38941
	block_name_prefix: rbd_data.12613ea38941
	format: 2
	features: layering
	op_features:
	flags:
	create_timestamp: Sat Jul 11 10:07:20 2020
	access_timestamp: Sat Jul 11 10:07:20 2020
	modify_timestamp: Sat Jul 11 10:07:20 2020
	parent: rbd/rbd1@snap1
	overlap: 5 GiB

A successfully cloned image depends on its snapshot, as shown by the parent and overlap fields.

To make the clone stand alone, no longer depending on the snapshot, the clone and the snapshot need to be merged (flattened):

# Flatten the clone
[root@ceph-client ~]#rbd flatten rbd/rbd1-snap1-clone --user rbd
Image flatten: 100% complete...done.

# Verify the clone now stands alone: the parent and overlap fields are gone
[root@ceph-client ~]#rbd info rbd1-snap1-clone --user rbd
rbd image 'rbd1-snap1-clone':
	size 5 GiB in 1280 objects
	order 22 (4 MiB objects)
	snapshot_count: 0
	id: 12613ea38941
	block_name_prefix: rbd_data.12613ea38941
	format: 2
	features: layering
	op_features:
	flags:
	create_timestamp: Sat Jul 11 10:07:20 2020
	access_timestamp: Sat Jul 11 10:07:20 2020
	modify_timestamp: Sat Jul 11 10:07:20 2020
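Before unprotecting and deleting the parent snapshot, it is worth confirming that no clones still depend on it; rbd children lists them (empty here, since the only clone was flattened):

# List clones that still reference the snapshot
rbd children rbd/rbd1@snap1 --user rbd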

Once the snapshot is no longer in use, it can simply be deleted:

Note: a protected snapshot must be unprotected before it can be deleted.

[root@ceph-client ~]#rbd info rbd/rbd1@snap1 --user rbd
rbd image 'rbd1':
	size 5 GiB in 1280 objects
	order 22 (4 MiB objects)
	snapshot_count: 1
	id: 112fe2290ad6
	block_name_prefix: rbd_data.112fe2290ad6
	format: 2
	features: layering
	op_features:
	flags:
	create_timestamp: Sat Jul 11 09:14:18 2020
	access_timestamp: Sat Jul 11 13:44:17 2020
	modify_timestamp: Sat Jul 11 09:14:18 2020
	protected: True

# Unprotect the snapshot
[root@ceph-client ~]#rbd snap unprotect rbd/rbd1@snap1 --user rbd
[root@ceph-client ~]#rbd info rbd/rbd1@snap1 --user rbd
rbd image 'rbd1':
	size 5 GiB in 1280 objects
	order 22 (4 MiB objects)
	snapshot_count: 1
	id: 112fe2290ad6
	block_name_prefix: rbd_data.112fe2290ad6
	format: 2
	features: layering
	op_features:
	flags:
	create_timestamp: Sat Jul 11 09:14:18 2020
	access_timestamp: Sat Jul 11 13:44:17 2020
	modify_timestamp: Sat Jul 11 09:14:18 2020
	protected: False

[root@ceph-client ~]#rbd ls -l --user rbd
NAME             SIZE  PARENT FMT PROT LOCK
rbd1             5 GiB          2
rbd1@snap1       5 GiB          2
rbd1-snap1-clone 5 GiB          2

# Delete the snapshot
[root@ceph-client ~]#rbd snap rm rbd/rbd1@snap1 --user rbd
Removing snap: 100% complete...done.
[root@ceph-client ~]#rbd ls -l --user rbd
NAME             SIZE  PARENT FMT PROT LOCK
rbd1             5 GiB          2
rbd1-snap1-clone 5 GiB          2