Creating and using RBD in Ceph

# List the OSD pools
ceph osd lspools

# Create a pool; usage: ceph osd pool create <pool> [<pg_num:int>] [<pgp_num:int>]
ceph osd pool create ceph-demo 64 64
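The 64/64 here is just a starting point. A common rule of thumb (my assumption, not from the original post) is total PGs ≈ OSDs × 100 / replica count, rounded up to a power of two:

# worked example: 3 OSDs with 3-way replication
# 3 * 100 / 3 = 100 -> round up to the next power of two = 128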

# Initialize the pool for RBD (if no pool name is given, rbd defaults to a pool named rbd)
rbd pool init ceph-demo

# Confirm the new pool exists
ceph osd lspools

# Check the pool's PG count
ceph osd pool get ceph-demo pg_num

# Check the pool's PGP count
ceph osd pool get ceph-demo pgp_num

# Check the pool's replica count
ceph osd pool get ceph-demo size

# Check the pool's CRUSH rule
ceph osd pool get ceph-demo crush_rule

# Increase the pool's PG count
ceph osd pool set ceph-demo pg_num 128
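On releases before Nautilus, pgp_num does not follow pg_num automatically, so raise it as well or the data will not rebalance onto the new PGs:

# keep pgp_num in step with pg_num
ceph osd pool set ceph-demo pgp_num 128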

# Check the Ceph cluster status
ceph -s

# Create a block device image (the two spellings below are equivalent)
rbd create -p ceph-demo --image rbd-demo.img --size 5G
rbd create ceph-demo/rbd-demo-1.img --size 5G
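If the client kernel is old, an alternative sketch (not from the original post; rbd-demo-2.img is a hypothetical name) is to create the image with only the layering feature, which avoids the feature-disabling steps shown further down:

# create an image with only the always-mappable layering feature
rbd create ceph-demo/rbd-demo-2.img --size 5G --image-feature layering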

# List the block device images in the ceph-demo pool
rbd -p ceph-demo ls

# Show the image details (both spellings are equivalent)
rbd info ceph-demo/rbd-demo.img
rbd -p ceph-demo info rbd-demo.img

# Map the block device (on older kernels this can fail because of unsupported image features)
rbd map ceph-demo/rbd-demo.img

# If mapping fails, check the kernel log for the reason, then review the image features
dmesg | tail
rbd -p ceph-demo info rbd-demo.img

# The OS kernel cannot support some of these image features, so disable them
# before mapping (fast-diff depends on object-map, which in turn depends on
# exclusive-lock, so disable them in this order)
rbd feature disable ceph-demo/rbd-demo.img deep-flatten
rbd feature disable ceph-demo/rbd-demo.img fast-diff
rbd feature disable ceph-demo/rbd-demo.img object-map
rbd feature disable ceph-demo/rbd-demo.img exclusive-lock
rbd map ceph-demo/rbd-demo.img

# List the mapped block devices
rbd device list
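The older spelling of the same command, still widely seen, is:

rbd showmapped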

# Create an XFS filesystem on the block device and mount it
mkfs.xfs /dev/rbd0
mkdir /mnt/rbd-demo
mount /dev/rbd0 /mnt/rbd-demo/
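Neither the mapping nor the mount survives a reboot. A minimal persistence sketch, assuming the rbdmap unit shipped with ceph-common and an admin client keyring (adjust id/keyring to your setup):

# /etc/ceph/rbdmap
ceph-demo/rbd-demo.img id=admin,keyring=/etc/ceph/ceph.client.admin.keyring

# /etc/fstab (the /dev/rbd/<pool>/<image> symlink is created by udev)
/dev/rbd/ceph-demo/rbd-demo.img /mnt/rbd-demo xfs noauto,nofail 0 0

# map everything in /etc/ceph/rbdmap at boot
systemctl enable rbdmap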

# Write some test data
cd /mnt/rbd-demo/
echo test > test.txt

# Expand the image
rbd resize ceph-demo/rbd-demo.img --size 6G
rbd -p ceph-demo info --image rbd-demo.img

# Grow the XFS filesystem into the new space
xfs_growfs /dev/rbd0
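If the image had been formatted with ext4 instead of XFS, the equivalent grow step (not in the original post) would be:

resize2fs /dev/rbd0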

# Check the image details again
rbd -p ceph-demo info rbd-demo.img

# List the objects in the ceph-demo pool
rados -p ceph-demo ls
rados -p ceph-demo ls | grep rbd_data.85e3642a0138
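The 85e3642a0138 prefix is the image's block_name_prefix, which ties the rbd_data objects back to a specific image:

# confirm which image these objects belong to
rbd info ceph-demo/rbd-demo.img | grep block_name_prefix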

# Stat one of the objects (size and mtime)
rados -p ceph-demo stat rbd_data.85e3642a0138.000000000000031b

# Find where the object is physically stored (which PG and OSDs)
ceph osd map ceph-demo rbd_data.85e3642a0138.000000000000031b

# Dump the OSD map as a tree, one line per OSD with its weight and status
ceph osd tree
for i in $(rados -p ceph-demo ls | grep rbd_data.85e3642a0138); do ceph osd map ceph-demo ${i}; done
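RBD images are thin provisioned, so rbd_data objects only appear as data is written. One way (my sketch, not from the original post) to watch them accumulate while writing into the mounted filesystem:

watch -n 1 'rados -p ceph-demo ls | grep rbd_data.85e3642a0138 | wc -l'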

# Show detailed cluster health
ceph health detail

# Check the pool's application type
ceph osd pool application get ceph-demo

# List the timestamp and ID of all new and archived crash reports
ceph crash ls
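Once investigated, crashes can be archived so they stop raising a RECENT_CRASH health warning (a follow-up step, not in the original post):

# archive one crash by its ID, or all of them at once
ceph crash archive <id>
ceph crash archive-all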

# Associate an application with the pool, explicitly declaring which Ceph application may use it: rbd (block device), rgw (object gateway), or cephfs (file system)
ceph osd pool application enable ceph-demo rbd

# Check that the ceph-mgr daemon is running on each node
systemctl status ceph-mgr@node-1
systemctl status ceph-mgr@node-2
systemctl status ceph-mgr@node-3

Reference: http://docs.ceph.org.cn/
