Ceph Storage Cluster Expansion: A Worked Example
- OSD disks and IDs used for the expansion (a quick verification sketch follows the list)
HCI-7
    /dev/sdb  ssd  id=36
    /dev/sdc  ssd  id=37
HCI-9
    /dev/sdb  sas  id=48
    /dev/sdc  sas  id=49
HCI-10
    /dev/sdc  sas  id=54
    /dev/sdd  sas  id=55
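Before touching anything it is worth confirming that the device names and OSD IDs above still match reality. A minimal check, assuming passwordless SSH to the HCI hosts (the patterns below are simply the hosts and IDs from the list):
ceph osd tree | grep -E 'HCI-7|HCI-9|HCI-10|osd\.(36|37|48|49|54|55)'
ssh HCI-7 lsblk -o NAME,SIZE,ROTA,TYPE,MOUNTPOINT /dev/sdb /dev/sdc    # ROTA=0 usually indicates an SSD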
- Removing OSDs and reclaiming their IDs (a scripted version of these steps follows)
ceph osd out {osd-num}    # mark the OSD out so its data starts migrating to the remaining OSDs
ceph -w                   # watch the rebalance; wait until all PGs return to active+clean before continuing
Note: sometimes, usually in a "small" cluster with few hosts (for instance a small test cluster), taking out the OSD can trigger a CRUSH corner case where some PGs remain stuck in the active+remapped state. If that happens, mark the OSD back in:
ceph osd in {osd-num}
Then, instead of marking it out again, set its CRUSH weight to 0 so the data still drains off it while it stays in the map:
ceph osd crush reweight osd.{osd-num} 0
Once the weight is 0 and the cluster is healthy again, continue with the steps below.
ssh {osd-host}
sudo /etc/init.d/ceph stop osd.{osd-num}    # stop the OSD daemon on its host (sysvinit; see the systemd note below)
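On hosts managed by systemd rather than sysvinit (an assumption about the deployment, not something stated in this case), the equivalent stop command would be:
sudo systemctl stop ceph-osd@{osd-num}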
ceph osd crush remove {name}    # remove the OSD from the CRUSH map, e.g. {name} = osd.36
ceph auth del osd.{osd-num}     # delete its authentication key
ceph osd rm {osd-num}           # remove the OSD from the cluster; its ID is now free for reuse
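For the six OSDs in the inventory above, the whole teardown can be scripted. A rough sketch, assuming the hosts are sysvinit-managed and reachable over SSH as a sudo-capable user; the host:ID pairs come from the inventory, and in practice you would still pause after each `out` until the cluster is back to active+clean:
#!/bin/bash
for pair in HCI-7:36 HCI-7:37 HCI-9:48 HCI-9:49 HCI-10:54 HCI-10:55; do
    host=${pair%:*}; id=${pair#*:}
    ceph osd out "$id"
    # ... wait here until `ceph -s` reports all PGs active+clean ...
    ssh "$host" sudo /etc/init.d/ceph stop "osd.$id"
    ceph osd crush remove "osd.$id"
    ceph auth del "osd.$id"
    ceph osd rm "$id"
done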
- Re-adding the OSDs (a loop over all the disks is sketched after these commands)
ceph-deploy disk list HCI-7
ceph-deploy disk zap HCI-7:sdb    # wipe the disk's partition table and data (optional)
ceph-deploy osd create HCI-7:sdb
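The same two steps repeat for every disk in the inventory. A hedged loop, assuming ceph-deploy runs on the admin node and accepts the host:disk syntax used above:
for target in HCI-7:sdb HCI-7:sdc HCI-9:sdb HCI-9:sdc HCI-10:sdc HCI-10:sdd; do
    ceph-deploy disk zap "$target"      # optional: destroys any existing data on the disk
    ceph-deploy osd create "$target"
done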
- Moving an OSD within the CRUSH map (see the weight note below)
ceph osd crush add osd.36 1.0 root=SSD host=HCI-7-SSD
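The weight 1.0 above is a round value; by convention a CRUSH weight equals the OSD's capacity in TiB. For an item that already exists in the map, `ceph osd crush set` updates both its location and its weight in one step; the 0.873 below (roughly a 960 GB SSD) is an illustrative figure, not one taken from this case:
ceph osd crush set osd.36 0.873 root=SSD host=HCI-7-SSD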
- Moving a host bucket (see the rule note below)
ceph osd crush move HCI-10-SAS root=SAS
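Moving hosts under per-media roots only pays off once a CRUSH rule points pools at those roots. A sketch of creating such a rule and assigning it to a pool; the rule name sas-rule and pool name rbd-sas are made up for illustration, and on pre-Luminous releases the pool setting is crush_ruleset with the rule's numeric ID instead of crush_rule:
ceph osd crush rule create-simple sas-rule SAS host
ceph osd pool set rbd-sas crush_rule sas-rule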
CRUSH operation examples
# Add root buckets
$ ceph osd crush add-bucket root-ssd root
$ ceph osd crush add-bucket root-sas root
$ ceph osd crush add-bucket root-sata root
# Add host buckets
$ ceph osd crush add-bucket ceph1-ssd host
$ ceph osd crush add-bucket ceph1-sas host
$ ceph osd crush add-bucket ceph1-sata host
root@R320-1:~# ceph osd crush move ceph1-sata root=root-sata
moved item id -11 name 'ceph1-sata' to location {root=root-sata} in crush map
root@R320-1:~# ceph osd crush move ceph1-sas root=root-sas
moved item id -10 name 'ceph1-sas' to location {root=root-sas} in crush map
root@R320-1:~# ceph osd crush move ceph1-ssd root=root-ssd
moved item id -9 name 'ceph1-ssd' to location {root=root-ssd} in crush map
root@R320-1:~# ceph osd tree
ID  WEIGHT  TYPE NAME              UP/DOWN REWEIGHT PRIMARY-AFFINITY
 -8       0 root root-sata
-11       0     host ceph1-sata
 -7       0 root root-sas
-10       0     host ceph1-sas
 -6       0 root root-ssd
 -9       0     host ceph1-ssd
 -1 7.27399 root default
 -2 1.81850     host R420-1
  0 1.81850         osd.0             up  1.00000          1.00000
 -3 1.81850     host R420-2
  1 1.81850         osd.1             up  1.00000          1.00000
 -4 1.81850     host R420-4
  2 1.81850         osd.2             up  1.00000          1.00000
 -5 1.81850     host R420-3
  3 1.81850         osd.3             up  1.00000          1.00000
# Remove host buckets
ceph osd crush remove ceph1-ssd
ceph osd crush remove ceph1-sas
ceph osd crush remove ceph1-sata
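Note that `ceph osd crush remove` only succeeds on a bucket that is already empty; move or remove its children first, otherwise the command is refused. On newer releases the contents of a bucket can be listed directly (older clusters can simply read `ceph osd tree`):
ceph osd crush ls ceph1-ssd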
# Common CRUSH operation commands
ceph osd crush tree         # show the CRUSH hierarchy with weights
ceph osd crush rule ls      # list CRUSH rule names
ceph osd crush rule dump    # dump full rule definitions
ceph osd crush set {name} {weight} root={root} [{bucket-type}={bucket-name} ...]    # (re)place an item and set its weight
ceph osd crush reweight {name} {weight}                                             # change an item's weight only
ceph osd crush remove {name}                                                        # remove an OSD from the CRUSH map
ceph osd crush add-bucket {bucket-name} {bucket-type}                               # create a new bucket
ceph osd crush move {bucket-name} {bucket-type}={bucket-name} [...]                 # move a bucket under another bucket
ceph osd crush remove {bucket-name}                                                 # remove an (empty) bucket
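As a closing illustration, the templates above instantiated with values from this case; the host bucket name HCI-9-SAS is an assumption, following the HCI-7-SSD / HCI-10-SAS naming seen earlier, and the weights are placeholders:
ceph osd crush add-bucket HCI-9-SAS host
ceph osd crush move HCI-9-SAS root=SAS
ceph osd crush set osd.48 1.0 root=SAS host=HCI-9-SAS
ceph osd crush set osd.49 1.0 root=SAS host=HCI-9-SAS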