查看状态
1. 查看mon服务状态
# service ceph status mon.ceph-node1
=== mon.ceph-node1 ===
mon.ceph-node1: running {"version":"0.94.9"}
2. 查看osd服务状态
# service ceph status osd.1
=== osd.1 ===
osd.1: running {"version":"0.94.9"}
查看pool的数据
# rbd ls compute
5b97f529-f328-4a18-88ec-0f50e8812e53_disk
ebf9d6bf-db02-4b43-aed8-9361fc179563_disk
查看osd状态
# ceph osd tree
ID WEIGHT TYPE NAME UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 1.56000 root default
-2 0.78000 host ceph-node2
0 0.78000 osd.0 up 1.00000 1.00000
-3 0.78000 host ceph-node1
1 0.78000 osd.1 up 1.00000 1.00000
删除OSD
ceph osd out osd.3
ssh node1 service ceph stop osd.3
ceph osd crush remove osd.3
ceph auth del osd.3 # 从认证中删除
ceph osd rm 3 # 删除
删除osd节点
ceph osd crush remove ceph-node4
设置
系统设置
1. 添加开机自启动
# chkconfig --add ceph
# chkconfig ceph on
查看disk格式
[root@compute07 vmwear_node]# qemu-img info -f rbd rbd:compute/fefb3f3f-1c7f-4feb-ae3b-159db75dfe3f_disk
image: rbd:compute/fefb3f3f-1c7f-4feb-ae3b-159db75dfe3f_disk
file format: rbd
virtual size: 20G (21474836480 bytes)
disk size: unavailable
cluster_size: 4194304
导出disk
# rbd export images/dc6f5b06-08fe-4866-918e-a1bb74040872 glance-1
failed to bind the UNIX domain socket to '/var/run/ceph/guests/ceph-client.admin.12180.64912368.asok': (2) No such file or directory
Exporting image: 100% complete...done.
获得并修改CRUSH maps
## save current crushmap in binary
ceph osd getcrushmap -o crushmap.bin
## Convert to txt
crushtool -d crushmap.bin -o crushmap.txt
## Edit it and re-convert to binary
crushtool -c crushmap.txt -o crushmap.bin.new
## Inject into running system
ceph osd setcrushmap -i crushmap.bin.new
## If you've added a new ruleset and want it as the default for new pools,
## set this in ceph.conf (it is a config option, not a CLI command):
osd pool default crush rule = 4
#也可以这样设置一个已有pool的rule
ceph osd pool set testpool crush_ruleset <ruleset_id>