
# vim /etc/hosts    (append the entries below)
192.168.4.20 client20
192.168.4.21 node21
192.168.4.22 node22
192.168.4.23 node23
192.168.4.24 node24
192.168.4.25 node25
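
These entries need to be identical on every node. A minimal sketch of pushing the file out, assuming passwordless SSH between the nodes is already configured (as ceph-deploy requires anyway):

# for ip in 21 22 23 24 25; do scp /etc/hosts 192.168.4.$ip:/etc/hosts; done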


# cat /etc/yum.repos.d/rhel7.repo
[rhel7]
name=rhel7.0
baseurl=ftp://192.168.4.254/rhel7
enabled=1
gpgcheck=0

# cat /etc/yum.repos.d/ceph.repo
[mon]
name=mon
baseurl=ftp://192.168.4.254/ceph/rhceph-2.0-rhel-7-x86_64/MON
gpgcheck=0
[osd]
name=osd
baseurl=ftp://192.168.4.254/ceph/rhceph-2.0-rhel-7-x86_64/OSD
gpgcheck=0
[tools]
name=tools
baseurl=ftp://192.168.4.254/ceph/rhceph-2.0-rhel-7-x86_64/Tools
gpgcheck=0
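
With both repo files in place, a quick sanity check that yum can reach the FTP server and sees all four repos:

# yum clean all
# yum repolist    # should list rhel7, mon, osd and tools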


[root@node1 ceph-cluster]# ceph-deploy --overwrite-conf osd create node1:vdc:/dev/vdb1 node1:vdd:/dev/vdb2
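
Each argument follows ceph-deploy's HOST:DISK[:JOURNAL] form: vdc and vdd hold the OSD data, while partitions /dev/vdb1 and /dev/vdb2 serve as their journals. --overwrite-conf pushes the local ceph.conf to the node even if the node's copy differs. The journal partitions must exist beforehand; a sketch of splitting vdb in half with parted (sizes assumed from the lsblk output below):

# parted /dev/vdb mklabel gpt
# parted /dev/vdb mkpart primary 1M 50%
# parted /dev/vdb mkpart primary 50% 100%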


# lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0              11:0    1 1024M  0 rom
vda             252:0    0   20G  0 disk
├─vda1          252:1    0    1G  0 part /boot
└─vda2          252:2    0   19G  0 part
  ├─rhel-root   253:0    0   17G  0 lvm  /
  └─rhel-swap   253:1    0    2G  0 lvm  [SWAP]
vdb             252:16   0   20G  0 disk
├─vdb1          252:17   0   10G  0 part
└─vdb2          252:18   0   10G  0 part
vdc             252:32   0   20G  0 disk
└─vdc1          252:33   0   20G  0 part /var/lib/ceph/osd/ceph-5
vdd             252:48   0   20G  0 disk
└─vdd1          252:49   0   20G  0 part /var/lib/ceph/osd/ceph-6
[root@node3 ~]# umount /var/lib/ceph/osd/ceph-5
umount: /var/lib/ceph/osd/ceph-5: target is busy.
        (In some cases useful info about processes that use
         the device is found by lsof(8) or fuser(1))
[root@node3 ~]# cd /
[root@node3 /]# umount /var/lib/ceph/osd/ceph-5
umount: /var/lib/ceph/osd/ceph-5: target is busy.
        (In some cases useful info about processes that use
         the device is found by lsof(8) or fuser(1))
[root@node3 /]# killall ceph-osd
[root@node3 /]# umount /var/lib/ceph/osd/ceph-5
[root@node3 /]# umount /var/lib/ceph/osd/ceph-6
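
killall gets the job done, but on RHCS 2.0 the OSDs run as systemd units, so a cleaner equivalent (assuming the same OSD ids 5 and 6 as above) would be:

# systemctl stop ceph-osd@5 ceph-osd@6
# umount /var/lib/ceph/osd/ceph-5 /var/lib/ceph/osd/ceph-6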


# ceph osd tree
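
ceph osd tree prints the CRUSH hierarchy with each OSD's up/down and in/out state, which is how the dead OSDs handled below were spotted. For an overall health summary, pair it with:

# ceph -s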


# ceph osd down 9
osd.9 is already down.
[root@node1 ceph-cluster]# ceph osd down 10
osd.10 does not exist.
[root@node1 ceph-cluster]# ceph osd down 11
osd.11 is already down.
[root@node1 ceph-cluster]# ceph osd down 12
marked down osd.12.
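
Note that marking an OSD down only flags it; the entry stays in the CRUSH map. To remove one for good, the usual sequence (standard Ceph commands, shown for osd.12 as an example) is:

# ceph osd out 12
# ceph osd crush remove osd.12
# ceph auth del osd.12
# ceph osd rm 12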


# ceph osd pool get rbd pg_num
pg_num: 64
[root@node1 ceph-cluster]# ceph osd pool set rbd pg_num 128
set pool 0 pg_num to 128
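
Raising pg_num alone does not rebalance anything; pgp_num has to follow before the new placement groups are actually used for data placement:

# ceph osd pool set rbd pgp_num 128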

