Manually installing the Ceph backend storage service on CentOS 7.x





Prepare the environment
Install the base dependency packages on ceph1, ceph2, ...
yum -y install yum-plugin-priorities
yum install -y yum-utils snappy leveldb gdisk python-argparse gperftools-libs
echo 'kernel.pid_max = 4194303' >> /etc/sysctl.conf && sysctl -p
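
The pid_max bump matters because every OSD spawns a large number of threads, and with 24 OSDs per host the default limit of 32768 can be exhausted. An optional double-check across the nodes, assuming the ceph1-ceph8 hostnames used later and that passwordless SSH (step 1) is already in place:

for h in ceph{1..8}; do ssh root@$h sysctl kernel.pid_max; done   # each node should report 4194303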


0. Firewall settings on ceph1, ceph2, ... (a loop that applies the same rules on the remaining nodes is sketched after these commands)
firewall-cmd --zone=public --add-port=22/tcp --permanent
firewall-cmd --zone=public --add-port=123/tcp --permanent
firewall-cmd --zone=public --add-port=6789/tcp --permanent
firewall-cmd --zone=public --add-port=6800-7300/tcp --permanent
firewall-cmd --zone=public --add-port=8443/tcp --permanent
firewall-cmd --zone=public --add-port=9292/tcp --permanent
firewall-cmd --reload
firewall-cmd --zone=public --list-ports
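
The same ports are needed on every node; once passwordless SSH (step 1) is available, a loop along these lines avoids repeating the commands by hand (a sketch that assumes the ceph2-ceph8 hostnames; extend the host list as needed):

for h in ceph{2..8}; do
  ssh root@$h 'for p in 22/tcp 123/tcp 6789/tcp 6800-7300/tcp 8443/tcp 9292/tcp; do firewall-cmd --zone=public --add-port=$p --permanent; done; firewall-cmd --reload'
done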
 


1. Passwordless SSH setup (run on the ceph1 node only; the other nodes need no action). A loop form is sketched after the list of ssh-copy-id commands.
ssh-keygen  
ssh-copy-id root@controller1  
ssh-copy-id root@controller2  
ssh-copy-id root@controller3
ssh-copy-id root@computer1  
ssh-copy-id root@computer2
ssh-copy-id root@computer3
ssh-copy-id root@computer4
ssh-copy-id root@computer5
ssh-copy-id root@computer6
ssh-copy-id root@computer7
ssh-copy-id root@computer8
ssh-copy-id root@cinder1
ssh-copy-id root@cinder2
ssh-copy-id root@cinder3
ssh-copy-id root@ceph2
ssh-copy-id root@ceph3
ssh-copy-id root@ceph4
ssh-copy-id root@ceph5
ssh-copy-id root@ceph6
ssh-copy-id root@ceph7
ssh-copy-id root@ceph8
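
The same key distribution can be written as one loop (a sketch; the brace expansions mirror the host list above):

for h in controller{1..3} computer{1..8} cinder{1..3} ceph{2..8}; do
  ssh-copy-id root@$h
done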

 

1. Install the ceph-deploy tool on ceph1 (ceph-deploy --version reports 2.0.0)
yum install ceph-deploy  -y 

 

2. Create the Ceph working directory on ceph1
mkdir /etc/ceph
cd /etc/ceph/

 

3. Initialize the Mon configuration on ceph1
ceph-deploy new ceph1 ceph2 ceph3

 

4. Edit the ceph.conf configuration file on ceph1
echo '
public network = 10.64.0.0/24
cluster network = 10.128.0.0/24

mon_clock_drift_allowed = 2    
osd_pool_default_pg_num = 512
osd_pool_default_pgp_num = 512
osd pool default size = 3
osd pool default min size = 2
rbd_default_features = 1
client_quota = true
mon clock drift warn backoff = 30

[mon]
mon allow pool delete = true

[mgr]
mgr modules = dashboard
'>>./ceph.conf
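
The pg_num/pgp_num defaults of 512 above are just a starting point. The usual rule of thumb is that the total number of PGs across all pools should be roughly (number of OSDs x 100) / replica size, rounded to a power of two, and then divided among the pools. A back-of-the-envelope check for this cluster (an illustration only; adjust to your own OSD count and pool layout):

echo $((192 * 100 / 3))    # = 6400 PGs in total for 192 OSDs at size 3
# with the five pools created later at 1024 PGs each (5120 PGs), that is in the right range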

 

5. Install ceph and ceph-radosgw on controller1 controller2 controller3 computer1 computer2 computer3 computer4 computer5 computer6 computer7 computer8 ceph1 ceph2 ceph3 ceph4 ceph5 ceph6 ceph7 ceph8 cinder1 cinder2 cinder3
yum install -y ceph ceph-radosgw

 

6. Initialize the monitors and gather the keys on ceph1
ceph-deploy --overwrite-conf mon create-initial

When it succeeds, the following files appear:
[root@ceph1 ceph]# ll
total 292
-rw------- 1 root root     71 May 14 11:01 ceph.bootstrap-mds.keyring
-rw------- 1 root root     71 May 14 11:01 ceph.bootstrap-mgr.keyring
-rw------- 1 root root     71 May 14 11:01 ceph.bootstrap-osd.keyring
-rw------- 1 root root     71 May 14 11:01 ceph.bootstrap-rgw.keyring
-rw------- 1 root root     63 May 14 11:01 ceph.client.admin.keyring
-rw-r--r-- 1 root root    601 May 14 11:00 ceph.conf
-rw-r--r-- 1 root root 264395 May 14 11:01 ceph-deploy-ceph.log
-rw------- 1 root root     73 May 14 10:06 ceph.mon.keyring
-rw-r--r-- 1 root root     92 Apr 24 00:59 rbdmap
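
Before moving on, it is worth confirming that the three monitors have formed quorum (an optional check, not part of the original steps):

ceph -s                                   # health and mon quorum overview
ceph quorum_status --format json-pretty   # quorum_names should list ceph1, ceph2, ceph3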


7. Push the configuration and admin keyring from ceph1
ceph-deploy admin controller1 controller2 controller3 computer1 computer2 computer3 computer4 computer5 computer6 computer7 computer8 ceph2 ceph3 ceph4 ceph5 ceph6 ceph7 ceph8 cinder1 cinder2 cinder3
chmod 644 /etc/ceph/ceph.client.admin.keyring
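
The chmod above only relaxes the keyring on ceph1; if the ceph CLI will also be used as a non-root user on the other nodes, the same change can be pushed out with a loop like this (a sketch reusing the host list from step 7):

for h in controller{1..3} computer{1..8} cinder{1..3} ceph{2..8}; do
  ssh root@$h chmod 644 /etc/ceph/ceph.client.admin.keyring
done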


8. Check the disk layout on ceph1 ceph2 ceph3 ceph4 ceph5 ceph6 ceph7 ceph8
lsblk or fdisk -l
[root@ceph1 ceph]#  lsblk
NAME   MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT
sda      8:0    0   7.3T  0 disk 
sdb      8:16   0   7.3T  0 disk 
sdc      8:32   0   7.3T  0 disk 
sdd      8:48   0   7.3T  0 disk 
sde      8:64   0   7.3T  0 disk 
sdf      8:80   0   7.3T  0 disk 
sdg      8:96   0   7.3T  0 disk 
sdh      8:112  0   7.3T  0 disk 
sdi      8:128  0   7.3T  0 disk 
sdj      8:144  0   7.3T  0 disk 
sdk      8:160  0   7.3T  0 disk 
sdl      8:176  0   7.3T  0 disk 
sdm      8:192  0   7.3T  0 disk 
sdn      8:208  0   7.3T  0 disk 
sdo      8:224  0   7.3T  0 disk 
sdp      8:240  0   7.3T  0 disk 
sdq     65:0    0   7.3T  0 disk 
sdr     65:16   0   7.3T  0 disk 
sds     65:32   0   7.3T  0 disk 
sdt     65:48   0   7.3T  0 disk 
sdu     65:64   0   7.3T  0 disk 
sdv     65:80   0   7.3T  0 disk 
sdw     65:96   0   7.3T  0 disk 
sdx     65:112  0   7.3T  0 disk 
sdy     65:128  0 222.6G  0 disk 
├─sdy1  65:129  0  1000M  0 part /boot/efi
├─sdy2  65:130  0  1000M  0 part /boot
├─sdy3  65:131  0    32G  0 part [SWAP]
└─sdy4  65:132  0 188.6G  0 part /


8. Add mgr daemons from ceph1 (from Ceph 12 / Luminous onward the monitors must be accompanied by mgr daemons)
ceph-deploy mgr create ceph1
ceph-deploy mgr create ceph2
ceph-deploy mgr create ceph3


9. Enable the mgr dashboard module on ceph1
ceph mgr module enable dashboard 


10. Issue a self-signed certificate for the dashboard on ceph1
ceph dashboard create-self-signed-cert


11. Set the dashboard login credentials on ceph1
ceph dashboard set-login-credentials admin yangBJ2666@ccXX2019

 

12. Check the dashboard URL

ceph mgr services

[root@con-node1 ~]# ceph mgr services
{
    "dashboard": "https://ceph1:8443/"
}
[root@con-node1 ~]# 

# Browse to https://10.64.0.54:8443 and log in with admin / yangBJ2666@ccXX2019

 

13. Format the data disks on ceph1 ceph2 ceph3 ceph4 ceph5 ceph6 ceph7 ceph8 to wipe them (a loop form is sketched after the list)
mkfs.xfs -f /dev/sda
mkfs.xfs -f /dev/sdb
mkfs.xfs -f /dev/sdc
mkfs.xfs -f /dev/sdd
mkfs.xfs -f /dev/sde
mkfs.xfs -f /dev/sdf
mkfs.xfs -f /dev/sdg
mkfs.xfs -f /dev/sdh
mkfs.xfs -f /dev/sdi
mkfs.xfs -f /dev/sdj
mkfs.xfs -f /dev/sdk
mkfs.xfs -f /dev/sdl
mkfs.xfs -f /dev/sdm
mkfs.xfs -f /dev/sdn
mkfs.xfs -f /dev/sdo
mkfs.xfs -f /dev/sdp
mkfs.xfs -f /dev/sdq
mkfs.xfs -f /dev/sdr
mkfs.xfs -f /dev/sds
mkfs.xfs -f /dev/sdt
mkfs.xfs -f /dev/sdu
mkfs.xfs -f /dev/sdv
mkfs.xfs -f /dev/sdw
mkfs.xfs -f /dev/sdx
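
The 24 mkfs commands follow the same pattern on every node, so they can also be written as a loop, or the disks can be zapped from ceph1 with ceph-deploy, which additionally clears any old partition tables (both are sketches; the explicit list above is what was actually run here):

for d in /dev/sd{a..x}; do mkfs.xfs -f "$d"; done      # run locally on each ceph node
# or, from ceph1 (ceph-deploy 2.x syntax, one host at a time):
# ceph-deploy disk zap ceph2 /dev/sd{a..x}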

 


14. Add the OSDs from ceph1 (an equivalent nested loop is sketched after the full list)
ceph-deploy osd create --data /dev/sda ceph1
ceph-deploy osd create --data /dev/sdb ceph1
ceph-deploy osd create --data /dev/sdc ceph1
ceph-deploy osd create --data /dev/sdd ceph1
ceph-deploy osd create --data /dev/sde ceph1
ceph-deploy osd create --data /dev/sdf ceph1
ceph-deploy osd create --data /dev/sdg ceph1
ceph-deploy osd create --data /dev/sdh ceph1
ceph-deploy osd create --data /dev/sdi ceph1
ceph-deploy osd create --data /dev/sdj ceph1
ceph-deploy osd create --data /dev/sdk ceph1
ceph-deploy osd create --data /dev/sdl ceph1
ceph-deploy osd create --data /dev/sdm ceph1
ceph-deploy osd create --data /dev/sdn ceph1
ceph-deploy osd create --data /dev/sdo ceph1
ceph-deploy osd create --data /dev/sdp ceph1
ceph-deploy osd create --data /dev/sdq ceph1
ceph-deploy osd create --data /dev/sdr ceph1
ceph-deploy osd create --data /dev/sds ceph1
ceph-deploy osd create --data /dev/sdt ceph1
ceph-deploy osd create --data /dev/sdu ceph1
ceph-deploy osd create --data /dev/sdv ceph1
ceph-deploy osd create --data /dev/sdw ceph1
ceph-deploy osd create --data /dev/sdx ceph1


ceph-deploy osd create --data /dev/sda ceph2
ceph-deploy osd create --data /dev/sdb ceph2
ceph-deploy osd create --data /dev/sdc ceph2
ceph-deploy osd create --data /dev/sdd ceph2
ceph-deploy osd create --data /dev/sde ceph2
ceph-deploy osd create --data /dev/sdf ceph2
ceph-deploy osd create --data /dev/sdg ceph2
ceph-deploy osd create --data /dev/sdh ceph2
ceph-deploy osd create --data /dev/sdi ceph2
ceph-deploy osd create --data /dev/sdj ceph2
ceph-deploy osd create --data /dev/sdk ceph2
ceph-deploy osd create --data /dev/sdl ceph2
ceph-deploy osd create --data /dev/sdm ceph2
ceph-deploy osd create --data /dev/sdn ceph2
ceph-deploy osd create --data /dev/sdo ceph2
ceph-deploy osd create --data /dev/sdp ceph2
ceph-deploy osd create --data /dev/sdq ceph2
ceph-deploy osd create --data /dev/sdr ceph2
ceph-deploy osd create --data /dev/sds ceph2
ceph-deploy osd create --data /dev/sdt ceph2
ceph-deploy osd create --data /dev/sdu ceph2
ceph-deploy osd create --data /dev/sdv ceph2
ceph-deploy osd create --data /dev/sdw ceph2
ceph-deploy osd create --data /dev/sdx ceph2


ceph-deploy osd create --data /dev/sda ceph3
ceph-deploy osd create --data /dev/sdb ceph3
ceph-deploy osd create --data /dev/sdc ceph3
ceph-deploy osd create --data /dev/sdd ceph3
ceph-deploy osd create --data /dev/sde ceph3
ceph-deploy osd create --data /dev/sdf ceph3
ceph-deploy osd create --data /dev/sdg ceph3
ceph-deploy osd create --data /dev/sdh ceph3
ceph-deploy osd create --data /dev/sdi ceph3
ceph-deploy osd create --data /dev/sdj ceph3
ceph-deploy osd create --data /dev/sdk ceph3
ceph-deploy osd create --data /dev/sdl ceph3
ceph-deploy osd create --data /dev/sdm ceph3
ceph-deploy osd create --data /dev/sdn ceph3
ceph-deploy osd create --data /dev/sdo ceph3
ceph-deploy osd create --data /dev/sdp ceph3
ceph-deploy osd create --data /dev/sdq ceph3
ceph-deploy osd create --data /dev/sdr ceph3
ceph-deploy osd create --data /dev/sds ceph3
ceph-deploy osd create --data /dev/sdt ceph3
ceph-deploy osd create --data /dev/sdu ceph3
ceph-deploy osd create --data /dev/sdv ceph3
ceph-deploy osd create --data /dev/sdw ceph3
ceph-deploy osd create --data /dev/sdx ceph3


ceph-deploy osd create --data /dev/sda ceph4
ceph-deploy osd create --data /dev/sdb ceph4
ceph-deploy osd create --data /dev/sdc ceph4
ceph-deploy osd create --data /dev/sdd ceph4
ceph-deploy osd create --data /dev/sde ceph4
ceph-deploy osd create --data /dev/sdf ceph4
ceph-deploy osd create --data /dev/sdg ceph4
ceph-deploy osd create --data /dev/sdh ceph4
ceph-deploy osd create --data /dev/sdi ceph4
ceph-deploy osd create --data /dev/sdj ceph4
ceph-deploy osd create --data /dev/sdk ceph4
ceph-deploy osd create --data /dev/sdl ceph4
ceph-deploy osd create --data /dev/sdm ceph4
ceph-deploy osd create --data /dev/sdn ceph4
ceph-deploy osd create --data /dev/sdo ceph4
ceph-deploy osd create --data /dev/sdp ceph4
ceph-deploy osd create --data /dev/sdq ceph4
ceph-deploy osd create --data /dev/sdr ceph4
ceph-deploy osd create --data /dev/sds ceph4
ceph-deploy osd create --data /dev/sdt ceph4
ceph-deploy osd create --data /dev/sdu ceph4
ceph-deploy osd create --data /dev/sdv ceph4
ceph-deploy osd create --data /dev/sdw ceph4
ceph-deploy osd create --data /dev/sdx ceph4


ceph-deploy osd create --data /dev/sda ceph5
ceph-deploy osd create --data /dev/sdb ceph5
ceph-deploy osd create --data /dev/sdc ceph5
ceph-deploy osd create --data /dev/sdd ceph5
ceph-deploy osd create --data /dev/sde ceph5
ceph-deploy osd create --data /dev/sdf ceph5
ceph-deploy osd create --data /dev/sdg ceph5
ceph-deploy osd create --data /dev/sdh ceph5
ceph-deploy osd create --data /dev/sdi ceph5
ceph-deploy osd create --data /dev/sdj ceph5
ceph-deploy osd create --data /dev/sdk ceph5
ceph-deploy osd create --data /dev/sdl ceph5
ceph-deploy osd create --data /dev/sdm ceph5
ceph-deploy osd create --data /dev/sdn ceph5
ceph-deploy osd create --data /dev/sdo ceph5
ceph-deploy osd create --data /dev/sdp ceph5
ceph-deploy osd create --data /dev/sdq ceph5
ceph-deploy osd create --data /dev/sdr ceph5
ceph-deploy osd create --data /dev/sds ceph5
ceph-deploy osd create --data /dev/sdt ceph5
ceph-deploy osd create --data /dev/sdu ceph5
ceph-deploy osd create --data /dev/sdv ceph5
ceph-deploy osd create --data /dev/sdw ceph5
ceph-deploy osd create --data /dev/sdx ceph5


ceph-deploy osd create --data /dev/sda ceph6
ceph-deploy osd create --data /dev/sdb ceph6
ceph-deploy osd create --data /dev/sdc ceph6
ceph-deploy osd create --data /dev/sdd ceph6
ceph-deploy osd create --data /dev/sde ceph6
ceph-deploy osd create --data /dev/sdf ceph6
ceph-deploy osd create --data /dev/sdg ceph6
ceph-deploy osd create --data /dev/sdh ceph6
ceph-deploy osd create --data /dev/sdi ceph6
ceph-deploy osd create --data /dev/sdj ceph6
ceph-deploy osd create --data /dev/sdk ceph6
ceph-deploy osd create --data /dev/sdl ceph6
ceph-deploy osd create --data /dev/sdm ceph6
ceph-deploy osd create --data /dev/sdn ceph6
ceph-deploy osd create --data /dev/sdo ceph6
ceph-deploy osd create --data /dev/sdp ceph6
ceph-deploy osd create --data /dev/sdq ceph6
ceph-deploy osd create --data /dev/sdr ceph6
ceph-deploy osd create --data /dev/sds ceph6
ceph-deploy osd create --data /dev/sdt ceph6
ceph-deploy osd create --data /dev/sdu ceph6
ceph-deploy osd create --data /dev/sdv ceph6
ceph-deploy osd create --data /dev/sdw ceph6
ceph-deploy osd create --data /dev/sdx ceph6


ceph-deploy osd create --data /dev/sda ceph7
ceph-deploy osd create --data /dev/sdb ceph7
ceph-deploy osd create --data /dev/sdc ceph7
ceph-deploy osd create --data /dev/sdd ceph7
ceph-deploy osd create --data /dev/sde ceph7
ceph-deploy osd create --data /dev/sdf ceph7
ceph-deploy osd create --data /dev/sdg ceph7
ceph-deploy osd create --data /dev/sdh ceph7
ceph-deploy osd create --data /dev/sdi ceph7
ceph-deploy osd create --data /dev/sdj ceph7
ceph-deploy osd create --data /dev/sdk ceph7
ceph-deploy osd create --data /dev/sdl ceph7
ceph-deploy osd create --data /dev/sdm ceph7
ceph-deploy osd create --data /dev/sdn ceph7
ceph-deploy osd create --data /dev/sdo ceph7
ceph-deploy osd create --data /dev/sdp ceph7
ceph-deploy osd create --data /dev/sdq ceph7
ceph-deploy osd create --data /dev/sdr ceph7
ceph-deploy osd create --data /dev/sds ceph7
ceph-deploy osd create --data /dev/sdt ceph7
ceph-deploy osd create --data /dev/sdu ceph7
ceph-deploy osd create --data /dev/sdv ceph7
ceph-deploy osd create --data /dev/sdw ceph7
ceph-deploy osd create --data /dev/sdx ceph7


ceph-deploy osd create --data /dev/sda ceph8
ceph-deploy osd create --data /dev/sdb ceph8
ceph-deploy osd create --data /dev/sdc ceph8
ceph-deploy osd create --data /dev/sdd ceph8
ceph-deploy osd create --data /dev/sde ceph8
ceph-deploy osd create --data /dev/sdf ceph8
ceph-deploy osd create --data /dev/sdg ceph8
ceph-deploy osd create --data /dev/sdh ceph8
ceph-deploy osd create --data /dev/sdi ceph8
ceph-deploy osd create --data /dev/sdj ceph8
ceph-deploy osd create --data /dev/sdk ceph8
ceph-deploy osd create --data /dev/sdl ceph8
ceph-deploy osd create --data /dev/sdm ceph8
ceph-deploy osd create --data /dev/sdn ceph8
ceph-deploy osd create --data /dev/sdo ceph8
ceph-deploy osd create --data /dev/sdp ceph8
ceph-deploy osd create --data /dev/sdq ceph8
ceph-deploy osd create --data /dev/sdr ceph8
ceph-deploy osd create --data /dev/sds ceph8
ceph-deploy osd create --data /dev/sdt ceph8
ceph-deploy osd create --data /dev/sdu ceph8
ceph-deploy osd create --data /dev/sdv ceph8
ceph-deploy osd create --data /dev/sdw ceph8
ceph-deploy osd create --data /dev/sdx ceph8
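
Every command above follows the same --data <device> <host> pattern, so the whole rollout can equally be driven by a nested loop from ceph1 (a sketch equivalent to the explicit list):

for h in ceph{1..8}; do
  for d in /dev/sd{a..x}; do
    ceph-deploy osd create --data "$d" "$h"
  done
done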

 

16. From ceph1, push a consistent Ceph configuration to every node and grant admin permissions
ceph-deploy admin controller1 controller2 controller3 computer1 computer2 computer3 computer4 computer5 computer6 computer7 computer8 ceph2 ceph3 ceph4 ceph5 ceph6 ceph7 ceph8 cinder1 cinder2 cinder3

 

17. Check on ceph1 that port 8443 is listening
[root@ceph1 ceph]#  netstat -antpl | grep ceph-mgr | grep LISTEN 
tcp        0      0 10.64.0.54:6800         0.0.0.0:*               LISTEN      4450/ceph-mgr       
tcp6       0      0 :::8443                 :::*                    LISTEN      4450/ceph-mgr       
[root@ceph1 ceph]# 


18. Check the OSD status on ceph1
[root@ceph1 ceph]# ceph osd tree
ID  CLASS WEIGHT    TYPE NAME       STATUS REWEIGHT PRI-AFF 
 -1       733.56323 root default                            
 -3       174.65735     host ceph1                          
  0   hdd   7.27739         osd.0       up  1.00000 1.00000 
  1   hdd   7.27739         osd.1       up  1.00000 1.00000 
  2   hdd   7.27739         osd.2       up  1.00000 1.00000 
  3   hdd   7.27739         osd.3       up  1.00000 1.00000 
  4   hdd   7.27739         osd.4       up  1.00000 1.00000 
  5   hdd   7.27739         osd.5       up  1.00000 1.00000 
  6   hdd   7.27739         osd.6       up  1.00000 1.00000 
  7   hdd   7.27739         osd.7       up  1.00000 1.00000 
  8   hdd   7.27739         osd.8       up  1.00000 1.00000 
  9   hdd   7.27739         osd.9       up  1.00000 1.00000 
 10   hdd   7.27739         osd.10      up  1.00000 1.00000 
 11   hdd   7.27739         osd.11      up  1.00000 1.00000 
 12   hdd   7.27739         osd.12      up  1.00000 1.00000 
 13   hdd   7.27739         osd.13      up  1.00000 1.00000 
 14   hdd   7.27739         osd.14      up  1.00000 1.00000 
 15   hdd   7.27739         osd.15      up  1.00000 1.00000 
 16   hdd   7.27739         osd.16      up  1.00000 1.00000 
 17   hdd   7.27739         osd.17      up  1.00000 1.00000 
 18   hdd   7.27739         osd.18      up  1.00000 1.00000 
 19   hdd   7.27739         osd.19      up  1.00000 1.00000 
 20   hdd   7.27739         osd.20      up  1.00000 1.00000 
 21   hdd   7.27739         osd.21      up  1.00000 1.00000 
 22   hdd   7.27739         osd.22      up  1.00000 1.00000 
 23   hdd   7.27739         osd.23      up  1.00000 1.00000 
 -5       174.65685     host ceph2                          
 24   hdd   7.27739         osd.24      up  1.00000 1.00000 
 25   hdd   7.27739         osd.25      up  1.00000 1.00000 
 26   hdd   7.27739         osd.26      up  1.00000 1.00000 
 27   hdd   7.27739         osd.27      up  1.00000 1.00000 
 28   hdd   7.27739         osd.28      up  1.00000 1.00000 
 29   hdd   7.27739         osd.29      up  1.00000 1.00000 
 30   hdd   7.27739         osd.30      up  1.00000 1.00000 
 31   hdd   7.27739         osd.31      up  1.00000 1.00000 
 32   hdd   7.27739         osd.32      up  1.00000 1.00000 
 33   hdd   7.27739         osd.33      up  1.00000 1.00000 
 34   hdd   7.27739         osd.34      up  1.00000 1.00000 
 35   hdd   7.27739         osd.35      up  1.00000 1.00000 
 36   hdd   7.27739         osd.36      up  1.00000 1.00000 
 37   hdd   7.27739         osd.37      up  1.00000 1.00000 
 38   hdd   7.27739         osd.38      up  1.00000 1.00000 
 39   hdd   7.27739         osd.39      up  1.00000 1.00000 
 40   hdd   7.27739         osd.40      up  1.00000 1.00000 
 41   hdd   7.27739         osd.41      up  1.00000 1.00000 
 42   hdd   7.27739         osd.42      up  1.00000 1.00000 
 43   hdd   7.27739         osd.43      up  1.00000 1.00000 
 44   hdd   7.27739         osd.44      up  1.00000 1.00000 
 45   hdd   7.27739         osd.45      up  1.00000 1.00000 
 46   hdd   7.27739         osd.46      up  1.00000 1.00000 
 47   hdd   7.27689         osd.47      up  1.00000 1.00000 
 -7       174.65735     host ceph3                          
 48   hdd   7.27739         osd.48      up  1.00000 1.00000 
 49   hdd   7.27739         osd.49      up  1.00000 1.00000 
 50   hdd   7.27739         osd.50      up  1.00000 1.00000 
 51   hdd   7.27739         osd.51      up  1.00000 1.00000 
 52   hdd   7.27739         osd.52      up  1.00000 1.00000 
 53   hdd   7.27739         osd.53      up  1.00000 1.00000 
 54   hdd   7.27739         osd.54      up  1.00000 1.00000 
 55   hdd   7.27739         osd.55      up  1.00000 1.00000 
 56   hdd   7.27739         osd.56      up  1.00000 1.00000 
 57   hdd   7.27739         osd.57      up  1.00000 1.00000 
 58   hdd   7.27739         osd.58      up  1.00000 1.00000 
 59   hdd   7.27739         osd.59      up  1.00000 1.00000 
 60   hdd   7.27739         osd.60      up  1.00000 1.00000 
 61   hdd   7.27739         osd.61      up  1.00000 1.00000 
 62   hdd   7.27739         osd.62      up  1.00000 1.00000 
 63   hdd   7.27739         osd.63      up  1.00000 1.00000 
 64   hdd   7.27739         osd.64      up  1.00000 1.00000 
 65   hdd   7.27739         osd.65      up  1.00000 1.00000 
 66   hdd   7.27739         osd.66      up  1.00000 1.00000 
 67   hdd   7.27739         osd.67      up  1.00000 1.00000 
 68   hdd   7.27739         osd.68      up  1.00000 1.00000 
 69   hdd   7.27739         osd.69      up  1.00000 1.00000 
 70   hdd   7.27739         osd.70      up  1.00000 1.00000 
 71   hdd   7.27739         osd.71      up  1.00000 1.00000 
-13        41.91833     host ceph4                          
 72   ssd   1.74660         osd.72      up  1.00000 1.00000 
 73   ssd   1.74660         osd.73      up  1.00000 1.00000 
 74   ssd   1.74660         osd.74      up  1.00000 1.00000 
 75   ssd   1.74660         osd.75      up  1.00000 1.00000 
 76   ssd   1.74660         osd.76      up  1.00000 1.00000 
 77   ssd   1.74660         osd.77      up  1.00000 1.00000 
 78   ssd   1.74660         osd.78      up  1.00000 1.00000 
 79   ssd   1.74660         osd.79      up  1.00000 1.00000 
 80   ssd   1.74660         osd.80      up  1.00000 1.00000 
 81   ssd   1.74660         osd.81      up  1.00000 1.00000 
 82   ssd   1.74660         osd.82      up  1.00000 1.00000 
 83   ssd   1.74660         osd.83      up  1.00000 1.00000 
 84   ssd   1.74660         osd.84      up  1.00000 1.00000 
 85   ssd   1.74660         osd.85      up  1.00000 1.00000 
 86   ssd   1.74660         osd.86      up  1.00000 1.00000 
 87   ssd   1.74660         osd.87      up  1.00000 1.00000 
 88   ssd   1.74660         osd.88      up  1.00000 1.00000 
 89   ssd   1.74660         osd.89      up  1.00000 1.00000 
 90   ssd   1.74660         osd.90      up  1.00000 1.00000 
 91   ssd   1.74660         osd.91      up  1.00000 1.00000 
 92   ssd   1.74660         osd.92      up  1.00000 1.00000 
 93   ssd   1.74660         osd.93      up  1.00000 1.00000 
 94   ssd   1.74660         osd.94      up  1.00000 1.00000 
 95   ssd   1.74660         osd.95      up  1.00000 1.00000 
-16        41.91833     host ceph5                          
 96   ssd   1.74660         osd.96      up  1.00000 1.00000 
 97   ssd   1.74660         osd.97      up  1.00000 1.00000 
 98   ssd   1.74660         osd.98      up  1.00000 1.00000 
 99   ssd   1.74660         osd.99      up  1.00000 1.00000 
100   ssd   1.74660         osd.100     up  1.00000 1.00000 
101   ssd   1.74660         osd.101     up  1.00000 1.00000 
102   ssd   1.74660         osd.102     up  1.00000 1.00000 
103   ssd   1.74660         osd.103     up  1.00000 1.00000 
104   ssd   1.74660         osd.104     up  1.00000 1.00000 
105   ssd   1.74660         osd.105     up  1.00000 1.00000 
106   ssd   1.74660         osd.106     up  1.00000 1.00000 
107   ssd   1.74660         osd.107     up  1.00000 1.00000 
108   ssd   1.74660         osd.108     up  1.00000 1.00000 
109   ssd   1.74660         osd.109     up  1.00000 1.00000 
110   ssd   1.74660         osd.110     up  1.00000 1.00000 
111   ssd   1.74660         osd.111     up  1.00000 1.00000 
112   ssd   1.74660         osd.112     up  1.00000 1.00000 
113   ssd   1.74660         osd.113     up  1.00000 1.00000 
114   ssd   1.74660         osd.114     up  1.00000 1.00000 
115   ssd   1.74660         osd.115     up  1.00000 1.00000 
116   ssd   1.74660         osd.116     up  1.00000 1.00000 
117   ssd   1.74660         osd.117     up  1.00000 1.00000 
118   ssd   1.74660         osd.118     up  1.00000 1.00000 
119   ssd   1.74660         osd.119     up  1.00000 1.00000 
-19        41.91833     host ceph6                          
120   ssd   1.74660         osd.120     up  1.00000 1.00000 
121   ssd   1.74660         osd.121     up  1.00000 1.00000 
122   ssd   1.74660         osd.122     up  1.00000 1.00000 
123   ssd   1.74660         osd.123     up  1.00000 1.00000 
124   ssd   1.74660         osd.124     up  1.00000 1.00000 
125   ssd   1.74660         osd.125     up  1.00000 1.00000 
126   ssd   1.74660         osd.126     up  1.00000 1.00000 
127   ssd   1.74660         osd.127     up  1.00000 1.00000 
128   ssd   1.74660         osd.128     up  1.00000 1.00000 
129   ssd   1.74660         osd.129     up  1.00000 1.00000 
130   ssd   1.74660         osd.130     up  1.00000 1.00000 
131   ssd   1.74660         osd.131     up  1.00000 1.00000 
132   ssd   1.74660         osd.132     up  1.00000 1.00000 
133   ssd   1.74660         osd.133     up  1.00000 1.00000 
134   ssd   1.74660         osd.134     up  1.00000 1.00000 
135   ssd   1.74660         osd.135     up  1.00000 1.00000 
136   ssd   1.74660         osd.136     up  1.00000 1.00000 
137   ssd   1.74660         osd.137     up  1.00000 1.00000 
138   ssd   1.74660         osd.138     up  1.00000 1.00000 
139   ssd   1.74660         osd.139     up  1.00000 1.00000 
140   ssd   1.74660         osd.140     up  1.00000 1.00000 
141   ssd   1.74660         osd.141     up  1.00000 1.00000 
142   ssd   1.74660         osd.142     up  1.00000 1.00000 
143   ssd   1.74660         osd.143     up  1.00000 1.00000 
-22        41.91833     host ceph7                          
144   ssd   1.74660         osd.144     up  1.00000 1.00000 
145   ssd   1.74660         osd.145     up  1.00000 1.00000 
146   ssd   1.74660         osd.146     up  1.00000 1.00000 
147   ssd   1.74660         osd.147     up  1.00000 1.00000 
148   ssd   1.74660         osd.148     up  1.00000 1.00000 
149   ssd   1.74660         osd.149     up  1.00000 1.00000 
150   ssd   1.74660         osd.150     up  1.00000 1.00000 
151   ssd   1.74660         osd.151     up  1.00000 1.00000 
152   ssd   1.74660         osd.152     up  1.00000 1.00000 
153   ssd   1.74660         osd.153     up  1.00000 1.00000 
154   ssd   1.74660         osd.154     up  1.00000 1.00000 
155   ssd   1.74660         osd.155     up  1.00000 1.00000 
156   ssd   1.74660         osd.156     up  1.00000 1.00000 
157   ssd   1.74660         osd.157     up  1.00000 1.00000 
158   ssd   1.74660         osd.158     up  1.00000 1.00000 
159   ssd   1.74660         osd.159     up  1.00000 1.00000 
160   ssd   1.74660         osd.160     up  1.00000 1.00000 
161   ssd   1.74660         osd.161     up  1.00000 1.00000 
162   ssd   1.74660         osd.162     up  1.00000 1.00000 
163   ssd   1.74660         osd.163     up  1.00000 1.00000 
164   ssd   1.74660         osd.164     up  1.00000 1.00000 
165   ssd   1.74660         osd.165     up  1.00000 1.00000 
166   ssd   1.74660         osd.166     up  1.00000 1.00000 
167   ssd   1.74660         osd.167     up  1.00000 1.00000 
-25        41.91833     host ceph8                          
168   ssd   1.74660         osd.168     up  1.00000 1.00000 
169   ssd   1.74660         osd.169     up  1.00000 1.00000 
170   ssd   1.74660         osd.170     up  1.00000 1.00000 
171   ssd   1.74660         osd.171     up  1.00000 1.00000 
172   ssd   1.74660         osd.172     up  1.00000 1.00000 
173   ssd   1.74660         osd.173     up  1.00000 1.00000 
174   ssd   1.74660         osd.174     up  1.00000 1.00000 
175   ssd   1.74660         osd.175     up  1.00000 1.00000 
176   ssd   1.74660         osd.176     up  1.00000 1.00000 
177   ssd   1.74660         osd.177     up  1.00000 1.00000 
178   ssd   1.74660         osd.178     up  1.00000 1.00000 
179   ssd   1.74660         osd.179     up  1.00000 1.00000 
180   ssd   1.74660         osd.180     up  1.00000 1.00000 
181   ssd   1.74660         osd.181     up  1.00000 1.00000 
182   ssd   1.74660         osd.182     up  1.00000 1.00000 
183   ssd   1.74660         osd.183     up  1.00000 1.00000 
184   ssd   1.74660         osd.184     up  1.00000 1.00000 
185   ssd   1.74660         osd.185     up  1.00000 1.00000 
186   ssd   1.74660         osd.186     up  1.00000 1.00000 
187   ssd   1.74660         osd.187     up  1.00000 1.00000 
188   ssd   1.74660         osd.188     up  1.00000 1.00000 
189   ssd   1.74660         osd.189     up  1.00000 1.00000 
190   ssd   1.74660         osd.190     up  1.00000 1.00000 
191   ssd   1.74660         osd.191     up  1.00000 1.00000 
[root@ceph1 ceph]# 

-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------

Configure failure-domain isolation

Use the CRUSH map to isolate failure domains so that PGs are spread across hosts in different racks.
With the default layout the failure domain is still host and PG placement is fairly scattered; in a large cluster, replica PGs can easily land on OSDs of neighbouring hosts in the same rack, and if that rack loses power the whole cluster can go into ERROR.

1. Rack layout
ceph1-4 --- rack A
ceph5-8 --- rack B

 

2. Add the racks:
ceph osd crush add-bucket rackA rack
ceph osd crush add-bucket rackB rack

 

3. Move each host into its rack:
ceph osd crush move ceph1 rack=rackA
ceph osd crush move ceph2 rack=rackA
ceph osd crush move ceph3 rack=rackA
ceph osd crush move ceph4 rack=rackA
ceph osd crush move ceph5 rack=rackB
ceph osd crush move ceph6 rack=rackB
ceph osd crush move ceph7 rack=rackB
ceph osd crush move ceph8 rack=rackB

 

4. Move the racks under the corresponding root:
ceph osd crush move rackA root=default
ceph osd crush move rackB root=default
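
After the moves, the new hierarchy can be confirmed from ceph1 (an optional check):

ceph osd tree | less      # rackA and rackB should now sit between root default and the hosts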


-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------


19. Check the device classes on ceph1; only ssd and hdd exist
[root@ceph1 ~]# ceph osd crush class ls
[
    "ssd",
    "hdd"
]
[root@ceph1 ~]# 


20. Export the CRUSH map on ceph1
ceph osd getcrushmap -o crushmap.map


21. Decompile the map into readable text on ceph1
crushtool -d crushmap.map -o crushmap.txt


22. Edit crushmap.txt on ceph1

vim crushmap.txt

In the # rules section, add the ssd and hdd rules after replicated_rule (the hdd and ssd rules below are the manually added content)

# begin crush map
tunable choose_local_tries 0
tunable choose_local_fallback_tries 0
tunable choose_total_tries 50
tunable chooseleaf_descend_once 1
tunable chooseleaf_vary_r 1
tunable chooseleaf_stable 1
tunable straw_calc_version 1
tunable allowed_bucket_algs 54

# devices
device 0 osd.0 class hdd
device 1 osd.1 class hdd
device 2 osd.2 class hdd
device 3 osd.3 class hdd
device 4 osd.4 class hdd
device 5 osd.5 class hdd
device 6 osd.6 class hdd
device 7 osd.7 class hdd
device 8 osd.8 class hdd
device 9 osd.9 class hdd
device 10 osd.10 class hdd
device 11 osd.11 class hdd
device 12 osd.12 class hdd
device 13 osd.13 class hdd
device 14 osd.14 class hdd
device 15 osd.15 class hdd
device 16 osd.16 class hdd
device 17 osd.17 class hdd
device 18 osd.18 class hdd
device 19 osd.19 class hdd
device 20 osd.20 class hdd
device 21 osd.21 class hdd
device 22 osd.22 class hdd
device 23 osd.23 class hdd

# types
type 0 osd
type 1 host
type 2 chassis
type 3 rack
type 4 row
type 5 pdu
type 6 pod
type 7 room
type 8 datacenter
type 9 region
type 10 root

# buckets
root default {
 id -1  # do not change unnecessarily
 id -2 class hdd  # do not change unnecessarily
 # weight 0.000
 alg straw2
 hash 0 # rjenkins1
}
host ceph1 {
 id -3  # do not change unnecessarily
 id -4 class hdd  # do not change unnecessarily
 # weight 43.664
 alg straw2
 hash 0 # rjenkins1
 item osd.0 weight 5.458
 item osd.1 weight 5.458
 item osd.2 weight 5.458
 item osd.3 weight 5.458
 item osd.4 weight 5.458
 item osd.5 weight 5.458
 item osd.6 weight 5.458
 item osd.7 weight 5.458
}
host ceph-node2 {
 id -5  # do not change unnecessarily
 id -6 class hdd  # do not change unnecessarily
 # weight 43.664
 alg straw2
 hash 0 # rjenkins1
 item osd.8 weight 5.458
 item osd.9 weight 5.458
 item osd.10 weight 5.458
 item osd.11 weight 5.458
 item osd.12 weight 5.458
 item osd.13 weight 5.458
 item osd.14 weight 5.458
 item osd.15 weight 5.458
}
host ceph-node3 {
 id -7  # do not change unnecessarily
 id -8 class hdd  # do not change unnecessarily
 # weight 43.664
 alg straw2
 hash 0 # rjenkins1
 item osd.16 weight 5.458
 item osd.17 weight 5.458
 item osd.18 weight 5.458
 item osd.19 weight 5.458
 item osd.20 weight 5.458
 item osd.21 weight 5.458
 item osd.22 weight 5.458
 item osd.23 weight 5.458
}
rack rackA {
 id -9  # do not change unnecessarily
 id -14 class hdd  # do not change unnecessarily
 # weight 43.664
 alg straw2
 hash 0 # rjenkins1
 item ceph1 weight 43.664
}
rack rackB {
 id -10  # do not change unnecessarily
 id -13 class hdd  # do not change unnecessarily
 # weight 43.664
 alg straw2
 hash 0 # rjenkins1
 item ceph-node2 weight 43.664
}
rack rackC {
 id -11  # do not change unnecessarily
 id -12 class hdd  # do not change unnecessarily
 # weight 43.664
 alg straw2
 hash 0 # rjenkins1
 item ceph-node3 weight 43.664
}
root rack-a {
 id -17  # do not change unnecessarily
 id -18 class hdd  # do not change unnecessarily
 # weight 43.664
 alg straw2
 hash 0 # rjenkins1
 item rackA weight 43.664
}
root rack-b {
 id -19  # do not change unnecessarily
 id -20 class hdd  # do not change unnecessarily
 # weight 43.664
 alg straw2
 hash 0 # rjenkins1
 item rackB weight 43.664
}
root rack-c {
 id -21  # do not change unnecessarily
 id -22 class hdd  # do not change unnecessarily
 # weight 43.664
 alg straw2
 hash 0 # rjenkins1
 item rackC weight 43.664
}

# rules
rule replicated_rule {
 id 0
 type replicated
 min_size 1
 max_size 10
 step take default
 step chooseleaf firstn 0 type host
 step emit
}

rule hdd {
 id 1
 type replicated
 min_size 1
 max_size 10
 step take default class hdd
 step chooseleaf firstn 0 type osd
 step emit
}

rule ssd {
 id 2
 type replicated
 min_size 1
 max_size 10
 step take default class ssd
 step chooseleaf firstn 0 type osd
 step emit
}

# end crush map


23. Recompile the map back to binary on ceph1
crushtool -c crushmap.txt -o crushmap.map


24. Load the new map into Ceph on ceph1
ceph osd setcrushmap -i crushmap.map
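
Once the new map is active, the rules and the class-based shadow trees can be inspected (optional checks):

ceph osd crush rule ls                 # should list replicated_rule, hdd and ssd
ceph osd crush tree --show-shadow      # shows the per-class (~hdd / ~ssd) hierarchies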

 


25. Create the pools on ceph1
ceph osd pool create vms_ssd 1024 ssd
ceph osd pool create vms_hdd 1024 hdd
ceph osd pool create volumes_ssd 1024 ssd
ceph osd pool create volumes_hdd 1024 hdd
ceph osd pool create images 1024 ssd


ceph osd pool application enable vms_ssd rbd
ceph osd pool application enable vms_hdd rbd
ceph osd pool application enable volumes_ssd rbd
ceph osd pool application enable volumes_hdd rbd
ceph osd pool application enable images rbd
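
A quick way to confirm each pool picked up the intended CRUSH rule and PG count (optional):

ceph osd pool ls detail    # check the crush_rule and pg_num values for every pool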


26. Adjust the ceph.conf configuration file on ceph1
vim /etc/ceph/ceph.conf
Add under [global]:
osd_crush_update_on_start = true


27. Sync the Ceph configuration from ceph1
ceph-deploy --overwrite-conf admin controller1 controller2 controller3 computer1 computer2 computer3 computer4 computer5 computer6 computer7 computer8 ceph2 ceph3 ceph4 ceph5 ceph6 ceph7 ceph8 cinder1 cinder2 cinder3


28. Reload the mgr dashboard module on ceph1
ceph mgr module disable dashboard && ceph mgr module enable dashboard  


29. Create the Ceph client users and keys on ceph1
ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes_ssd, allow rwx pool=volumes_hdd, allow rwx pool=vms_ssd, allow rwx pool=vms_hdd, allow rx pool=images'


30. Write the keyring files and assign ownership on ceph1
ceph auth get-or-create client.glance | tee /etc/ceph/ceph.client.glance.keyring
ceph auth get-or-create client.cinder | tee /etc/ceph/ceph.client.cinder.keyring

chown glance:glance /etc/ceph/ceph.client.glance.keyring
chown nova:nova /etc/ceph/ceph.client.cinder.keyring


31. Copy the keyrings from ceph1 to the corresponding nodes and fix ownership
#glance
scp /etc/ceph/ceph.client.glance.keyring controller1:/etc/ceph/
scp /etc/ceph/ceph.client.glance.keyring controller2:/etc/ceph/
scp /etc/ceph/ceph.client.glance.keyring controller3:/etc/ceph/
ssh controller1 sudo chown glance:glance /etc/ceph/ceph.client.glance.keyring
ssh controller2 sudo chown glance:glance /etc/ceph/ceph.client.glance.keyring
ssh controller3 sudo chown glance:glance /etc/ceph/ceph.client.glance.keyring

 


#nova compute
scp /etc/ceph/ceph.client.cinder.keyring computer1:/etc/ceph/
scp /etc/ceph/ceph.client.cinder.keyring computer2:/etc/ceph/
scp /etc/ceph/ceph.client.cinder.keyring computer3:/etc/ceph/
scp /etc/ceph/ceph.client.cinder.keyring computer4:/etc/ceph/
scp /etc/ceph/ceph.client.cinder.keyring computer5:/etc/ceph/
scp /etc/ceph/ceph.client.cinder.keyring computer6:/etc/ceph/
scp /etc/ceph/ceph.client.cinder.keyring computer7:/etc/ceph/
scp /etc/ceph/ceph.client.cinder.keyring computer8:/etc/ceph
ssh computer1 sudo chown nova:nova /etc/ceph/ceph.client.cinder.keyring
ssh computer2 sudo chown nova:nova /etc/ceph/ceph.client.cinder.keyring
ssh computer3 sudo chown nova:nova /etc/ceph/ceph.client.cinder.keyring
ssh computer4 sudo chown nova:nova /etc/ceph/ceph.client.cinder.keyring
ssh computer5 sudo chown nova:nova /etc/ceph/ceph.client.cinder.keyring
ssh computer6 sudo chown nova:nova /etc/ceph/ceph.client.cinder.keyring
ssh computer7 sudo chown nova:nova /etc/ceph/ceph.client.cinder.keyring
ssh computer8 sudo chown nova:nova /etc/ceph/ceph.client.cinder.keyring
scp /etc/ceph/ceph.client.glance.keyring computer1:/etc/ceph/
scp /etc/ceph/ceph.client.glance.keyring computer2:/etc/ceph/
scp /etc/ceph/ceph.client.glance.keyring computer3:/etc/ceph/
scp /etc/ceph/ceph.client.glance.keyring computer4:/etc/ceph/
scp /etc/ceph/ceph.client.glance.keyring computer5:/etc/ceph/
scp /etc/ceph/ceph.client.glance.keyring computer6:/etc/ceph/
scp /etc/ceph/ceph.client.glance.keyring computer7:/etc/ceph/
scp /etc/ceph/ceph.client.glance.keyring computer8:/etc/ceph/
ssh computer1 sudo chown nova:nova /etc/ceph/ceph.client.glance.keyring
ssh computer2 sudo chown nova:nova /etc/ceph/ceph.client.glance.keyring
ssh computer3 sudo chown nova:nova /etc/ceph/ceph.client.glance.keyring
ssh computer4 sudo chown nova:nova /etc/ceph/ceph.client.glance.keyring
ssh computer5 sudo chown nova:nova /etc/ceph/ceph.client.glance.keyring
ssh computer6 sudo chown nova:nova /etc/ceph/ceph.client.glance.keyring
ssh computer7 sudo chown nova:nova /etc/ceph/ceph.client.glance.keyring
ssh computer8 sudo chown nova:nova /etc/ceph/ceph.client.glance.keyring

 

#cinder storage
scp /etc/ceph/ceph.client.cinder.keyring cinder1:/etc/ceph/
scp /etc/ceph/ceph.client.cinder.keyring cinder2:/etc/ceph/
scp /etc/ceph/ceph.client.cinder.keyring cinder3:/etc/ceph/
ssh cinder1 sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
ssh cinder2 sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
ssh cinder3 sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Integrate Glance with Ceph


1. Run on controller1, controller2, and controller3
# Change the Glance default store to Ceph and comment out any other [glance_store] configuration

echo '
[glance_store]
stores = rbd,file
default_store = rbd
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
rbd_store_chunk_size = 8
'>>/etc/glance/glance-api.conf
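
On top of these settings it is common (though not part of the original steps) to also expose image locations so that Cinder and Nova can create copy-on-write clones from RBD-backed images instead of full copies. If wanted, add the following under the [DEFAULT] section of /etc/glance/glance-api.conf (do not simply append it to the end of the file, or it lands in the wrong section):

# in /etc/glance/glance-api.conf, under [DEFAULT]:
# show_image_direct_url = True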

 

2. Restart the Glance services on controller1, controller2, and controller3
systemctl restart openstack-glance-api.service openstack-glance-registry.service
systemctl status openstack-glance-api.service openstack-glance-registry.service

 

3. Test from controller1: fetch a test image
wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img


4. On controller1, convert the QCOW2 image to RAW (RAW is recommended whenever Ceph is the backend)
qemu-img convert cirros-0.4.0-x86_64-disk.img cirros-0.4.0-x86_64-disk.raw


5. On controller1, add the image to Glance
openstack image create "cirros-0.4.0-x86_64" --file cirros-0.4.0-x86_64-disk.raw --disk-format raw --container-format bare --public


6. On any node, check that the Glance image exists in Ceph
rbd ls images

d0f18cee-dad6-4e6b-9c39-ffa491b48a1d
------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Integrate Nova with Ceph


1. Generate a UUID on computer1 (a single UUID is enough; generate it once on computer1 and reuse it on every compute node)
uuidgen 

[root@computer1 ceph]# uuidgen 
a55522e2-2d92-4639-baed-74c84b8137e0


2. On computer1, computer2, ..., create the libvirt secret definition for Ceph using that UUID
echo '
<secret ephemeral="no" private="no">   
<uuid>a55522e2-2d92-4639-baed-74c84b8137e0</uuid>   
<usage type="ceph">   
<name>client.cinder secret</name>   
</usage>   
</secret> 
'>ceph.xml

virsh secret-define --file ceph.xml
virsh secret-set-value --secret a55522e2-2d92-4639-baed-74c84b8137e0 --base64 $(cat /etc/ceph/ceph.client.cinder.keyring |grep key|awk -F ' '  '{print $3}')


3. Check the secret on computer1, computer2, ...
virsh secret-list  


4. On computer1, computer2, ..., add the [libvirt] settings to nova.conf
echo '
[libvirt]
virt_type = kvm
cpu_mode = host-passthrough
block_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_NON_SHARED_INC
live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST
images_type = rbd
images_rbd_pool = vms_ssd
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
rbd_secret_uuid = a55522e2-2d92-4639-baed-74c84b8137e0
disk_cachemodes = "network=writeback"
inject_password = True
inject_key = false
inject_partition = -2
hw_disk_discard = unmap
'>>/etc/nova/nova.conf


5. Restart nova-compute and libvirtd on computer1, computer2, ...
systemctl restart openstack-nova-compute.service libvirtd.service
systemctl status openstack-nova-compute.service libvirtd.service
------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Integrate Cinder with Ceph

1. Run on cinder1, cinder2, and cinder3

echo '
[ssd]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes_ssd
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = cinder
rbd_secret_uuid = a55522e2-2d92-4639-baed-74c84b8137e0
volume_backend_name = ssd

[hdd]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes_hdd
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = cinder
rbd_secret_uuid = a55522e2-2d92-4639-baed-74c84b8137e0
volume_backend_name = hdd
'>>/etc/cinder/cinder.conf
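
These two backend sections only take effect if they are also enabled in the [DEFAULT] section of /etc/cinder/cinder.conf; that is assumed to be configured already, but if not, something along these lines is needed (add it under [DEFAULT], not at the end of the file):

# in /etc/cinder/cinder.conf, under [DEFAULT]:
# enabled_backends = ssd,hdd
# glance_api_version = 2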


2. Restart the services on cinder1, cinder2, and cinder3
systemctl restart openstack-cinder-volume.service target.service
systemctl status openstack-cinder-volume.service target.service


3. On controller1, check that the Cinder volume services show the Ceph-backed backends
cinder service-list
cinder-manage service list


The output looks like this:
[root@controller1 ~]# cinder service-list
cinder-manage service list
+------------------+-------------+------+---------+-------+----------------------------+-----------------+
| Binary           | Host        | Zone | Status  | State | Updated_at                 | Disabled Reason |
+------------------+-------------+------+---------+-------+----------------------------+-----------------+
| cinder-scheduler | controller1 | nova | enabled | up    | 2019-03-27T04:26:12.000000 | -               |
| cinder-scheduler | controller2 | nova | enabled | up    | 2019-03-27T04:26:05.000000 | -               |
| cinder-scheduler | controller3 | nova | enabled | up    | 2019-03-27T04:26:09.000000 | -               |
| cinder-volume    | cinder1@hdd | nova | enabled | up    | 2019-03-27T04:26:05.000000 | -               |
| cinder-volume    | cinder1@ssd | nova | enabled | up    | 2019-03-27T04:26:05.000000 | -               |
| cinder-volume    | cinder2@hdd | nova | enabled | up    | 2019-03-27T04:26:05.000000 | -               |
| cinder-volume    | cinder2@ssd | nova | enabled | up    | 2019-03-27T04:26:05.000000 | -               |
| cinder-volume    | cinder3@hdd | nova | enabled | up    | 2019-03-27T04:26:07.000000 | -               |
| cinder-volume    | cinder3@ssd | nova | enabled | up    | 2019-03-27T04:26:07.000000 | -               |
+------------------+-------------+------+---------+-------+----------------------------+-----------------+
[root@controller1 ~]# cinder-manage service list
Deprecated: Option "logdir" from group "DEFAULT" is deprecated. Use option "log-dir" from group "DEFAULT".
Binary           Host                                 Zone             Status     State Updated At           RPC Version  Object Version  Cluster                             
cinder-scheduler controller1                          nova             enabled    :-)   2019-03-27 04:26:12  3.11         1.37                                                
cinder-scheduler controller2                          nova             enabled    :-)   2019-03-27 04:26:15  3.11         1.37                                                
cinder-scheduler controller3                          nova             enabled    :-)   2019-03-27 04:26:09  3.11         1.37                                                
cinder-volume    cinder1@ssd                          nova             enabled    :-)   2019-03-27 04:26:15  3.16         1.37                                                
cinder-volume    cinder1@hdd                          nova             enabled    :-)   2019-03-27 04:26:15  3.16         1.37                                                
cinder-volume    cinder2@ssd                          nova             enabled    :-)   2019-03-27 04:26:15  3.16         1.37                                                
cinder-volume    cinder2@hdd                          nova             enabled    :-)   2019-03-27 04:26:15  3.16         1.37                                                
cinder-volume    cinder3@ssd                          nova             enabled    :-)   2019-03-27 04:26:07  3.16         1.37                                                
cinder-volume    cinder3@hdd                          nova             enabled    :-)   2019-03-27 04:26:07  3.16         1.37 

32. On controller1, create the ssd and hdd volume types
cinder --os-username admin --os-tenant-name admin type-create ssd
cinder --os-username admin --os-tenant-name admin type-key ssd set volume_backend_name=ssd
cinder --os-username admin --os-tenant-name admin extra-specs-list


cinder --os-username admin --os-tenant-name admin type-create hdd
cinder --os-username admin --os-tenant-name admin type-key hdd set volume_backend_name=hdd
cinder --os-username admin --os-tenant-name admin extra-specs-list

 

 

33. On controller1, create volumes of both the ssd and hdd types

#ssd
cinder create --display-name 'disk-ssd01' --description "ssd"  --volume-type ssd 10
cinder create --display-name 'disk-ssd02' --description "ssd"  --volume-type ssd 10
cinder create --display-name 'disk-ssd03' --description "ssd"  --volume-type ssd 10
cinder create --display-name 'disk-ssd04' --description "ssd"  --volume-type ssd 10
cinder create --display-name 'disk-ssd05' --description "ssd"  --volume-type ssd 10
cinder create --display-name 'disk-ssd06' --description "ssd"  --volume-type ssd 20
cinder create --display-name 'disk-ssd07' --description "ssd"  --volume-type ssd 20


#hdd
cinder create --display-name 'disk-hdd01' --description "hdd"  --volume-type hdd 10
cinder create --display-name 'disk-hdd02' --description "hdd"  --volume-type hdd 10
cinder create --display-name 'disk-hdd03' --description "hdd"  --volume-type hdd 10
cinder create --display-name 'disk-hdd04' --description "hdd"  --volume-type hdd 10
cinder create --display-name 'disk-hdd05' --description "hdd"  --volume-type hdd 10
cinder create --display-name 'disk-hdd06' --description "hdd"  --volume-type hdd 20
cinder create --display-name 'disk-hdd07' --description "hdd"  --volume-type hdd 20

 

34. Check on controller1
cinder list

 

The output looks like this:
[root@controller1 ~]# cinder list
+--------------------------------------+-----------+------------+------+-------------+----------+-------------+
| ID                                   | Status    | Name       | Size | Volume Type | Bootable | Attached to |
+--------------------------------------+-----------+------------+------+-------------+----------+-------------+
| 0df0febd-3c0c-4b36-875b-326ba04910ae | available | disk-ssd15 | 10   | ssd         | false    |             |
| 0f0d53c1-01fa-4a44-9f2c-ad261c64ab8e | available | disk-ssd02 | 10   | ssd         | false    |             |
| 16c54644-80c8-42cb-8a25-989559a74cbc | available | disk-ssd01 | 10   | ssd         | false    |             |
| 20278d12-8e6a-4f7b-bc1f-b270337b9c19 | available | disk-hdd14 | 10   | hdd         | false    |             |
| 23c8a426-955c-4448-a360-f9b3b01e771d | available | disk-hdd02 | 10   | hdd         | false    |             |
| 2a825ea4-bcb4-4677-b4d8-8327580c1e5a | available | disk-hdd09 | 30   | hdd         | false    |             |
| 2e58945e-82b9-4fb2-be15-46bff12c2ebd | available | disk-hdd19 | 30   | hdd         | false    |             |
| 2fcaea2a-f695-4aa9-b536-4a85804b75a0 | available | disk-hdd10 | 50   | hdd         | false    |             |
| 33178ff2-e8f7-457a-a8ee-3f1ca178acd1 | available | disk-ssd05 | 10   | ssd         | false    |             |
| 3c524bd3-4900-4a65-93f9-b22376230a5b | available | disk-hdd11 | 10   | hdd         | false    |             |
| 435791f4-7117-4dab-82a5-c0580b91db62 | available | disk-ssd03 | 10   | ssd         | false    |             |
| 4cd3be32-2ed0-418a-9181-74724ec98968 | available | disk-ssd11 | 10   | ssd         | false    |             |
| 4f6dceb4-2a69-47f6-8e6c-df2950d91215 | available | disk-hdd07 | 20   | hdd         | false    |             |
| 525bbaac-bc08-4186-8756-5e7c5c3b6d10 | available | disk-hdd04 | 10   | hdd         | false    |             |
| 5e9bc16a-5958-471c-bd90-c869b5d5fc54 | available | disk-hdd03 | 10   | hdd         | false    |             |
| 62497ade-5ff1-457a-b66c-414c2a748cfd | available | disk-hdd01 | 10   | hdd         | false    |             |
| 63dafb97-0915-4fa5-bf6f-2f2dcaa3c1ee | available | disk-hdd17 | 20   | hdd         | false    |             |
| 6d02fab1-8c28-47bd-a6a6-8b1052b840ac | available | disk-ssd16 | 20   | ssd         | false    |             |
| 7842c4ca-0299-492c-b4ee-713c45addecf | available | disk-hdd16 | 20   | hdd         | false    |             |
| 7bfffe54-c5bd-4d33-aaea-3363a6193376 | available | disk-ssd04 | 10   | ssd         | false    |             |
| 7e6c084d-33f6-4e70-b0de-4d4ffa26f7a1 | available | disk-ssd12 | 10   | ssd         | false    |             |
| 81006c86-4096-40e3-ba36-c640377e78d3 | available | disk-hdd12 | 10   | hdd         | false    |             |
| 83bf9025-e79e-4c6f-b04f-e1581d2ebfc8 | available | disk-ssd18 | 20   | ssd         | false    |             |
| 8913a757-ec00-498c-b6a2-7def4659ba59 | available | disk-hdd20 | 50   | hdd         | false    |             |
| 8cf9c5fa-5a4c-4850-9e69-ec28a0bb0b03 | available | disk-hdd06 | 20   | hdd         | false    |             |
| 9805a937-871b-4198-94c1-060c1690522e | available | disk-hdd15 | 10   | hdd         | false    |             |
| a88d4f7e-c141-417a-af0d-c8ccf3633488 | available | disk-hdd18 | 20   | hdd         | false    |             |
| b771897b-5e4a-4825-b712-f3c7395c407c | available | disk-ssd08 | 20   | ssd         | false    |             |
| bece47f3-ef26-4ca5-940d-3ae8102d100e | available | disk-hdd08 | 20   | hdd         | false    |             |
| bf063f3c-8c53-4dbc-a843-295d0d752804 | available | disk-ssd13 | 10   | ssd         | false    |             |
| c727dcd6-4e75-4552-9b73-cfd5744a7994 | available | disk-hdd13 | 10   | hdd         | false    |             |
| d6ba28e7-a09c-4101-ae0f-074d8e288643 | available | disk-ssd06 | 20   | ssd         | false    |             |
| dc731d30-ff33-4b2b-b97e-4bbbcac1a455 | available | disk-ssd10 | 50   | ssd         | false    |             |
| dceefc2b-49ba-454e-945b-621a302444a5 | available | disk-ssd17 | 20   | ssd         | false    |             |
| e2ee9745-21d9-4ff6-ab00-34b142db056f | available | disk-ssd14 | 10   | ssd         | false    |             |
| e561d520-bd1a-4709-8421-5c4e50b825f0 | available | disk-ssd19 | 30   | ssd         | false    |             |
| eda6f2a5-8e7e-406f-9a49-8f374fe96dc0 | available | disk-ssd20 | 50   | ssd         | false    |             |
| ee42391f-f5d1-4e13-b7e3-837cfc4ab7a9 | available | disk-hdd05 | 10   | hdd         | false    |             |
| f40686d1-b909-4efe-a9d4-6b32bc0b5015 | available | disk-ssd09 | 30   | ssd         | false    |             |
| f5f0b1c4-945e-4b68-ad97-a4633554e096 | available | disk-ssd07 | 20   | ssd         | false    |             |
+--------------------------------------+-----------+------------+------+-------------+----------+-------------+

 




 

 
