Deploying a Ceph cluster with ceph-deploy: cluster expansion and RBD storage

Architecture topology

Host        IP         Roles             OS
ceph-admin 10.0.0.60 admin deploy mds centos7
ceph-node1 10.0.0.61 mon osd mds centos7
ceph-node2 10.0.0.62 mon osd mds centos7
ceph-node3 10.0.0.63 mon osd mds centos7

Preliminary preparation [run on every Ceph server]

# Base system tuning
#1. Switch to the Aliyun yum repos
rm -f /etc/yum.repos.d/*
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo


#2. Disable SELinux
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0

#3. Time synchronization via cron + ntpdate
echo '*/5 * * * * /usr/sbin/ntpdate -u ntp.api.bz' >>/var/spool/cron/root
systemctl restart crond.service
crontab -l

#4. Disable swap
sed -ri 's/.*swap.*/#&/' /etc/fstab
swapoff -a

#5. Configure the Ceph yum repo (Nautilus, Aliyun mirror):
cat >/etc/yum.repos.d/ceph.repo<<eof
[ceph]
name=ceph
baseurl=https://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch/
gpgcheck=0
enabled=1
[x86_64]
name=x86_64
baseurl=https://mirrors.aliyun.com/ceph/rpm-nautilus/el7/x86_64/
gpgcheck=0
enabled=1
eof

yum clean all



#6. Add one extra 20G disk to each server, then rescan the SCSI bus [so the new disk is detected]
echo "- - -" > /sys/class/scsi_host/host0/scan
echo "- - -" > /sys/class/scsi_host/host1/scan
echo "- - -" > /sys/class/scsi_host/host2/scan



#7. Configure name resolution (/etc/hosts)
cat >/etc/hosts <<eof
10.0.0.60 admin  ceph-admin
10.0.0.61 ceph01 ceph-node1
10.0.0.62 ceph02 ceph-node2
10.0.0.63 ceph03 ceph-node3
eof


#8. Set each server's own hostname
hostnamectl set-hostname [hostname]
bash


#9. Configure passwordless SSH trust:

ssh-keygen -f ~/.ssh/id_rsa -N ''

ssh-copy-id -i ~/.ssh/id_rsa.pub root@ceph-admin
ssh-copy-id -i ~/.ssh/id_rsa.pub root@ceph-node1
ssh-copy-id -i ~/.ssh/id_rsa.pub root@ceph-node2
ssh-copy-id -i ~/.ssh/id_rsa.pub root@ceph-node3
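
The four ssh-copy-id calls can also be written as a loop (a sketch assuming the hostnames from the hosts file above):

for host in ceph-admin ceph-node1 ceph-node2 ceph-node3; do
    ssh-copy-id -i ~/.ssh/id_rsa.pub root@$host
done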


# Verify [run a command on another server over SSH and confirm no password is asked]:
[root@ceph-admin ~]# ssh 10.0.0.61 "ifconfig eth0"
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.0.0.61  netmask 255.255.255.0  broadcast 10.0.0.255
        inet6 fe80::20c:29ff:fe86:4512  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:86:45:12  txqueuelen 1000  (Ethernet)
        RX packets 70680  bytes 98327829 (93.7 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 11931  bytes 1237409 (1.1 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
     
     
Copy the yum repo files to the other servers
cd /etc/yum.repos.d
scp * root@ceph-node1:/etc/yum.repos.d/
scp * root@ceph-node2:/etc/yum.repos.d/
scp * root@ceph-node3:/etc/yum.repos.d/

        
#10. Time synchronization:

ceph-admin acts as the NTP server; the other servers sync against it.
10.1 ceph-admin configuration:
yum install -y ntp
systemctl start ntpd
systemctl enable ntpd
timedatectl set-timezone Asia/Shanghai    # set the timezone to Asia/Shanghai

10.2 Sync ceph-admin against the Aliyun NTP servers:
Time-server configuration [ceph-admin acts as the time server]. In /etc/ntp.conf, find:
server 0.centos.pool.ntp.org iburst
server 1.centos.pool.ntp.org iburst
server 2.centos.pool.ntp.org iburst
server 3.centos.pool.ntp.org iburst

and change them to:
server ntp1.aliyun.com iburst

Restart ntpd:
systemctl restart ntpd

Check synchronization with: ntpq -pn
[root@ceph-admin ~]# ntpq -pn
     remote           refid      st t when poll reach   delay   offset  jitter
==============================================================================
*120.25.115.20   10.137.53.7      2 u    1   64    1   36.838    5.072   0.622


Configuration on the other servers:
yum install -y ntp
systemctl start ntpd
systemctl enable ntpd
timedatectl set-timezone Asia/Shanghai    # set the timezone to Asia/Shanghai

# Edit /etc/ntp.conf with sed (comment out the default servers and point at ceph-admin):
 sed  -i '20,25s/^server.*/# &/' /etc/ntp.conf
 sed  -i "25iserver 10.0.0.60 iburst" /etc/ntp.conf

# Restart the service:
systemctl restart ntpd
systemctl enable ntpd
ntpq -pn

# Check the sync status and confirm the node is syncing against the time server:
[root@ceph-node1 ~]# ntpq -pn
     remote           refid      st t when poll reach   delay   offset  jitter
==============================================================================
*10.0.0.60       120.25.115.20    3 u    6   64  377    0.334    0.582   0.501
# a leading * means this peer is selected and the clock is synced
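
To confirm every node in one pass, something like the following can be run from ceph-admin (a sketch that relies on the SSH trust configured earlier):

for host in ceph-node1 ceph-node2 ceph-node3; do
    echo "== $host =="
    ssh $host "ntpq -pn | grep '^\*' || echo 'not synced yet'"
done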

Install Ceph [ceph-admin node]

[root@ceph-admin ~]# yum install -y python-setuptools ceph-deploy
[root@ceph-admin ~]# ceph-deploy --version
2.0.1         #<------- version 2.0+ is recommended; ceph-deploy only needs to be installed on the deploy node


# Create a working directory for the cluster bootstrap
[root@ceph-admin ~]# mkdir /my-cluster
[root@ceph-admin ~]# cd  /my-cluster

# Create the initial monitor configuration:
--public-network 10.0.0.0/24     the public (client-facing) network
--cluster-network 10.0.0.0/24    the internal cluster network (replication/heartbeat traffic)

[root@ceph-admin my-cluster]# ceph-deploy new  --public-network 10.0.0.0/24 --cluster-network 10.0.0.0/24 ceph-admin
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy new --public-network 10.0.0.0/24 --cluster-network 10.0.0.0/24 ceph-admin
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  func                          : <function new at 0x14a3140>
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x15089e0>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  ssh_copykey                   : True
[ceph_deploy.cli][INFO  ]  mon                           : ['ceph-admin']
[ceph_deploy.cli][INFO  ]  public_network                : 10.0.0.0/24
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  cluster_network               : 10.0.0.0/24
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  fsid                          : None
[ceph_deploy.new][DEBUG ] Creating new cluster named ceph
[ceph_deploy.new][INFO  ] making sure passwordless SSH succeeds
[ceph-admin][DEBUG ] connected to host: ceph-admin 
[ceph-admin][DEBUG ] detect platform information from remote host
[ceph-admin][DEBUG ] detect machine type
[ceph-admin][DEBUG ] find the location of an executable
[ceph-admin][INFO  ] Running command: /usr/sbin/ip link show
[ceph-admin][INFO  ] Running command: /usr/sbin/ip addr show
[ceph-admin][DEBUG ] IP addresses found: [u'10.0.0.60']
[ceph_deploy.new][DEBUG ] Resolving host ceph-admin
[ceph_deploy.new][DEBUG ] Monitor ceph-admin at 10.0.0.60
[ceph_deploy.new][DEBUG ] Monitor initial members are ['ceph-admin']
[ceph_deploy.new][DEBUG ] Monitor addrs are [u'10.0.0.60']
[ceph_deploy.new][DEBUG ] Creating a random mon key...
[ceph_deploy.new][DEBUG ] Writing monitor keyring to ceph.mon.keyring...
[ceph_deploy.new][DEBUG ] Writing initial config to ceph.conf...


Three files are generated after this step:
[root@ceph-admin my-cluster]# ll
total 12
-rw-r--r-- 1 root root  256 Oct 10 00:21 ceph.conf             # configuration file
-rw-r--r-- 1 root root 3034 Oct 10 00:21 ceph-deploy-ceph.log  # deployment log
-rw------- 1 root root   73 Oct 10 00:21 ceph.mon.keyring      # monitor keyring, used for authentication

[root@ceph-admin my-cluster]# cat ceph.conf 
[global]
fsid = ce3bead3-55ca-4b88-9dff-0c7dd4db1880
public_network = 10.0.0.0/24         # public network
cluster_network = 10.0.0.0/24        # cluster (internal) network
mon_initial_members = ceph-admin
mon_host = 10.0.0.60
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx


# Install the packages manually so ceph-deploy does not switch the repo to the upstream (overseas) mirror and fail:
# Required packages [install on every node]:
 yum install -y ceph ceph-mon ceph-mgr ceph-radosgw ceph-mds
 
 
 # Initialize the mon node:
 [root@ceph-admin my-cluster]# ceph-deploy mon create-initial
 After initialization the following keyring files appear:
 [root@ceph-admin my-cluster]# ll
total 44
-rw------- 1 root root   113 Oct 10 00:33 ceph.bootstrap-mds.keyring
-rw------- 1 root root   113 Oct 10 00:33 ceph.bootstrap-mgr.keyring
-rw------- 1 root root   113 Oct 10 00:33 ceph.bootstrap-osd.keyring
-rw------- 1 root root   113 Oct 10 00:33 ceph.bootstrap-rgw.keyring
-rw------- 1 root root   151 Oct 10 00:33 ceph.client.admin.keyring
-rw-r--r-- 1 root root   256 Oct 10 00:21 ceph.conf
-rw-r--r-- 1 root root 16135 Oct 10 00:33 ceph-deploy-ceph.log
-rw------- 1 root root    73 Oct 10 00:21 ceph.mon.keyring

The config and admin keyring still have to be pushed to every node:
[root@ceph-admin my-cluster]# ceph-deploy admin ceph-admin ceph-node1 ceph-node2 ceph-node3


# The basic Ceph configuration is done; check the status with ceph -s:
[root@ceph-admin my-cluster]# ceph -s
  cluster:
    id:     ce3bead3-55ca-4b88-9dff-0c7dd4db1880
    health: HEALTH_OK                  # cluster is configured
 
  services:
    mon: 1 daemons, quorum ceph-admin (age 4m)    # one mon node
    mgr: no daemons active             # no mgr deployed yet
    osd: 0 osds: 0 up, 0 in            # no OSDs added yet
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     


# Deploy the manager (mgr) daemon [it can run on ceph-admin or any other node; here it goes to ceph-node1]:
[root@ceph-admin my-cluster]# ceph-deploy mgr create ceph-node1
...
...
...
[ceph-node1][DEBUG ] create path recursively if it doesn't exist
[ceph-node1][INFO  ] Running command: ceph --cluster ceph --name client.bootstrap-mgr --keyring /var/lib/ceph/bootstrap-mgr/ceph.keyring auth get-or-create mgr.ceph-node1 mon allow profile mgr osd allow * mds allow * -o /var/lib/ceph/mgr/ceph-ceph-node1/keyring
[ceph-node1][INFO  ] Running command: systemctl enable ceph-mgr@ceph-node1
[ceph-node1][WARNIN] Created symlink from /etc/systemd/system/ceph-mgr.target.wants/ceph-mgr@ceph-node1.service to /usr/lib/systemd/system/ceph-mgr@.service.
[ceph-node1][INFO  ] Running command: systemctl start ceph-mgr@ceph-node1  # execution log
[ceph-node1][INFO  ] Running command: systemctl enable ceph.target

# Check after the mgr is deployed:
[root@ceph-admin my-cluster]# ceph -s
  cluster:
    id:     ce3bead3-55ca-4b88-9dff-0c7dd4db1880
    health: HEALTH_WARN
            OSD count 0 < osd_pool_default_size 3
 
  services:
    mon: 1 daemons, quorum ceph-admin (age 10m)
    mgr: ceph-node1(active, since 79s)   #<----- the mgr is now running on ceph-node1
    osd: 0 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     



# Add disks:
 Each VM gets one extra 20G disk; rescan the SCSI bus to detect it:
echo "- - -" > /sys/class/scsi_host/host0/scan
echo "- - -" > /sys/class/scsi_host/host1/scan
echo "- - -" > /sys/class/scsi_host/host2/scan
Example run:
[root@ceph-admin my-cluster]# echo "- - -" > /sys/class/scsi_host/host0/scan
[root@ceph-admin my-cluster]# echo "- - -" > /sys/class/scsi_host/host1/scan
[root@ceph-admin my-cluster]# echo "- - -" > /sys/class/scsi_host/host2/scan
[root@ceph-admin my-cluster]# lsblk 
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda               8:0    0   20G  0 disk 
├─sda1            8:1    0  200M  0 part /boot
└─sda2            8:2    0 19.8G  0 part 
  └─centos-root 253:0    0 19.8G  0 lvm  /
sdb               8:16   0   20G  0 disk      # the newly added disk
sr0              11:0    1  4.2G  0 rom  


ceph-deploy osd create ceph-admin --data /dev/sdb
ceph-deploy osd create ceph-node1 --data /dev/sdb
ceph-deploy osd create ceph-node2 --data /dev/sdb
ceph-deploy osd create ceph-node3 --data /dev/sdb
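
For reference, the same four commands as a loop (a sketch; it assumes every host received its extra disk as /dev/sdb):

for host in ceph-admin ceph-node1 ceph-node2 ceph-node3; do
    ceph-deploy osd create $host --data /dev/sdb
done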

Check the status after all 4 disks have been added as OSDs:
[root@ceph-admin my-cluster]# ceph -s
  cluster:
    id:     ce3bead3-55ca-4b88-9dff-0c7dd4db1880
    health: HEALTH_OK
 
  services:
    mon: 1 daemons, quorum ceph-admin (age 19m)
    mgr: ceph-node1(active, since 10m)
    osd: 4 osds: 4 up (since 4s), 4 in (since 4s)
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   4.0 GiB used, 76 GiB / 80 GiB avail    # 4 x 20G adds up to 80G of raw capacity
    pgs:     

[root@ceph-admin my-cluster]# ceph osd tree
ID CLASS WEIGHT  TYPE NAME           STATUS REWEIGHT PRI-AFF 
-1       0.07794 root default                                
-3       0.01949     host ceph-admin                         
 0   hdd 0.01949         osd.0           up  1.00000 1.00000 
-5       0.01949     host ceph-node1                         
 1   hdd 0.01949         osd.1           up  1.00000 1.00000 
-7       0.01949     host ceph-node2                         
 2   hdd 0.01949         osd.2           up  1.00000 1.00000 
-9       0.01949     host ceph-node3                         
 3   hdd 0.01949         osd.3           up  1.00000 1.00000 
 
 
 
 # At this point a basic Ceph cluster is deployed: 1 admin/mon node, 1 mgr node, and 4 OSDs
 
 
 
 # Expanding the mon nodes
 Note: the monitor count should be kept odd (1, 3, 5, ...), so adding two more monitors (ceph-node1 and ceph-node2) would give 3. The commands below add all three nodes, which leaves the cluster with 4 monitors and deviates from the odd-number recommendation.
 [root@ceph-admin my-cluster]# ceph-deploy mon add ceph-node1 --address 10.0.0.61
 [root@ceph-admin my-cluster]# ceph-deploy mon add ceph-node2 --address 10.0.0.62
 [root@ceph-admin my-cluster]# ceph-deploy mon add ceph-node3 --address 10.0.0.63
 Check:
 [root@ceph-admin my-cluster]# ceph -s 
  cluster:
    id:     ce3bead3-55ca-4b88-9dff-0c7dd4db1880
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph-admin,ceph-node1,ceph-node2 (age 2s)    # after adding 2 mon nodes the quorum has 3 members (this output was taken before ceph-node3 joined)
    mgr: ceph-node1(active, since 21m)      # still a single mgr; mgr expansion follows below
    osd: 4 osds: 4 up (since 10m), 4 in (since 10m)
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   4.0 GiB used, 76 GiB / 80 GiB avail
    pgs:     
 Check:
[root@ceph-admin my-cluster]# ceph mon stat
e4: 4 mons at {ceph-admin=[v2:10.0.0.60:3300/0,v1:10.0.0.60:6789/0],ceph-node1=[v2:10.0.0.61:3300/0,v1:10.0.0.61:6789/0],ceph-node2=[v2:10.0.0.62:3300/0,v1:10.0.0.62:6789/0],ceph-node3=[v2:10.0.0.63:3300/0,v1:10.0.0.63:6789/0]}, election epoch 16, leader 0 ceph-admin, quorum 0,1,2,3 ceph-admin,ceph-node1,ceph-node2,ceph-node3

 Check:
[root@ceph-admin my-cluster]# ceph mon dump
dumped monmap epoch 4
epoch 4
fsid ce3bead3-55ca-4b88-9dff-0c7dd4db1880
last_changed 2020-10-10 01:10:00.536702
created 2020-10-10 00:33:06.013571
min_mon_release 14 (nautilus)
0: [v2:10.0.0.60:3300/0,v1:10.0.0.60:6789/0] mon.ceph-admin
1: [v2:10.0.0.61:3300/0,v1:10.0.0.61:6789/0] mon.ceph-node1
2: [v2:10.0.0.62:3300/0,v1:10.0.0.62:6789/0] mon.ceph-node2
3: [v2:10.0.0.63:3300/0,v1:10.0.0.63:6789/0] mon.ceph-node3



Monitor expansion involves a quorum election, which can be inspected with:
ceph quorum_status --format json-pretty
ceph quorum_status --format json-pretty|grep quorum_leader_name



 # Expanding the mgr nodes:
[root@ceph-admin my-cluster]# ceph-deploy mgr create ceph-node1 ceph-node2 ceph-node3
[root@ceph-admin my-cluster]# ceph -s
  cluster:
    id:     ce3bead3-55ca-4b88-9dff-0c7dd4db1880
    health: HEALTH_OK
 
  services:
    mon: 4 daemons, quorum ceph-admin,ceph-node1,ceph-node2,ceph-node3 (age 8m)
    mgr: ceph-node1(active, since 36m), standbys: ceph-node2, ceph-node3   # active/standby: ceph-node1 is active; if it fails, a standby takes over
    osd: 4 osds: 4 up (since 25m), 4 in (since 25m)
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   4.0 GiB used, 76 GiB / 80 GiB avail
    pgs:     

A highly available cluster is now in place,
with 4 mon nodes and 3 mgr nodes.

The three Ceph storage interfaces

Block storage (RBD) [the most widely used]:

# Create and use block storage


# Create a pool

Before creating a pool, the default pg_num is usually overridden. The commonly quoted guidance is (a rule-of-thumb calculation is sketched after this list):
fewer than 5 OSDs: set pg_num to 128
5-10 OSDs: set pg_num to 512
10-50 OSDs: set pg_num to 4096
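
A widely used rule of thumb is roughly (number of OSDs x 100) / replica count, rounded to a nearby power of two; a quick shell sketch with this cluster's numbers (4 OSDs, 3 replicas):

osds=4; replicas=3
target=$(( osds * 100 / replicas ))              # 133
pg=1; while [ $((pg * 2)) -le $target ]; do pg=$((pg * 2)); done   # largest power of two not above the target
echo "suggested pg_num: $pg"                     # -> 128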

[To demonstrate growing pg_num later, it is set to 64 here and raised to 128 further down]
[root@ceph-admin my-cluster]# ceph osd pool create ceph-pool 64 64
pool 'ceph-pool' created


# Inspect:
[root@ceph-admin my-cluster]# ceph osd lspools                     # list pools
1 ceph-pool
[root@ceph-admin my-cluster]# ceph osd pool get ceph-pool pg_num   # pg_num
pg_num: 64
[root@ceph-admin my-cluster]# ceph osd pool get ceph-pool pgp_num  # pgp_num
pgp_num: 64
[root@ceph-admin my-cluster]# ceph osd pool get ceph-pool size     # replica count
size: 3
[root@ceph-admin my-cluster]# ceph osd pool get ceph-pool crush_rule   # crush rule
crush_rule: replicated_rule


Growing / shrinking:
[root@ceph-admin my-cluster]# ceph osd pool set ceph-pool size 2   # change the replica count to 2
set pool 1 size to 2  
[root@ceph-admin my-cluster]# ceph osd pool get ceph-pool size     # verify the replica count
size: 2

[root@ceph-admin my-cluster]# ceph osd pool get ceph-pool pg_num    # current pg_num
pg_num: 64 
[root@ceph-admin my-cluster]# ceph osd pool set ceph-pool pg_num 128    # raise pg_num; pgp_num must be raised as well so the two stay consistent
set pool 1 pg_num to 128
[root@ceph-admin my-cluster]# ceph osd pool set ceph-pool pgp_num 128   # raise pgp_num
set pool 1 pgp_num to 128



# Create RBD images in the pool (this sets the image size, not the pool size):
Method 1:
[root@ceph-admin my-cluster]# rbd create -p ceph-pool --image rbd_test.img --size 10G    # a 10G image

Method 2:
rbd create ceph-pool/rbd_test1.img --size 10G

List the images:
[root@ceph-admin my-cluster]# rbd -p ceph-pool ls
rbd_test.img
rbd_test1.img

[root@ceph-admin my-cluster]# rbd info ceph-pool/rbd_test.img    # detailed information about this image
rbd image 'rbd_test.img':
	size 10 GiB in 2560 objects
	order 22 (4 MiB objects)
	snapshot_count: 0
	id: 12c09e1a0dcd
	block_name_prefix: rbd_data.12c09e1a0dcd
	format: 2
	features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
	op_features: 
	flags: 
	create_timestamp: Sat Oct 10 01:41:24 2020
	access_timestamp: Sat Oct 10 01:41:24 2020
	modify_timestamp: Sat Oct 10 01:41:24 2020
	
[root@ceph-admin my-cluster]# rbd info ceph-pool/rbd_test1.img   # detailed information about this image
rbd image 'rbd_test1.img': 
	size 10 GiB in 2560 objects
	order 22 (4 MiB objects)
	snapshot_count: 0
	id: 12d0a7da2dfa
	block_name_prefix: rbd_data.12d0a7da2dfa
	format: 2
	features: layering, exclusive-lock, object-map, fast-diff, deep-flatten  # these extra features can be disabled
	op_features: 
	flags: 
	create_timestamp: Sat Oct 10 01:43:40 2020
	access_timestamp: Sat Oct 10 01:43:40 2020
	modify_timestamp: Sat Oct 10 01:43:40 2020


# Disable the extra features (fast-diff depends on object-map, and object-map on exclusive-lock, so disable them in this order):
rbd feature disable ceph-pool/rbd_test1.img deep-flatten
rbd feature disable ceph-pool/rbd_test1.img fast-diff
rbd feature disable ceph-pool/rbd_test1.img object-map
rbd feature disable ceph-pool/rbd_test1.img exclusive-lock
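
The same features can usually be disabled in a single command as well (a sketch; the list matches the default features shown above):

rbd feature disable ceph-pool/rbd_test1.img exclusive-lock object-map fast-diff deep-flatten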

[root@ceph-admin my-cluster]# rbd info ceph-pool/rbd_test1.img
rbd image 'rbd_test1.img':
	size 10 GiB in 2560 objects
	order 22 (4 MiB objects)
	snapshot_count: 0
	id: 119d1e259330
	block_name_prefix: rbd_data.119d1e259330
	format: 2
	features: layering   # only layering remains now
	op_features: 
	flags: 
	create_timestamp: Sun Oct 11 00:27:40 2020
	access_timestamp: Sun Oct 11 00:27:40 2020
	modify_timestamp: Sun Oct 11 00:27:40 2020


Important:
#### Note: the image can only be mapped once its features are reduced to layering (the CentOS 7 kernel RBD client does not support the other features)
# Map the image as a block device:
[root@ceph-admin my-cluster]# rbd map ceph-pool/rbd_test1.img
/dev/rbd0
[root@ceph-admin my-cluster]# rbd device list
id pool       namespace image           snap   device    
0  ceph-pool            rbd_test1.img   -      /dev/rbd0 

rbd_test1.img is now mapped as a block device: /dev/rbd0




# With two images in the pool, deleting one looks like this:
[root@ceph-admin my-cluster]# rbd rm -p ceph-pool --image rbd_test1.img
Removing image: 100% complete...done.

Check:
[root@ceph-admin my-cluster]# rbd -p ceph-pool ls 
rbd_test.img


# If the image was mapped on another server, unmap it there first:
rbd unmap /dev/rbd0


# Mount the block device:
# After the map step, the server has an RBD block device
# that can be formatted and used like an ordinary disk:
[root@ceph-admin my-cluster]# fdisk -l

Disk /dev/sda: 21.5 GB, 21474836480 bytes, 41943040 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x000aef55

   Device Boot      Start         End      Blocks   Id  System
/dev/sda1   *        2048      411647      204800   83  Linux
/dev/sda2          411648    41943039    20765696   8e  Linux LVM

Disk /dev/mapper/centos-root: 21.3 GB, 21260926976 bytes, 41525248 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes


Disk /dev/rbd0: 10.7 GB, 10737418240 bytes, 20971520 sectors    ### the RBD block device; also visible with lsblk
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes


The block device now needs to be mounted; the options are as follows:

Local mount:
1. Format:
[root@ceph-admin my-cluster]# mkfs.ext4 /dev/rbd0      # format as ext4
mke2fs 1.42.9 (28-Dec-2013)
Discarding device blocks: done                            
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=1024 blocks, Stripe width=1024 blocks
655360 inodes, 2621440 blocks
131072 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=2151677952
80 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks: 
	32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632

Allocating group tables: done                            
Writing inode tables: done                            
Creating journal (32768 blocks): done
Writing superblocks and filesystem accounting information: done 

# Production note: mkfs wipes the device. If the device already contains an ext4 filesystem (for example after growing the image), run resize2fs /dev/rbd0 instead; it resizes the filesystem without touching the existing data.


2. Mount:
[root@ceph-admin my-cluster]# mkdir /rbd-demo                  # create a mount point
[root@ceph-admin my-cluster]# mount /dev/rbd0 /rbd-demo        # mount the device there
[root@ceph-admin my-cluster]# df -h                            # verify
Filesystem               Size  Used Avail Use% Mounted on
/dev/mapper/centos-root   20G  1.8G   19G  10% /
devtmpfs                 2.0G     0  2.0G   0% /dev
tmpfs                    2.0G     0  2.0G   0% /dev/shm
tmpfs                    2.0G  8.7M  2.0G   1% /run
tmpfs                    2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/sda1                197M  103M   95M  53% /boot
tmpfs                    394M     0  394M   0% /run/user/0
/dev/rbd0                9.8G   37M  9.2G   1% /rbd-demo

3. Mount automatically at boot:
3.1 Find the UUID
[root@ceph-admin my-cluster]# blkid 
/dev/sr0: UUID="2017-09-06-10-51-00-00" LABEL="CentOS 7 x86_64" TYPE="iso9660" PTTYPE="dos"
/dev/sda1: UUID="cb322c86-f37a-49e8-86a0-924d9b0c719e" TYPE="xfs" 
/dev/sda2: UUID="4YLVja-jT6q-FphK-3E60-5Qde-Z3gw-zLpcYT" TYPE="LVM2_member" 
/dev/mapper/centos-root: UUID="e3508edd-30c0-4e7d-9d8f-622c8dbd174e" TYPE="xfs" 
/dev/rbd0: UUID="f7abce4f-6a51-4c65-bf89-99eec90869ec" TYPE="ext4" 

3.2 Add the entry to /etc/fstab [note: the filesystem here is ext4, not xfs, and the nested quotes in the original command would break the echo]:
echo 'UUID=f7abce4f-6a51-4c65-bf89-99eec90869ec  /rbd-demo  ext4  defaults 0 0' >>/etc/fstab




Remote mount:
Example: a separate host, ceph-client, needs to mount an RBD image.

On the server side (ceph-admin):
 rbd create ceph-pool/rbd_test2.img --size 5G
 rbd pool init ceph-pool
 ceph-deploy admin [ceph-client name]        # the client's hostname; its name and IP must be in the admin node's hosts file, otherwise this step fails
                                             # it pushes a ceph.client.admin.keyring file, which serves as the client's credential for accessing RBD
 
On the client side:
1. Install the Ceph client tools
yum install -y ceph-common

2. Obtain the Ceph key
 Copy the contents of the server's ceph.client.admin.keyring into the same file on the client (create the file if it does not exist).

3. Map the block device
rbd map ceph-pool/rbd_test1.img
To undo the mapping: rbd unmap /dev/rbd0  [rbd0 is whatever device name the mapping produced; it may differ]

4. Mount it into a directory:
   mkdir /root/rdb0 -p
   resize2fs  /dev/rbd0         # only if the image already carries an ext4 filesystem; a brand-new image needs mkfs first (see the sketch below)
   mount  /dev/rbd0 /root/rdb0
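
If the mapped image is brand new (like rbd_test2.img created on the server above) it carries no filesystem yet, so the first mount needs mkfs rather than resize2fs. A sketch of the first-time path (the /dev/rbd0 device name is an assumption; use whatever rbd map prints):

rbd map ceph-pool/rbd_test2.img          # prints the device name, e.g. /dev/rbd0
mkfs.ext4 /dev/rbd0                      # first-time format only -- this wipes the device
mkdir -p /root/rdb0
mount /dev/rbd0 /root/rdb0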




# Growing the image:
[root@ceph-client ~]# rbd resize ceph-pool/rbd_test1.img --size 15G
Resizing image: 100% complete...done.

Check whether the size has changed:
[root@ceph-client ~]# df -Th
Filesystem              Type      Size  Used Avail Use% Mounted on
/dev/rbd0               ext4      9.8G   37M  9.2G   1% /root/rdb0    # still ~10G; the filesystem has not grown yet

Grow the filesystem:
[root@ceph-client ~]# resize2fs  /dev/rbd0     # online resize; existing data is preserved
resize2fs 1.42.9 (28-Dec-2013)
Filesystem at /dev/rbd0 is mounted on /root/rdb0; on-line resizing required
old_desc_blocks = 2, new_desc_blocks = 2
The filesystem on /dev/rbd0 is now 3932160 blocks long.

Check again:
[root@ceph-client ~]# df -Th
Filesystem              Type      Size  Used Avail Use% Mounted on
/dev/rbd0               ext4       15G   41M   14G   1% /root/rdb0    # the filesystem now reflects the larger size
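
Growing is the easy direction. rbd resize also accepts --allow-shrink, but the filesystem has to be shrunk first and ext4 cannot be shrunk while mounted, so shrinking deserves extra care. A sketch for reference only (do not run it against a filesystem that still uses the space):

umount /root/rdb0
e2fsck -f /dev/rbd0                                      # required before an offline shrink
resize2fs /dev/rbd0 8G                                   # shrink the filesystem first
rbd resize ceph-pool/rbd_test1.img --size 8G --allow-shrink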