II. Deploying a Ceph cluster with ceph-deploy

1. Lab environment

OS version: Ubuntu 18.04.5 LTS
Kernel version: 4.15.0-112-generic
Ceph version: pacific/16.2.7
Host allocation:
# Deployment server: ceph-deploy
192.168.1.200/192.168.88.200  ceph-deploy
# Two ceph-mgr management servers
192.168.1.201/192.168.88.201  ceph-mgr1 ceph-rgw1
192.168.1.202/192.168.88.202  ceph-mgr2 ceph-rgw2
# Three servers act as Ceph cluster MON monitor servers; each of them can reach the cluster network of the Ceph cluster.
192.168.1.203/192.168.88.203  ceph-mon1 ceph-mds1
192.168.1.204/192.168.88.204  ceph-mon2 ceph-mds2
192.168.1.205/192.168.88.205  ceph-mon3 ceph-mds3
# Four servers act as Ceph cluster OSD storage servers. Each server has two networks: the public network for client access and the cluster network for cluster management and data replication. Each server has three or more disks.
192.168.1.206/192.168.88.206  ceph-node1
192.168.1.207/192.168.88.207  ceph-node2
192.168.1.208/192.168.88.208  ceph-node3
192.168.1.209/192.168.88.209  ceph-node4
# Disk layout
# /dev/sdb /dev/sdc /dev/sdd  # 10G each

2. System environment initialization

2.1 Configure the Tsinghua mirror on all nodes

cat >/etc/apt/sources.list<<EOF
# deb-src entries are commented out by default to speed up apt update; uncomment them if needed
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-updates main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-updates main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-backports main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-backports main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-security main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-security main restricted universe multiverse
EOF

2.2 Install common tools on all nodes

apt install iproute2  ntpdate  tcpdump telnet traceroute nfs-kernel-server nfs-common  lrzsz tree  openssl libssl-dev libpcre3 libpcre3-dev zlib1g-dev gcc openssh-server iotop unzip zip  apt-transport-https ca-certificates curl software-properties-common -y

2.3 Kernel parameters on all nodes

cat >/etc/sysctl.conf <<EOF
# Controls source route verification 
net.ipv4.conf.default.rp_filter = 1 
net.ipv4.ip_nonlocal_bind = 1 
net.ipv4.ip_forward = 1 
  
# Do not accept source routing 
net.ipv4.conf.default.accept_source_route = 0 
  
# Controls the System Request debugging functionality of the kernel 
kernel.sysrq = 0 
  
# Controls whether core dumps will append the PID to the core filename.
# Useful for debugging multi-threaded applications.
kernel.core_uses_pid = 1
  
# Controls the use of TCP syncookies 
net.ipv4.tcp_syncookies = 1 
  
# Disable netfilter on bridges. 
net.bridge.bridge-nf-call-ip6tables = 0 
net.bridge.bridge-nf-call-iptables = 0 
net.bridge.bridge-nf-call-arptables = 0 
  
# Controls the default maximum size of a message queue
kernel.msgmnb = 65536 
  
# Controls the maximum size of a message, in bytes
kernel.msgmax = 65536 
  
# Controls the maximum shared segment size, in bytes 
kernel.shmmax = 68719476736 
  
# Controls the maximum number of shared memory segments, in pages
kernel.shmall = 4294967296 
  
# TCP kernel paramater 
net.ipv4.tcp_mem = 786432 1048576 1572864 
net.ipv4.tcp_rmem = 4096        87380   4194304 
net.ipv4.tcp_wmem = 4096        16384   4194304 
net.ipv4.tcp_window_scaling = 1 
net.ipv4.tcp_sack = 1 
  
# socket buffer 
net.core.wmem_default = 8388608 
net.core.rmem_default = 8388608 
net.core.rmem_max = 16777216 
net.core.wmem_max = 16777216 
net.core.netdev_max_backlog = 262144 
net.core.somaxconn = 20480 
net.core.optmem_max = 81920 
  
# TCP conn 
net.ipv4.tcp_max_syn_backlog = 262144 
net.ipv4.tcp_syn_retries = 3 
net.ipv4.tcp_retries1 = 3 
net.ipv4.tcp_retries2 = 15 
  
# tcp conn reuse 
net.ipv4.tcp_timestamps = 0 
net.ipv4.tcp_tw_reuse = 0 
net.ipv4.tcp_tw_recycle = 0 
net.ipv4.tcp_fin_timeout = 1 
  
  
net.ipv4.tcp_max_tw_buckets = 20000 
net.ipv4.tcp_max_orphans = 3276800 
net.ipv4.tcp_synack_retries = 1 
net.ipv4.tcp_syncookies = 1 
  
# keepalive conn 
net.ipv4.tcp_keepalive_time = 300 
net.ipv4.tcp_keepalive_intvl = 30 
net.ipv4.tcp_keepalive_probes = 3 
net.ipv4.ip_local_port_range = 10001    65000 
  
# swap 
vm.overcommit_memory = 0 
vm.swappiness = 10 
  
#net.ipv4.conf.eth1.rp_filter = 0 
#net.ipv4.conf.lo.arp_ignore = 1 
#net.ipv4.conf.lo.arp_announce = 2 
#net.ipv4.conf.all.arp_ignore = 1 
#net.ipv4.conf.all.arp_announce = 2 
EOF
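
These values only take effect once they are loaded. A minimal sketch for applying and spot-checking them (the net.bridge.* keys are rejected unless the br_netfilter module is loaded first):

# Load the bridge netfilter module, otherwise sysctl rejects the net.bridge.* keys
modprobe br_netfilter
# Apply /etc/sysctl.conf and spot-check one value
sysctl -p
sysctl net.ipv4.ip_forward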

2.4 Resource limits configuration on all nodes

cat > /etc/security/limits.conf <<EOF
root                soft    core            unlimited 
root                hard    core            unlimited 
root                soft    nproc           1000000 
root                hard    nproc           1000000 
root                soft    nofile          1000000 
root                hard    nofile          1000000 
root                soft    memlock         32000 
root                hard    memlock         32000 
root                soft    msgqueue        8192000 
root                hard    msgqueue        8192000 
  
 
*                soft    core            unlimited 
*                hard    core            unlimited 
*                soft    nproc           1000000 
*                hard    nproc           1000000 
*                soft    nofile          1000000 
*                hard    nofile          1000000 
*                soft    memlock         32000 
*                hard    memlock         32000 
*                soft    msgqueue        8192000 
*                hard    msgqueue        8192000 
EOF

Remember to reboot the servers so the new limits take effect.
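
After rebooting (or at least logging in again), the new limits can be spot-checked, for example:

# Values should match limits.conf above
ulimit -n    # open files, expected 1000000
ulimit -u    # max user processes, expected 1000000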

2.5 Time synchronization on all nodes

# Install cron and make sure it is running
apt install cron -y
systemctl status cron.service
# Sync the time once
/usr/sbin/ntpdate time1.aliyun.com &> /dev/null && hwclock -w
# Sync the time every 5 minutes
 echo "*/5 * * * * /usr/sbin/ntpdate time1.aliyun.com &> /dev/null && hwclock -w" >> /var/spool/cron/crontabs/root

2.6 Configure /etc/hosts on all nodes

cat >> /etc/hosts << EOF
192.168.1.200 ceph-deploy
192.168.1.201 ceph-mgr1 ceph-rgw1
192.168.1.202 ceph-mgr2 ceph-rgw2
192.168.1.203 ceph-mon1 ceph-mds1
192.168.1.204 ceph-mon2 ceph-mds2
192.168.1.205 ceph-mon3 ceph-mds3
192.168.1.206 ceph-node1
192.168.1.207 ceph-node2
192.168.1.208 ceph-node3
192.168.1.209 ceph-node4
EOF
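
A small connectivity check against the host names above (a sketch; it assumes the /etc/hosts entries just written):

for h in ceph-deploy ceph-mgr1 ceph-mgr2 ceph-mon1 ceph-mon2 ceph-mon3 ceph-node1 ceph-node2 ceph-node3 ceph-node4; do
  ping -c1 -W1 ${h} &> /dev/null && echo "${h} reachable" || echo "${h} UNREACHABLE"
done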

2.7 Install Python 2.7 on all nodes

Python 2.7 is needed during the Ceph initialization steps.

apt install python2.7 -y
ln -sv /usr/bin/python2.7 /usr/bin/python2
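
A quick check that the interpreter and the python2 symlink are in place:

python2 --version    # should report Python 2.7.x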

3. Ceph cluster deployment

3.1 Configure the Ceph apt repository and import the release key on all nodes

# Configure the Ceph repository
echo "deb https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific bionic main" | sudo tee -a /etc/apt/sources.list
# Import the release key
wget -q -O- 'https://mirrors.tuna.tsinghua.edu.cn/ceph/keys/release.asc' | sudo apt-key add -
# Update the package index
apt update
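
To confirm that the pacific packages are now visible from the new repository, for example:

# The pacific versions (16.2.x) should appear alongside the Ubuntu versions
apt-cache policy ceph-common
apt-cache madison ceph-deploy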

3.2 Create a ceph deployment user on all nodes and allow it to run privileged commands via sudo:

It is recommended to deploy and run the Ceph cluster as a dedicated ordinary user; the user only needs to be able to run a few privileged commands non-interactively via sudo. Newer versions of ceph-deploy accept any user that can run sudo commands, including root, but an ordinary user such as ceph, cephuser or cephadmin is still recommended for managing the Ceph cluster.

# The cephadmin user is used here

groupadd -r -g 2022 cephadmin && useradd -r -m -s /bin/bash -u 2022 -g 2022 cephadmin && echo cephadmin:123456 | chpasswd

# Allow the user to run privileged commands with sudo, without a password

echo "cephadmin ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers

3.3 Configure passwordless SSH login

Configure the ceph-deploy node for non-interactive login to each ceph node/mon/mgr node: generate a key pair for the deployment user (cephadmin) on the ceph-deploy node, then distribute the public key to the same user on each managed node.

# 1. Create an SSH key pair
cephadmin@ceph-deploy:~$ ssh-keygen 
Generating public/private rsa key pair.
Enter file in which to save the key (/home/cephadmin/.ssh/id_rsa): 
Created directory '/home/cephadmin/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /home/cephadmin/.ssh/id_rsa.
Your public key has been saved in /home/cephadmin/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:NpcJMRHsniqjckUYAYRt2tUK6yEik/9xE17Q4K5VjTY cephadmin@ceph-deploy
The key's randomart image is:
+---[RSA 2048]----+
|++.. ..+=o       |
|. = ..o o=       |
| = * ..oE .      |
|B = o..ooo o     |
|o= o .o+S.+      |
|  o oo+.oo       |
|   o.o o         |
|. . + .          |
| o.. o           |
+----[SHA256]-----+
cephadmin@ceph-deploy:~$

# 2. Install sshpass
cephadmin@ceph-deploy:~$ sudo apt install sshpass
Reading package lists... Done
Building dependency tree       
Reading state information... Done
The following NEW packages will be installed:
  sshpass
0 upgraded, 1 newly installed, 0 to remove and 164 not upgraded.
Need to get 10.5 kB of archives.
After this operation, 30.7 kB of additional disk space will be used.
Get:1 https://mirrors.tuna.tsinghua.edu.cn/ubuntu bionic/universe amd64 sshpass amd64 1.06-1 [10.5 kB]
Fetched 10.5 kB in 1s (16.6 kB/s)                             
Selecting previously unselected package sshpass.
(Reading database ... 72596 files and directories currently installed.)
Preparing to unpack .../sshpass_1.06-1_amd64.deb ...
Unpacking sshpass (1.06-1) ...
Setting up sshpass (1.06-1) ...
Processing triggers for man-db (2.8.3-2ubuntu0.1) ...

# 3. Key distribution script, run as cephadmin on the ceph-deploy node
#!/bin/bash
#Target host list
IP="
192.168.1.200
192.168.1.201
192.168.1.202
192.168.1.203
192.168.1.204 
192.168.1.205
192.168.1.206
192.168.1.207
192.168.1.208
192.168.1.209"
for node in ${IP};do
 sshpass -p 123456 ssh-copy-id cephadmin@${node}  -o StrictHostKeyChecking=no &> /dev/null
  if [ $? -eq 0 ];then
    echo "${node}----> 密钥分发success完成"
  else
    echo "${node}----> 密钥分发false失败"
  fi
done

# 4. Distribute the SSH key with the script
cephadmin@ceph-deploy:~$ bash ssh_fenfa.sh 
192.168.1.200----> key distribution succeeded
192.168.1.201----> key distribution succeeded
192.168.1.202----> key distribution succeeded
192.168.1.203----> key distribution succeeded
192.168.1.204----> key distribution succeeded
192.168.1.205----> key distribution succeeded
192.168.1.206----> key distribution succeeded
192.168.1.207----> key distribution succeeded
192.168.1.208----> key distribution succeeded
192.168.1.209----> key distribution succeeded
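
After distribution, passwordless login can be verified non-interactively, for example:

for node in 192.168.1.{200..209}; do
  ssh -o BatchMode=yes cephadmin@${node} hostname
done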

3.4 Install the ceph-deploy tool on the ceph-deploy node

cephadmin@ceph-deploy:~$ sudo apt-cache madison ceph-deploy
ceph-deploy |      2.0.1 | https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific bionic/main amd64 Packages
ceph-deploy |      2.0.1 | https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific bionic/main i386 Packages
ceph-deploy | 1.5.38-0ubuntu1 | https://mirrors.tuna.tsinghua.edu.cn/ubuntu bionic/universe amd64 Packages
ceph-deploy | 1.5.38-0ubuntu1 | https://mirrors.tuna.tsinghua.edu.cn/ubuntu bionic/universe i386 Packages
cephadmin@ceph-deploy:~$ sudo apt install ceph-deploy

3.5 Initialize the MON node

Initialize the MON node from the admin (deployment) node.

cephadmin@ceph-deploy:~$ mkdir ceph-cluster  # holds the cluster's initial configuration files
cephadmin@ceph-deploy:~$ cd ceph-cluster/  
cephadmin@ceph-deploy:~/ceph-cluster$ 

Initially only the ceph-mon1 node is initialized; ceph-mon2 and ceph-mon3 will be added manually after the cluster deployment is complete.

cephadmin@ceph-deploy:~/ceph-cluster$ ceph-deploy new --cluster-network 192.168.88.0/24 --public-network 192.168.1.0/24 ceph-mon1
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/cephadmin/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy new --cluster-network 192.168.88.0/24 --public-network 192.168.1.0/24 ceph-mon1
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f90f814fe60>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  ssh_copykey                   : True
[ceph_deploy.cli][INFO  ]  mon                           : ['ceph-mon1']
[ceph_deploy.cli][INFO  ]  func                          : <function new at 0x7f90f5407b50>
[ceph_deploy.cli][INFO  ]  public_network                : 192.168.1.0/24
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  cluster_network               : 192.168.88.0/24
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  fsid                          : None
[ceph_deploy.new][DEBUG ] Creating new cluster named ceph
[ceph_deploy.new][INFO  ] making sure passwordless SSH succeeds
[ceph-mon1][DEBUG ] connected to host: ceph-deploy 
[ceph-mon1][INFO  ] Running command: ssh -CT -o BatchMode=yes ceph-mon1
[ceph_deploy.new][WARNIN] could not connect via SSH
[ceph_deploy.new][INFO  ] will connect again with password prompt
The authenticity of host 'ceph-mon1 (192.168.1.203)' can't be established.
ECDSA key fingerprint is SHA256:8pWk1yGdewC9FEqZOzCC10VMmCDSqDVKS7l6W3iiK6U.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'ceph-mon1' (ECDSA) to the list of known hosts.
[ceph-mon1][DEBUG ] connected to host: ceph-mon1 
[ceph-mon1][DEBUG ] detect platform information from remote host
[ceph-mon1][DEBUG ] detect machine type
[ceph_deploy.new][INFO  ] adding public keys to authorized_keys
[ceph-mon1][DEBUG ] append contents to file
[ceph-mon1][DEBUG ] connection detected need for sudo
[ceph-mon1][DEBUG ] connected to host: ceph-mon1 
[ceph-mon1][DEBUG ] detect platform information from remote host
[ceph-mon1][DEBUG ] detect machine type
[ceph-mon1][DEBUG ] find the location of an executable
[ceph-mon1][INFO  ] Running command: sudo /bin/ip link show
[ceph-mon1][INFO  ] Running command: sudo /bin/ip addr show
[ceph-mon1][DEBUG ] IP addresses found: [u'192.168.1.203', u'240e:388:9f20:5700:a00:27ff:fe04:6dcb', u'192.168.88.203']
[ceph_deploy.new][DEBUG ] Resolving host ceph-mon1
[ceph_deploy.new][DEBUG ] Monitor ceph-mon1 at 192.168.1.203
[ceph_deploy.new][DEBUG ] Monitor initial members are ['ceph-mon1']
[ceph_deploy.new][DEBUG ] Monitor addrs are [u'192.168.1.203']
[ceph_deploy.new][DEBUG ] Creating a random mon key...
[ceph_deploy.new][DEBUG ] Writing monitor keyring to ceph.mon.keyring...
[ceph_deploy.new][DEBUG ] Writing initial config to ceph.conf...

Verify the initialization result:

cephadmin@ceph-deploy:~/ceph-cluster$ ll
total 20
drwxrwxr-x 2 cephadmin cephadmin 4096 Mar 30 18:28 ./
drwxr-xr-x 6 cephadmin cephadmin 4096 Mar 30 18:01 ../
-rw-rw-r-- 1 cephadmin cephadmin  266 Mar 30 18:28 ceph.conf  # auto-generated configuration file
-rw-rw-r-- 1 cephadmin cephadmin 3938 Mar 30 18:28 ceph-deploy-ceph.log  # log file
-rw------- 1 cephadmin cephadmin   73 Mar 30 18:28 ceph.mon.keyring  # keyring used for internal authentication between ceph mon nodes

cephadmin@ceph-deploy:~/ceph-cluster$ cat ceph.conf 
[global]
fsid = 164ae280-496b-4272-8d79-04f94b9a3b5a      # Ceph cluster ID
public_network = 192.168.1.0/24
cluster_network = 192.168.88.0/24
mon_initial_members = ceph-mon1      # multiple mon nodes can be listed, separated by commas
mon_host = 192.168.1.203
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx

3.6 Initialize the ceph-node nodes

cephadmin@ceph-deploy:~/ceph-cluster$ ceph-deploy install --no-adjust-repos --nogpgcheck ceph-node1 ceph-node2 ceph-node3 ceph-node4
--no-adjust-repos: do not push/adjust the repo configuration on the remote nodes
--nogpgcheck: do not verify package signatures

This step installs, serially on each of the specified ceph node servers one at a time, the required repositories and the Ceph packages.

............
[ceph-node4][DEBUG ] Adding group ceph....done
[ceph-node4][DEBUG ] Adding system user ceph....done
[ceph-node4][DEBUG ] Setting system user ceph properties....done
[ceph-node4][DEBUG ] chown: cannot access '/var/log/ceph/*.log*': No such file or directory
[ceph-node4][DEBUG ] Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /lib/systemd/system/ceph.target.
[ceph-node4][DEBUG ] Created symlink /etc/systemd/system/multi-user.target.wants/rbdmap.service → /lib/systemd/system/rbdmap.service.
[ceph-node4][DEBUG ] Setting up radosgw (16.2.7-1bionic) ...
[ceph-node4][DEBUG ] Created symlink /etc/systemd/system/multi-user.target.wants/ceph-radosgw.target → /lib/systemd/system/ceph-radosgw.target.
[ceph-node4][DEBUG ] Created symlink /etc/systemd/system/ceph.target.wants/ceph-radosgw.target → /lib/systemd/system/ceph-radosgw.target.
[ceph-node4][DEBUG ] Setting up ceph-base (16.2.7-1bionic) ...
[ceph-node4][DEBUG ] Created symlink /etc/systemd/system/ceph.target.wants/ceph-crash.service → /lib/systemd/system/ceph-crash.service.
[ceph-node4][DEBUG ] Setting up ceph-mgr (16.2.7-1bionic) ...
[ceph-node4][DEBUG ] Created symlink /etc/systemd/system/multi-user.target.wants/ceph-mgr.target → /lib/systemd/system/ceph-mgr.target.
[ceph-node4][DEBUG ] Created symlink /etc/systemd/system/ceph.target.wants/ceph-mgr.target → /lib/systemd/system/ceph-mgr.target.
[ceph-node4][DEBUG ] Setting up ceph-osd (16.2.7-1bionic) ...
[ceph-node4][DEBUG ] Created symlink /etc/systemd/system/multi-user.target.wants/ceph-osd.target → /lib/systemd/system/ceph-osd.target.
[ceph-node4][DEBUG ] Created symlink /etc/systemd/system/ceph.target.wants/ceph-osd.target → /lib/systemd/system/ceph-osd.target.
[ceph-node4][DEBUG ] Setting up ceph-mds (16.2.7-1bionic) ...
[ceph-node4][DEBUG ] Created symlink /etc/systemd/system/multi-user.target.wants/ceph-mds.target → /lib/systemd/system/ceph-mds.target.
[ceph-node4][DEBUG ] Created symlink /etc/systemd/system/ceph.target.wants/ceph-mds.target → /lib/systemd/system/ceph-mds.target.
[ceph-node4][DEBUG ] Setting up ceph-mon (16.2.7-1bionic) ...
[ceph-node4][DEBUG ] Created symlink /etc/systemd/system/multi-user.target.wants/ceph-mon.target → /lib/systemd/system/ceph-mon.target.
[ceph-node4][DEBUG ] Created symlink /etc/systemd/system/ceph.target.wants/ceph-mon.target → /lib/systemd/system/ceph-mon.target.
[ceph-node4][DEBUG ] Setting up ceph (16.2.7-1bionic) ...
[ceph-node4][DEBUG ] Processing triggers for systemd (237-3ubuntu10.42) ...
[ceph-node4][DEBUG ] Processing triggers for man-db (2.8.3-2ubuntu0.1) ...
[ceph-node4][DEBUG ] Processing triggers for ureadahead (0.100.0-21) ...
[ceph-node4][DEBUG ] Processing triggers for libc-bin (2.27-3ubuntu1.2) ...
[ceph-node4][INFO  ] Running command: sudo ceph --version
[ceph-node4][DEBUG ] ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)

3.7 Set up the MON node and generate the synchronization keys

Install the ceph-mon component on each MON node and initialize the MON node(s); more MON nodes can be added later to scale out for high availability.

root@ceph-mon1:~# apt install ceph-mon
root@ceph-mon2:~# apt install ceph-mon
root@ceph-mon3:~# apt install ceph-mon

Initialize the MON node from the ceph-deploy node:

cephadmin@ceph-deploy:~/ceph-cluster$ ceph-deploy mon create-initial
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/cephadmin/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy mon create-initial
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : create-initial
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7fb8f4573320>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  func                          : <function mon at 0x7fb8f4550b50>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  keyrings                      : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.mon][DEBUG ] Deploying mon, cluster ceph hosts ceph-mon1
[ceph_deploy.mon][DEBUG ] detecting platform for host ceph-mon1 ...
[ceph-mon1][DEBUG ] connection detected need for sudo
[ceph-mon1][DEBUG ] connected to host: ceph-mon1 
[ceph-mon1][DEBUG ] detect platform information from remote host
[ceph-mon1][DEBUG ] detect machine type
[ceph-mon1][DEBUG ] find the location of an executable
[ceph_deploy.mon][INFO  ] distro info: Ubuntu 18.04 bionic
[ceph-mon1][DEBUG ] determining if provided host has same hostname in remote
[ceph-mon1][DEBUG ] get remote short hostname
[ceph-mon1][DEBUG ] deploying mon to ceph-mon1
[ceph-mon1][DEBUG ] get remote short hostname
[ceph-mon1][DEBUG ] remote hostname: ceph-mon1
[ceph-mon1][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[ceph-mon1][DEBUG ] create the mon path if it does not exist
[ceph-mon1][DEBUG ] checking for done path: /var/lib/ceph/mon/ceph-ceph-mon1/done
[ceph-mon1][DEBUG ] done path does not exist: /var/lib/ceph/mon/ceph-ceph-mon1/done
[ceph-mon1][INFO  ] creating keyring file: /var/lib/ceph/tmp/ceph-ceph-mon1.mon.keyring
[ceph-mon1][DEBUG ] create the monitor keyring file
[ceph-mon1][INFO  ] Running command: sudo ceph-mon --cluster ceph --mkfs -i ceph-mon1 --keyring /var/lib/ceph/tmp/ceph-ceph-mon1.mon.keyring --setuser 64045 --setgroup 64045
[ceph-mon1][INFO  ] unlinking keyring file /var/lib/ceph/tmp/ceph-ceph-mon1.mon.keyring
[ceph-mon1][DEBUG ] create a done file to avoid re-doing the mon deployment
[ceph-mon1][DEBUG ] create the init path if it does not exist
[ceph-mon1][INFO  ] Running command: sudo systemctl enable ceph.target
[ceph-mon1][INFO  ] Running command: sudo systemctl enable ceph-mon@ceph-mon1
[ceph-mon1][WARNIN] Created symlink /etc/systemd/system/ceph-mon.target.wants/ceph-mon@ceph-mon1.service → /lib/systemd/system/ceph-mon@.service.
[ceph-mon1][INFO  ] Running command: sudo systemctl start ceph-mon@ceph-mon1
[ceph-mon1][INFO  ] Running command: sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.ceph-mon1.asok mon_status
[ceph-mon1][DEBUG ] ********************************************************************************
[ceph-mon1][DEBUG ] status for monitor: mon.ceph-mon1
[ceph-mon1][DEBUG ] {
[ceph-mon1][DEBUG ]   "election_epoch": 3, 
[ceph-mon1][DEBUG ]   "extra_probe_peers": [], 
[ceph-mon1][DEBUG ]   "feature_map": {
[ceph-mon1][DEBUG ]     "mon": [
[ceph-mon1][DEBUG ]       {
[ceph-mon1][DEBUG ]         "features": "0x3f01cfb9fffdffff", 
[ceph-mon1][DEBUG ]         "num": 1, 
[ceph-mon1][DEBUG ]         "release": "luminous"
[ceph-mon1][DEBUG ]       }
[ceph-mon1][DEBUG ]     ]
[ceph-mon1][DEBUG ]   }, 
[ceph-mon1][DEBUG ]   "features": {
[ceph-mon1][DEBUG ]     "quorum_con": "4540138297136906239", 
[ceph-mon1][DEBUG ]     "quorum_mon": [
[ceph-mon1][DEBUG ]       "kraken", 
[ceph-mon1][DEBUG ]       "luminous", 
[ceph-mon1][DEBUG ]       "mimic", 
[ceph-mon1][DEBUG ]       "osdmap-prune", 
[ceph-mon1][DEBUG ]       "nautilus", 
[ceph-mon1][DEBUG ]       "octopus", 
[ceph-mon1][DEBUG ]       "pacific", 
[ceph-mon1][DEBUG ]       "elector-pinging"
[ceph-mon1][DEBUG ]     ], 
[ceph-mon1][DEBUG ]     "required_con": "2449958747317026820", 
[ceph-mon1][DEBUG ]     "required_mon": [
[ceph-mon1][DEBUG ]       "kraken", 
[ceph-mon1][DEBUG ]       "luminous", 
[ceph-mon1][DEBUG ]       "mimic", 
[ceph-mon1][DEBUG ]       "osdmap-prune", 
[ceph-mon1][DEBUG ]       "nautilus", 
[ceph-mon1][DEBUG ]       "octopus", 
[ceph-mon1][DEBUG ]       "pacific", 
[ceph-mon1][DEBUG ]       "elector-pinging"
[ceph-mon1][DEBUG ]     ]
[ceph-mon1][DEBUG ]   }, 
[ceph-mon1][DEBUG ]   "monmap": {
[ceph-mon1][DEBUG ]     "created": "2022-03-30T10:43:39.124553Z", 
[ceph-mon1][DEBUG ]     "disallowed_leaders: ": "", 
[ceph-mon1][DEBUG ]     "election_strategy": 1, 
[ceph-mon1][DEBUG ]     "epoch": 1, 
[ceph-mon1][DEBUG ]     "features": {
[ceph-mon1][DEBUG ]       "optional": [], 
[ceph-mon1][DEBUG ]       "persistent": [
[ceph-mon1][DEBUG ]         "kraken", 
[ceph-mon1][DEBUG ]         "luminous", 
[ceph-mon1][DEBUG ]         "mimic", 
[ceph-mon1][DEBUG ]         "osdmap-prune", 
[ceph-mon1][DEBUG ]         "nautilus", 
[ceph-mon1][DEBUG ]         "octopus", 
[ceph-mon1][DEBUG ]         "pacific", 
[ceph-mon1][DEBUG ]         "elector-pinging"
[ceph-mon1][DEBUG ]       ]
[ceph-mon1][DEBUG ]     }, 
[ceph-mon1][DEBUG ]     "fsid": "164ae280-496b-4272-8d79-04f94b9a3b5a", 
[ceph-mon1][DEBUG ]     "min_mon_release": 16, 
[ceph-mon1][DEBUG ]     "min_mon_release_name": "pacific", 
[ceph-mon1][DEBUG ]     "modified": "2022-03-30T10:43:39.124553Z", 
[ceph-mon1][DEBUG ]     "mons": [
[ceph-mon1][DEBUG ]       {
[ceph-mon1][DEBUG ]         "addr": "192.168.1.203:6789/0", 
[ceph-mon1][DEBUG ]         "crush_location": "{}", 
[ceph-mon1][DEBUG ]         "name": "ceph-mon1", 
[ceph-mon1][DEBUG ]         "priority": 0, 
[ceph-mon1][DEBUG ]         "public_addr": "192.168.1.203:6789/0", 
[ceph-mon1][DEBUG ]         "public_addrs": {
[ceph-mon1][DEBUG ]           "addrvec": [
[ceph-mon1][DEBUG ]             {
[ceph-mon1][DEBUG ]               "addr": "192.168.1.203:3300", 
[ceph-mon1][DEBUG ]               "nonce": 0, 
[ceph-mon1][DEBUG ]               "type": "v2"
[ceph-mon1][DEBUG ]             }, 
[ceph-mon1][DEBUG ]             {
[ceph-mon1][DEBUG ]               "addr": "192.168.1.203:6789", 
[ceph-mon1][DEBUG ]               "nonce": 0, 
[ceph-mon1][DEBUG ]               "type": "v1"
[ceph-mon1][DEBUG ]             }
[ceph-mon1][DEBUG ]           ]
[ceph-mon1][DEBUG ]         }, 
[ceph-mon1][DEBUG ]         "rank": 0, 
[ceph-mon1][DEBUG ]         "weight": 0
[ceph-mon1][DEBUG ]       }
[ceph-mon1][DEBUG ]     ], 
[ceph-mon1][DEBUG ]     "stretch_mode": false, 
[ceph-mon1][DEBUG ]     "tiebreaker_mon": ""
[ceph-mon1][DEBUG ]   }, 
[ceph-mon1][DEBUG ]   "name": "ceph-mon1", 
[ceph-mon1][DEBUG ]   "outside_quorum": [], 
[ceph-mon1][DEBUG ]   "quorum": [
[ceph-mon1][DEBUG ]     0
[ceph-mon1][DEBUG ]   ], 
[ceph-mon1][DEBUG ]   "quorum_age": 1, 
[ceph-mon1][DEBUG ]   "rank": 0, 
[ceph-mon1][DEBUG ]   "state": "leader", 
[ceph-mon1][DEBUG ]   "stretch_mode": false, 
[ceph-mon1][DEBUG ]   "sync_provider": []
[ceph-mon1][DEBUG ] }
[ceph-mon1][DEBUG ] ********************************************************************************
[ceph-mon1][INFO  ] monitor: mon.ceph-mon1 is running
[ceph-mon1][INFO  ] Running command: sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.ceph-mon1.asok mon_status
[ceph_deploy.mon][INFO  ] processing monitor mon.ceph-mon1
[ceph-mon1][DEBUG ] connection detected need for sudo
[ceph-mon1][DEBUG ] connected to host: ceph-mon1 
[ceph-mon1][DEBUG ] detect platform information from remote host
[ceph-mon1][DEBUG ] detect machine type
[ceph-mon1][DEBUG ] find the location of an executable
[ceph-mon1][INFO  ] Running command: sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.ceph-mon1.asok mon_status
[ceph_deploy.mon][INFO  ] mon.ceph-mon1 monitor has reached quorum!
[ceph_deploy.mon][INFO  ] all initial monitors are running and have formed quorum
[ceph_deploy.mon][INFO  ] Running gatherkeys...
[ceph_deploy.gatherkeys][INFO  ] Storing keys in temp directory /tmp/tmpBrv4wZ
[ceph-mon1][DEBUG ] connection detected need for sudo
[ceph-mon1][DEBUG ] connected to host: ceph-mon1 
[ceph-mon1][DEBUG ] detect platform information from remote host
[ceph-mon1][DEBUG ] detect machine type
[ceph-mon1][DEBUG ] get remote short hostname
[ceph-mon1][DEBUG ] fetch remote file
[ceph-mon1][INFO  ] Running command: sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --admin-daemon=/var/run/ceph/ceph-mon.ceph-mon1.asok mon_status
[ceph-mon1][INFO  ] Running command: sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph-mon1/keyring auth get client.admin
[ceph-mon1][INFO  ] Running command: sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph-mon1/keyring auth get client.bootstrap-mds
[ceph-mon1][INFO  ] Running command: sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph-mon1/keyring auth get client.bootstrap-mgr
[ceph-mon1][INFO  ] Running command: sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph-mon1/keyring auth get client.bootstrap-osd
[ceph-mon1][INFO  ] Running command: sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph-mon1/keyring auth get client.bootstrap-rgw
[ceph_deploy.gatherkeys][INFO  ] Storing ceph.client.admin.keyring
[ceph_deploy.gatherkeys][INFO  ] Storing ceph.bootstrap-mds.keyring
[ceph_deploy.gatherkeys][INFO  ] Storing ceph.bootstrap-mgr.keyring
[ceph_deploy.gatherkeys][INFO  ] keyring 'ceph.mon.keyring' already exists
[ceph_deploy.gatherkeys][INFO  ] Storing ceph.bootstrap-osd.keyring
[ceph_deploy.gatherkeys][INFO  ] Storing ceph.bootstrap-rgw.keyring
[ceph_deploy.gatherkeys][INFO  ] Destroy temp directory /tmp/tmpBrv4wZ

3.8 Verify the MON node

Verify that the ceph-mon service has been installed and started automatically on the MON node. After this step, the initialization directory on the ceph-deploy node also contains the bootstrap keyring files for the mds/mgr/osd/rgw services; these files carry the highest privileges on the Ceph cluster, so keep them safe.

root@ceph-mon1:~# ps aux | grep ceph-mon
ceph      5095  0.1  3.9 480460 39892 ?        Ssl  18:43   0:00 /usr/bin/ceph-mon -f --cluster ceph --id ceph-mon1 --setuser ceph --setgroup ceph
root      5695  0.0  0.1  11340  1084 pts/0    R+   18:44   0:00 grep --color=auto ceph-mon
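
The service status and the listening ports (3300 for msgr v2, 6789 for v1) can also be checked on the MON node, for example:

root@ceph-mon1:~# systemctl status ceph-mon@ceph-mon1 --no-pager
root@ceph-mon1:~# ss -tnlp | grep ceph-mon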

3.9 Distribute the admin key to the node servers

From the ceph-deploy node, copy the configuration file and the admin keyring to every node that will run ceph administration commands, so that later ceph commands do not need to specify the ceph-mon node address and the ceph.client.admin.keyring file every time. The ceph-mon nodes also need the cluster configuration file and authentication file synchronized to them.

If the cluster is to be managed from the ceph-deploy node:

root@ceph-deploy:~# apt install ceph-common -y  # install the Ceph common components first
root@ceph-node1:~# apt install ceph-common -y
root@ceph-node2:~# apt install ceph-common -y
root@ceph-node3:~# apt install ceph-common -y
root@ceph-node4:~# apt install ceph-common -y

Copy the key to ceph-node1, ceph-node2, ceph-node3 and ceph-node4.

# Run on ceph-deploy

cephadmin@ceph-deploy:~/ceph-cluster$ ceph-deploy admin ceph-node1 ceph-node2 ceph-node3 ceph-node4
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/cephadmin/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy admin ceph-node1 ceph-node2 ceph-node3 ceph-node4
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7fa600485410>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  client                        : ['ceph-node1', 'ceph-node2', 'ceph-node3', 'ceph-node4']
[ceph_deploy.cli][INFO  ]  func                          : <function admin at 0x7fa600d89ad0>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to ceph-node1
[ceph-node1][DEBUG ] connection detected need for sudo
[ceph-node1][DEBUG ] connected to host: ceph-node1 
[ceph-node1][DEBUG ] detect platform information from remote host
[ceph-node1][DEBUG ] detect machine type
[ceph-node1][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to ceph-node2
[ceph-node2][DEBUG ] connection detected need for sudo
[ceph-node2][DEBUG ] connected to host: ceph-node2 
[ceph-node2][DEBUG ] detect platform information from remote host
[ceph-node2][DEBUG ] detect machine type
[ceph-node2][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to ceph-node3
[ceph-node3][DEBUG ] connection detected need for sudo
[ceph-node3][DEBUG ] connected to host: ceph-node3 
[ceph-node3][DEBUG ] detect platform information from remote host
[ceph-node3][DEBUG ] detect machine type
[ceph-node3][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to ceph-node4
[ceph-node4][DEBUG ] connection detected need for sudo
[ceph-node4][DEBUG ] connected to host: ceph-node4 
[ceph-node4][DEBUG ] detect platform information from remote host
[ceph-node4][DEBUG ] detect machine type
[ceph-node4][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf

3.10 Verify the keys

root@ceph-node1:~# ll /etc/ceph/
total 20
drwxr-xr-x  2 root root 4096 Mar 30 18:49 ./
drwxr-xr-x 93 root root 4096 Mar 30 18:35 ../
-rw-------  1 root root  151 Mar 30 18:49 ceph.client.admin.keyring
-rw-r--r--  1 root root  266 Mar 30 18:49 ceph.conf
-rw-r--r--  1 root root   92 Dec  8 00:26 rbdmap
-rw-------  1 root root    0 Mar 30 18:49 tmpUXXQIt


root@ceph-node2:~# ll /etc/ceph/
total 20
drwxr-xr-x  2 root root 4096 Mar 30 18:49 ./
drwxr-xr-x 93 root root 4096 Mar 30 18:36 ../
-rw-------  1 root root  151 Mar 30 18:49 ceph.client.admin.keyring
-rw-r--r--  1 root root  266 Mar 30 18:49 ceph.conf
-rw-r--r--  1 root root   92 Dec  8 00:26 rbdmap
-rw-------  1 root root    0 Mar 30 18:49 tmp3OQ18C


root@ceph-node3:~# ll /etc/ceph/
total 20
drwxr-xr-x  2 root root 4096 Mar 30 18:49 ./
drwxr-xr-x 93 root root 4096 Mar 30 18:37 ../
-rw-------  1 root root  151 Mar 30 18:49 ceph.client.admin.keyring
-rw-r--r--  1 root root  266 Mar 30 18:49 ceph.conf
-rw-r--r--  1 root root   92 Dec  8 00:26 rbdmap
-rw-------  1 root root    0 Mar 30 18:49 tmpZmT34F


root@ceph-node4:~# ll /etc/ceph/
total 20
drwxr-xr-x  2 root root 4096 Mar 30 18:49 ./
drwxr-xr-x 93 root root 4096 Mar 30 18:39 ../
-rw-------  1 root root  151 Mar 30 18:49 ceph.client.admin.keyring
-rw-r--r--  1 root root  266 Mar 30 18:49 ceph.conf
-rw-r--r--  1 root root   92 Dec  8 00:26 rbdmap
-rw-------  1 root root    0 Mar 30 18:49 tmpiR4BpD

3.11 Grant key access on the ceph-node nodes

For security, the owner and group of the authentication file default to the root user and root group. If the cephadmin user should also be able to run ceph commands, it needs to be granted access to the file.

root@ceph-node1:~# setfacl -m u:cephadmin:rw /etc/ceph/ceph.client.admin.keyring
root@ceph-node2:~# setfacl -m u:cephadmin:rw /etc/ceph/ceph.client.admin.keyring
root@ceph-node3:~# setfacl -m u:cephadmin:rw /etc/ceph/ceph.client.admin.keyring
root@ceph-node4:~# setfacl -m u:cephadmin:rw /etc/ceph/ceph.client.admin.keyring
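
The ACL and the resulting access can be verified, for example:

root@ceph-node1:~# getfacl /etc/ceph/ceph.client.admin.keyring
root@ceph-node1:~# su - cephadmin -c 'ceph -s'    # should now work without sudo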

3.12 Deploy the ceph-mgr nodes

(1) Install the ceph-mgr package on ceph-mgr1 and ceph-mgr2

root@ceph-mgr1:~# apt install -y ceph-mgr
root@ceph-mgr2:~# apt install -y ceph-mgr

(2) Initialize the ceph-mgr node from ceph-deploy

# Initialize only the ceph-mgr1 node for now

cephadmin@ceph-deploy:~/ceph-cluster$ ceph-deploy mgr create ceph-mgr1
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/cephadmin/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy mgr create ceph-mgr1
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  mgr                           : [('ceph-mgr1', 'ceph-mgr1')]
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7ff5ce1cceb0>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  func                          : <function mgr at 0x7ff5ce62e1d0>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.mgr][DEBUG ] Deploying mgr, cluster ceph hosts ceph-mgr1:ceph-mgr1
The authenticity of host 'ceph-mgr1 (192.168.1.201)' can't be established.
ECDSA key fingerprint is SHA256:8pWk1yGdewC9FEqZOzCC10VMmCDSqDVKS7l6W3iiK6U.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'ceph-mgr1' (ECDSA) to the list of known hosts.
[ceph-mgr1][DEBUG ] connection detected need for sudo
[ceph-mgr1][DEBUG ] connected to host: ceph-mgr1 
[ceph-mgr1][DEBUG ] detect platform information from remote host
[ceph-mgr1][DEBUG ] detect machine type
[ceph_deploy.mgr][INFO  ] Distro info: Ubuntu 18.04 bionic
[ceph_deploy.mgr][DEBUG ] remote host will use systemd
[ceph_deploy.mgr][DEBUG ] deploying mgr bootstrap to ceph-mgr1
[ceph-mgr1][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[ceph-mgr1][WARNIN] mgr keyring does not exist yet, creating one
[ceph-mgr1][DEBUG ] create a keyring file
[ceph-mgr1][DEBUG ] create path recursively if it doesn't exist
[ceph-mgr1][INFO  ] Running command: sudo ceph --cluster ceph --name client.bootstrap-mgr --keyring /var/lib/ceph/bootstrap-mgr/ceph.keyring auth get-or-create mgr.ceph-mgr1 mon allow profile mgr osd allow * mds allow * -o /var/lib/ceph/mgr/ceph-ceph-mgr1/keyring
[ceph-mgr1][INFO  ] Running command: sudo systemctl enable ceph-mgr@ceph-mgr1
[ceph-mgr1][WARNIN] Created symlink /etc/systemd/system/ceph-mgr.target.wants/ceph-mgr@ceph-mgr1.service → /lib/systemd/system/ceph-mgr@.service.
[ceph-mgr1][INFO  ] Running command: sudo systemctl start ceph-mgr@ceph-mgr1
[ceph-mgr1][INFO  ] Running command: sudo systemctl enable ceph.target

3.13 Verify the ceph-mgr node

On ceph-mgr1:

root@ceph-mgr1:~# ps aux | grep ceph-mgr
ceph      8033  8.8 31.6 1228332 319424 ?      Ssl  18:56   0:03 /usr/bin/ceph-mgr -f --cluster ceph --id ceph-mgr1 --setuser ceph --setgroup ceph
root      8228  0.0  0.1  14436  1064 pts/0    S+   18:57   0:00 grep --color=auto ceph-mgr

3.14 Configure ceph-deploy to manage the Ceph cluster

# Set up the ceph-deploy node so it can manage the Ceph cluster

cephadmin@ceph-deploy:~/ceph-cluster$ sudo apt install -y ceph-common
cephadmin@ceph-deploy:~/ceph-cluster$ ceph-deploy admin ceph-deploy
cephadmin@ceph-deploy:~/ceph-cluster$ sudo setfacl -m u:cephadmin:rw /etc/ceph/ceph.client.admin.keyring

# Cluster status as seen from the ceph-deploy node

cephadmin@ceph-deploy:~/ceph-cluster$ ceph -s
  cluster:
    id:     164ae280-496b-4272-8d79-04f94b9a3b5a
    health: HEALTH_WARN
            mon is allowing insecure global_id reclaim
            OSD count 0 < osd_pool_default_size 3
 
  services:
    mon: 1 daemons, quorum ceph-mon1 (age 14m)
    mgr: ceph-mgr1(active, since 2m)
    osd: 0 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:

Disable insecure global_id reclaim:

cephadmin@ceph-deploy:~/ceph-cluster$ ceph config set mon auth_allow_insecure_global_id_reclaim false
cephadmin@ceph-deploy:~/ceph-cluster$ ceph -s
  cluster:
    id:     164ae280-496b-4272-8d79-04f94b9a3b5a
    health: HEALTH_WARN
            OSD count 0 < osd_pool_default_size 3
 
  services:
    mon: 1 daemons, quorum ceph-mon1 (age 16m)
    mgr: ceph-mgr1(active, since 3m)
    osd: 0 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:

Versions of the individual Ceph cluster components:

cephadmin@ceph-deploy:~/ceph-cluster$ ceph version
ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)
cephadmin@ceph-deploy:~/ceph-cluster$ ceph versions
{
    "mon": {
        "ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)": 1
    },
    "mgr": {
        "ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)": 1
    },
    "osd": {},
    "mds": {},
    "overall": {
        "ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)": 2
    }
}

3.15 Prepare the OSD nodes

# The node initialization was already performed earlier (section 3.6), so the following step can be skipped

# Before wiping the disks, install the basic Ceph runtime environment on the node servers from the deploy node.
# Run on ceph-deploy

cephadmin@ceph-deploy:~/ceph-cluster$ ceph-deploy install --release pacific ceph-node1
cephadmin@ceph-deploy:~/ceph-cluster$ ceph-deploy install --release pacific ceph-node2
cephadmin@ceph-deploy:~/ceph-cluster$ ceph-deploy install --release pacific ceph-node3
cephadmin@ceph-deploy:~/ceph-cluster$ ceph-deploy install --release pacific ceph-node4

3.16 List the disks on the ceph node servers

cephadmin@ceph-deploy:~/ceph-cluster$ ceph-deploy disk list ceph-node1
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/cephadmin/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy disk list ceph-node1
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  debug                         : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : list
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7ff12bdb0320>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  host                          : ['ceph-node1']
[ceph_deploy.cli][INFO  ]  func                          : <function disk at 0x7ff12bd86350>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph-node1][DEBUG ] connection detected need for sudo
[ceph-node1][DEBUG ] connected to host: ceph-node1 
[ceph-node1][DEBUG ] detect platform information from remote host
[ceph-node1][DEBUG ] detect machine type
[ceph-node1][DEBUG ] find the location of an executable
[ceph-node1][INFO  ] Running command: sudo fdisk -l
[ceph-node1][INFO  ] Disk /dev/sda: 20 GiB, 21474836480 bytes, 41943040 sectors
[ceph-node1][INFO  ] Disk /dev/sdb: 10 GiB, 10737418240 bytes, 20971520 sectors
[ceph-node1][INFO  ] Disk /dev/sdc: 10 GiB, 10737418240 bytes, 20971520 sectors
[ceph-node1][INFO  ] Disk /dev/sdd: 10 GiB, 10737418240 bytes, 20971520 sectors

3.17 Use ceph-deploy disk zap to wipe the Ceph data disks on each ceph node

# Run on ceph-deploy
 ceph-deploy  disk zap ceph-node1  /dev/sdb
 ceph-deploy  disk zap ceph-node1  /dev/sdc
 ceph-deploy  disk zap ceph-node1  /dev/sdd
 
 
 ceph-deploy  disk zap ceph-node2  /dev/sdb
 ceph-deploy  disk zap ceph-node2  /dev/sdc
 ceph-deploy  disk zap ceph-node2  /dev/sdd
 
 
 ceph-deploy  disk zap ceph-node3  /dev/sdb
 ceph-deploy  disk zap ceph-node3  /dev/sdc
 ceph-deploy  disk zap ceph-node3  /dev/sdd
 
 
 ceph-deploy  disk zap ceph-node4  /dev/sdb
 ceph-deploy  disk zap ceph-node4  /dev/sdc
 ceph-deploy  disk zap ceph-node4  /dev/sdd

3.18 Add the OSDs

# Run on ceph-deploy
 ceph-deploy osd create ceph-node1 --data /dev/sdb
 ceph-deploy osd create ceph-node1 --data /dev/sdc
 ceph-deploy osd create ceph-node1 --data /dev/sdd
 
 ceph-deploy osd create ceph-node2 --data /dev/sdb
 ceph-deploy osd create ceph-node2 --data /dev/sdc
 ceph-deploy osd create ceph-node2 --data /dev/sdd
 
 ceph-deploy osd create ceph-node3 --data /dev/sdb
 ceph-deploy osd create ceph-node3 --data /dev/sdc
 ceph-deploy osd create ceph-node3 --data /dev/sdd
 
 ceph-deploy osd create ceph-node4 --data /dev/sdb
 ceph-deploy osd create ceph-node4 --data /dev/sdc
 ceph-deploy osd create ceph-node4 --data /dev/sdd
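
The zap and create commands above can also be scripted; a small sketch, assuming the same four nodes and the same three data disks per node:

for node in ceph-node1 ceph-node2 ceph-node3 ceph-node4; do
  for dev in /dev/sdb /dev/sdc /dev/sdd; do
    ceph-deploy disk zap ${node} ${dev}
    ceph-deploy osd create ${node} --data ${dev}
  done
done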

3.19 Verify the OSDs

cephadmin@ceph-deploy:~/ceph-cluster$ ceph -s
  cluster:
    id:     164ae280-496b-4272-8d79-04f94b9a3b5a
    health: HEALTH_OK
 
  services:
    mon: 1 daemons, quorum ceph-mon1 (age 28m)
    mgr: ceph-mgr1(active, since 15m)
    osd: 12 osds: 12 up (since 12s), 12 in (since 21s)
 
  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   74 MiB used, 120 GiB / 120 GiB avail
    pgs:     1 active+clean
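
Besides ceph -s, the OSD topology and per-OSD utilization can be inspected with, for example:

cephadmin@ceph-deploy:~/ceph-cluster$ ceph osd tree
cephadmin@ceph-deploy:~/ceph-cluster$ ceph osd df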

3.20 Enable the OSD services at boot

The OSD services are already started automatically by default. After the node servers have been added, you can test whether the OSDs come back up automatically after a node server reboot.

# Verify
root@ceph-node1:~# ps -ef|grep osd
ceph        7045       1  0 19:08 ?        00:00:01 /usr/bin/ceph-osd -f --cluster ceph --id 0 --setuser ceph --setgroup ceph
ceph        8720       1  0 19:09 ?        00:00:01 /usr/bin/ceph-osd -f --cluster ceph --id 1 --setuser ceph --setgroup ceph
ceph       10386       1  0 19:09 ?        00:00:01 /usr/bin/ceph-osd -f --cluster ceph --id 2 --setuser ceph --setgroup ceph
root       10885     917  0 19:13 pts/0    00:00:00 grep --color=auto osd

root@ceph-node2:~# ps -ef|grep osd
ceph        6883       1  0 19:09 ?        00:00:02 /usr/bin/ceph-osd -f --cluster ceph --id 3 --setuser ceph --setgroup ceph
ceph        8561       1  0 19:09 ?        00:00:01 /usr/bin/ceph-osd -f --cluster ceph --id 4 --setuser ceph --setgroup ceph
ceph       10227       1  0 19:09 ?        00:00:01 /usr/bin/ceph-osd -f --cluster ceph --id 5 --setuser ceph --setgroup ceph
root       10726     905  0 19:13 pts/0    00:00:00 grep --color=auto osd

root@ceph-node3:~# ps -ef|grep osd
ceph        7096       1  0 19:09 ?        00:00:01 /usr/bin/ceph-osd -f --cluster ceph --id 6 --setuser ceph --setgroup ceph
ceph        8775       1  0 19:10 ?        00:00:01 /usr/bin/ceph-osd -f --cluster ceph --id 7 --setuser ceph --setgroup ceph
ceph       10436       1  0 19:10 ?        00:00:01 /usr/bin/ceph-osd -f --cluster ceph --id 8 --setuser ceph --setgroup ceph
root       10935     998  0 19:13 pts/0    00:00:00 grep --color=auto osd

root@ceph-node4:~# ps -ef|grep osd
ceph        6836       1  1 19:10 ?        00:00:01 /usr/bin/ceph-osd -f --cluster ceph --id 9 --setuser ceph --setgroup ceph
ceph        8508       1  0 19:10 ?        00:00:01 /usr/bin/ceph-osd -f --cluster ceph --id 10 --setuser ceph --setgroup ceph
ceph       10371       1  0 19:12 ?        00:00:00 /usr/bin/ceph-osd -f --cluster ceph --id 11 --setuser ceph --setgroup ceph
root       10872     904  0 19:13 pts/0    00:00:00 grep --color=auto osd


# Enable start on boot
root@ceph-node1:~# systemctl enable ceph-osd@0 ceph-osd@1 ceph-osd@2
Created symlink /etc/systemd/system/ceph-osd.target.wants/ceph-osd@0.service → /lib/systemd/system/ceph-osd@.service.
Created symlink /etc/systemd/system/ceph-osd.target.wants/ceph-osd@1.service → /lib/systemd/system/ceph-osd@.service.
Created symlink /etc/systemd/system/ceph-osd.target.wants/ceph-osd@2.service → /lib/systemd/system/ceph-osd@.service.

root@ceph-node2:~# systemctl enable ceph-osd@3 ceph-osd@4 ceph-osd@5
Created symlink /etc/systemd/system/ceph-osd.target.wants/ceph-osd@3.service → /lib/systemd/system/ceph-osd@.service.
Created symlink /etc/systemd/system/ceph-osd.target.wants/ceph-osd@4.service → /lib/systemd/system/ceph-osd@.service.
Created symlink /etc/systemd/system/ceph-osd.target.wants/ceph-osd@5.service → /lib/systemd/system/ceph-osd@.service.

root@ceph-node3:~# systemctl enable ceph-osd@6 ceph-osd@7 ceph-osd@8
Created symlink /etc/systemd/system/ceph-osd.target.wants/ceph-osd@6.service → /lib/systemd/system/ceph-osd@.service.
Created symlink /etc/systemd/system/ceph-osd.target.wants/ceph-osd@7.service → /lib/systemd/system/ceph-osd@.service.
Created symlink /etc/systemd/system/ceph-osd.target.wants/ceph-osd@8.service → /lib/systemd/system/ceph-osd@.service.

root@ceph-node4:~# systemctl enable ceph-osd@9 ceph-osd@10 ceph-osd@11
Created symlink /etc/systemd/system/ceph-osd.target.wants/ceph-osd@9.service → /lib/systemd/system/ceph-osd@.service.
Created symlink /etc/systemd/system/ceph-osd.target.wants/ceph-osd@10.service → /lib/systemd/system/ceph-osd@.service.
Created symlink /etc/systemd/system/ceph-osd.target.wants/ceph-osd@11.service → /lib/systemd/system/ceph-osd@.service.

3.21 ceph-deploy subcommands

$ ceph-deploy --help
new: start deploying a new Ceph storage cluster and generate the CLUSTER.conf cluster configuration file and keyring authentication file.
install: install the Ceph packages on remote hosts; --release can be used to select the version.
rgw: manage RGW daemons (RADOSGW, the object storage gateway).
mgr: manage MGR daemons (ceph-mgr, the Ceph Manager daemon).
mds: manage MDS daemons (Ceph Metadata Server).
mon: manage MON daemons (ceph-mon, the Ceph monitor).
gatherkeys: fetch the authentication keys needed for provisioning new nodes; these keys are used when new MON/OSD/MDS nodes join the cluster.
disk: manage disks on remote hosts.
osd: prepare data disks on remote hosts, i.e. add the specified disks of a remote host to the Ceph cluster as OSDs.
repo: manage repositories on remote hosts.
admin: push the Ceph cluster configuration file and the client.admin authentication file to remote hosts.
config: push the ceph.conf configuration file to remote hosts, or copy it back from them.
uninstall: remove the installed packages from remote hosts.
purgedata: remove the Ceph data under /var/lib/ceph and delete the contents of /etc/ceph.
purge: remove the packages and all data from remote hosts.
forgetkeys: remove all authentication keyrings from the local host, including the client.admin, monitor and bootstrap keyrings.
pkg: manage packages on remote hosts.
calamari: install and configure a Calamari web node; Calamari is a web-based monitoring platform.

3.22 Test uploading and downloading data

To store or retrieve data, a client must first connect to a pool in the RADOS cluster; the target object is then located by the CRUSH rule associated with the pool, based on the object name. To test the cluster's data access, first create a test pool named mypool with 32 PGs.

# Create the pool
cephadmin@ceph-deploy:~/ceph-cluster$  ceph osd pool create mypool 32 32
pool 'mypool' created
cephadmin@ceph-deploy:~/ceph-cluster$ ceph osd pool ls
device_health_metrics
mypool
cephadmin@ceph-deploy:~/ceph-cluster$ ceph pg ls-by-pool mypool | awk '{print $1,$2,$15}'
PG OBJECTS ACTING
3.0 0 [8,4,11]p8
3.1 0 [0,4,10]p0
3.2 0 [10,6,5]p10
3.3 0 [10,4,0]p10
3.4 0 [1,7,11]p1
3.5 0 [5,2,6]p5
3.6 0 [0,11,4]p0
3.7 0 [2,6,9]p2
3.8 0 [11,1,7]p11
3.9 0 [8,11,5]p8
3.a 0 [3,9,8]p3
3.b 0 [1,10,8]p1
3.c 0 [5,0,7]p5
3.d 0 [3,7,2]p3
3.e 0 [7,11,4]p7
3.f 0 [5,8,11]p5
3.10 0 [10,7,5]p10
3.11 0 [7,4,10]p7
3.12 0 [9,3,0]p9
3.13 0 [3,9,8]p3
3.14 0 [3,10,2]p3
3.15 0 [9,6,3]p9
3.16 0 [5,1,9]p5
3.17 0 [10,5,8]p10
3.18 0 [10,6,1]p10
3.19 0 [1,4,10]p1
3.1a 0 [8,1,9]p8
3.1b 0 [6,4,10]p6
3.1c 0 [5,1,9]p5
3.1d 0 [9,4,7]p9
3.1e 0 [2,6,11]p2
3.1f 0 [11,0,5]p11
  
* NOTE: afterwards

# Upload a file
cephadmin@ceph-deploy:~/ceph-cluster$ sudo rados put msg1 /var/log/syslog --pool=mypool   # upload the syslog file to mypool as object msg1

# List objects
cephadmin@ceph-deploy:~/ceph-cluster$ rados ls --pool=mypool
msg1

# Object placement info
cephadmin@ceph-deploy:~/ceph-cluster$ ceph osd map mypool msg1
osdmap e96 pool 'mypool' (3) object 'msg1' -> pg 3.c833d430 (3.10) -> up ([10,7,5], p10) acting ([10,7,5], p10)
This shows that the object was hashed to PG c833d430 in the pool with id 3; 3.10 means the data lives in PG 10 of pool id 3. The up and acting OSD set is [10,7,5], with OSD 10 as the primary; three OSDs means the data is stored as three replicas. The OSDs of a PG are chosen by Ceph's CRUSH algorithm, which computes which OSDs hold the three copies.

# Download the object
cephadmin@ceph-deploy:~/ceph-cluster$  sudo rados get msg1 --pool=mypool /opt/my.txt
cephadmin@ceph-deploy:~/ceph-cluster$ ls /opt/
my.txt

# Overwrite the object
cephadmin@ceph-deploy:~/ceph-cluster$ sudo rados put msg1 /etc/passwd --pool=mypool
cephadmin@ceph-deploy:~/ceph-cluster$ sudo rados get msg1 --pool=mypool /opt/2.txt
cephadmin@ceph-deploy:~/ceph-cluster$ tail /opt/2.txt 
lxd:x:105:65534::/var/lib/lxd/:/bin/false
uuidd:x:106:110::/run/uuidd:/usr/sbin/nologin
dnsmasq:x:107:65534:dnsmasq,,,:/var/lib/misc:/usr/sbin/nologin
landscape:x:108:112::/var/lib/landscape:/usr/sbin/nologin
sshd:x:109:65534::/run/sshd:/usr/sbin/nologin
pollinate:x:110:1::/var/cache/pollinate:/bin/false
jack:x:1000:1000:jack,,,:/home/jack:/bin/bash
statd:x:111:65534::/var/lib/nfs:/usr/sbin/nologin
cephadmin:x:2022:2022::/home/cephadmin:/bin/bash
ceph:x:64045:64045:Ceph storage service:/var/lib/ceph:/usr/sbin/nologin

# Delete the object
cephadmin@ceph-deploy:~/ceph-cluster$ sudo rados rm msg1 --pool=mypool
cephadmin@ceph-deploy:~/ceph-cluster$ rados ls --pool=mypool
cephadmin@ceph-deploy:~/ceph-cluster$
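
Optionally, rados bench can be used to generate some test I/O against the pool; a short sketch (the benchmark objects are removed by the final cleanup command):

# 10-second write benchmark, keeping the objects for the read test
cephadmin@ceph-deploy:~/ceph-cluster$ rados bench -p mypool 10 write --no-cleanup
# 10-second random read benchmark
cephadmin@ceph-deploy:~/ceph-cluster$ rados bench -p mypool 10 rand
# Remove the benchmark objects
cephadmin@ceph-deploy:~/ceph-cluster$ rados -p mypool cleanup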

4. Extending the Ceph cluster for high availability

4.1 Add more ceph-mon nodes

ceph-mon is a Ceph service with built-in leader election for high availability, so the number of MON nodes is usually odd.

# Install ceph-mon on the new mon nodes
root@ceph-mon2:~# apt install ceph-mon
root@ceph-mon3:~# apt install ceph-mon
# Add the mon nodes from the ceph-deploy node
cephadmin@ceph-deploy:~/ceph-cluster$ ceph-deploy mon add ceph-mon2
cephadmin@ceph-deploy:~/ceph-cluster$ ceph-deploy mon add ceph-mon3

Verify the ceph-mon result:

cephadmin@ceph-deploy:~/ceph-cluster$ ceph -s
  cluster:
    id:     164ae280-496b-4272-8d79-04f94b9a3b5a
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph-mon1,ceph-mon2,ceph-mon3 (age 7s)
    mgr: ceph-mgr1(active, since 26m)
    osd: 12 osds: 12 up (since 10m), 12 in (since 10m)
 
  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   74 MiB used, 120 GiB / 120 GiB avail
    pgs:     1 active+clean


# ceph-mon quorum status
cephadmin@ceph-deploy:~/ceph-cluster$ ceph quorum_status --format json-pretty

{
    "election_epoch": 12,
    "quorum": [
        0,
        1,
        2
    ],
    "quorum_names": [
        "ceph-mon1",
        "ceph-mon2",
        "ceph-mon3"
    ],
    "quorum_leader_name": "ceph-mon1",
    "quorum_age": 38,
    "features": {
        "quorum_con": "4540138297136906239",
        "quorum_mon": [
            "kraken",
            "luminous",
            "mimic",
            "osdmap-prune",
            "nautilus",
            "octopus",
            "pacific",
            "elector-pinging"
        ]
    },
    "monmap": {
        "epoch": 3,
        "fsid": "164ae280-496b-4272-8d79-04f94b9a3b5a",
        "modified": "2022-03-30T11:22:33.628858Z",
        "created": "2022-03-30T10:43:39.124553Z",
        "min_mon_release": 16,
        "min_mon_release_name": "pacific",
        "election_strategy": 1,
        "disallowed_leaders: ": "",
        "stretch_mode": false,
        "tiebreaker_mon": "",
        "features": {
            "persistent": [
                "kraken",
                "luminous",
                "mimic",
                "osdmap-prune",
                "nautilus",
                "octopus",
                "pacific",
                "elector-pinging"
            ],
            "optional": []
        },
        "mons": [
            {
                "rank": 0,
                "name": "ceph-mon1",
                "public_addrs": {
                    "addrvec": [
                        {
                            "type": "v2",
                            "addr": "192.168.1.203:3300",
                            "nonce": 0
                        },
                        {
                            "type": "v1",
                            "addr": "192.168.1.203:6789",
                            "nonce": 0
                        }
                    ]
                },
                "addr": "192.168.1.203:6789/0",
                "public_addr": "192.168.1.203:6789/0",
                "priority": 0,
                "weight": 0,
                "crush_location": "{}"
            },
            {
                "rank": 1,
                "name": "ceph-mon2",
                "public_addrs": {
                    "addrvec": [
                        {
                            "type": "v2",
                            "addr": "192.168.1.204:3300",
                            "nonce": 0
                        },
                        {
                            "type": "v1",
                            "addr": "192.168.1.204:6789",
                            "nonce": 0
                        }
                    ]
                },
                "addr": "192.168.1.204:6789/0",
                "public_addr": "192.168.1.204:6789/0",
                "priority": 0,
                "weight": 0,
                "crush_location": "{}"
            },
            {
                "rank": 2,
                "name": "ceph-mon3",
                "public_addrs": {
                    "addrvec": [
                        {
                            "type": "v2",
                            "addr": "192.168.1.205:3300",
                            "nonce": 0
                        },
                        {
                            "type": "v1",
                            "addr": "192.168.1.205:6789",
                            "nonce": 0
                        }
                    ]
                },
                "addr": "192.168.1.205:6789/0",
                "public_addr": "192.168.1.205:6789/0",
                "priority": 0,
                "weight": 0,
                "crush_location": "{}"
            }
        ]
    }
}

4.2 Add another ceph-mgr node

# Install ceph-mgr on the ceph-mgr2 node
root@ceph-mgr2:~#apt install -y ceph-mgr
# Add ceph-mgr2 from ceph-deploy
cephadmin@ceph-deploy:~/ceph-cluster$  ceph-deploy mgr create ceph-mgr2
# Sync the configuration file to ceph-mgr2 from ceph-deploy
cephadmin@ceph-deploy:~/ceph-cluster$  ceph-deploy admin ceph-mgr2

Verify ceph-mgr:

cephadmin@ceph-deploy:~/ceph-cluster$ ceph -s
  cluster:
    id:     164ae280-496b-4272-8d79-04f94b9a3b5a
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph-mon1,ceph-mon2,ceph-mon3 (age 2m)
    mgr: ceph-mgr1(active, since 71m), standbys: ceph-mgr2
    osd: 12 osds: 12 up (since 56m), 12 in (since 56m)
 
  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   74 MiB used, 120 GiB / 120 GiB avail
    pgs:     1 active+clean

5. Deploying CephFS

5.1 Install the ceph-mds package

To use CephFS, the CephFS service needs to be deployed. Here the MDS service is deployed together with the MON service (ideally it would be deployed separately).

Install ceph-mds on all ceph-mon nodes:

root@ceph-mon1:~# apt install ceph-mds
root@ceph-mon2:~# apt install ceph-mds
root@ceph-mon3:~# apt install ceph-mds

5.2 Add the MDS service to the Ceph cluster from ceph-deploy

Run on ceph-deploy:

ceph-deploy mds create ceph-mds1
ceph-deploy mds create ceph-mds2
ceph-deploy mds create ceph-mds3

5.3 Create the CephFS metadata and data pools

# Create the cephfs-metadata pool
cephadmin@ceph-deploy:~/ceph-cluster$ ceph osd pool create cephfs-metadata 32 32
pool 'cephfs-metadata' created

# Create the cephfs-data pool
cephadmin@ceph-deploy:~/ceph-cluster$ ceph osd pool create cephfs-data 64 64
pool 'cephfs-data' created

# Create a CephFS file system named mycephfs
cephadmin@ceph-deploy:~/ceph-cluster$ ceph fs new mycephfs cephfs-metadata cephfs-data
new fs with metadata pool 4 and data pool 5

5.4 Verify CephFS

cephadmin@ceph-deploy:~/ceph-cluster$ ceph fs ls
name: mycephfs, metadata pool: cephfs-metadata, data pools: [cephfs-data ]

cephadmin@ceph-deploy:~/ceph-cluster$ ceph fs status mycephfs
mycephfs - 0 clients
========
RANK  STATE      MDS        ACTIVITY     DNS    INOS   DIRS   CAPS  
 0    active  ceph-mds1  Reqs:    0 /s    10     13     12      0   
      POOL         TYPE     USED  AVAIL  
cephfs-metadata  metadata  96.0k  37.9G  
  cephfs-data      data       0   37.9G  
STANDBY MDS  
 ceph-mds2   
 ceph-mds3   
MDS version: ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)

# Verify the status of the CephFS service
cephadmin@ceph-deploy:~/ceph-cluster$ ceph mds stat
mycephfs:1 {0=ceph-mds1=up:active} 2 up:standby

5.5 MDS high availability

Ceph MDS (metadata service) is the access entry point for CephFS, so it needs both high performance and redundancy. Suppose 4 MDS processes are started and 2 ranks are configured: 2 MDS processes are assigned to the two ranks, and the remaining 2 MDS processes each act as a standby.

For testing, ceph-mgr2 is reused as an MDS.

# Add ceph-mgr2 as the 4th MDS
# Install ceph-mds on ceph-mgr2
root@ceph-mgr2:~# apt install ceph-mds
# Add ceph-mgr2 as an MDS from ceph-deploy
cephadmin@ceph-deploy:~/ceph-cluster$ ceph-deploy mds create ceph-mgr2

# The MDS state is now 1 active and 3 standby
cephadmin@ceph-deploy:~/ceph-cluster$ ceph fs status
mycephfs - 0 clients
========
RANK  STATE      MDS        ACTIVITY     DNS    INOS   DIRS   CAPS  
 0    active  ceph-mds1  Reqs:    0 /s    10     13     12      0   
      POOL         TYPE     USED  AVAIL  
cephfs-metadata  metadata  96.0k  37.9G  
  cephfs-data      data       0   37.9G  
STANDBY MDS  
 ceph-mds2   
 ceph-mds3   
 ceph-mgr2   
MDS version: ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)

Configure a standby MDS for each rank, so that if the MDS currently serving that rank fails, another MDS takes over immediately. There are several ways to configure standbys; the common options are listed below, and a note on how recent releases handle this follows the list.

mds_standby_replay: true or false. When true, replay mode is enabled: the standby MDS keeps replaying the active MDS's journal in real time, so it can take over almost immediately if the active MDS fails. When false, the journal is only replayed at failover time, which causes a short interruption.

mds_standby_for_name: make the current MDS daemon a standby only for the MDS with the specified name.

mds_standby_for_rank: make the current MDS daemon a standby only for the specified rank (given as the rank number). When more than one CephFS file system exists, mds_standby_for_fscid can be used in addition to target a specific file system.

mds_standby_for_fscid: specify a CephFS file system ID. If mds_standby_for_rank is also set, the daemon backs up that rank of the given file system; if not, it backs up all ranks of that file system.
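
Note that these per-daemon mds_standby_* options stem from older Ceph releases and may be ignored by newer ones; in recent releases (including Pacific) standby-replay is usually enabled at the file-system level instead. A hedged sketch of that newer form, applied to the file system created above:

#Optional (newer releases): enable standby-replay for the whole file system
cephadmin@ceph-deploy:~/ceph-cluster$ ceph fs set mycephfs allow_standby_replay true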

5.5.1、Current CephFS cluster status

cephadmin@ceph-deploy:~/ceph-cluster$ ceph fs status
mycephfs - 0 clients
========
RANK  STATE      MDS        ACTIVITY     DNS    INOS   DIRS   CAPS  
 0    active  ceph-mds1  Reqs:    0 /s    10     13     12      0   
      POOL         TYPE     USED  AVAIL  
cephfs-metadata  metadata  96.0k  37.9G  
  cephfs-data      data       0   37.9G  
STANDBY MDS  
 ceph-mds2   
 ceph-mds3   
 ceph-mgr2   
MDS version: ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)

5.5.2、Current file system status

cephadmin@ceph-deploy:~/ceph-cluster$ ceph fs get mycephfs
Filesystem 'mycephfs' (1)
fs_name	mycephfs
epoch	6
flags	12
created	2022-03-31T18:50:41.454911+0800
modified	2022-03-31T18:50:42.467695+0800
tableserver	0
root	0
session_timeout	60
session_autoclose	300
max_file_size	1099511627776
required_client_features	{}
last_failure	0
last_failure_osd_epoch	0
compat	compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,7=mds uses inline data,8=no anchor table,9=file layout v2,10=snaprealm v2}
max_mds	1
in	0
up	{0=54258}
failed	
damaged	
stopped	
data_pools	[5]
metadata_pool	4
inline_data	disabled
balancer	
standby_count_wanted	1
[mds.ceph-mds1{0:54258} state up:active seq 59 addr [v2:192.168.1.203:6800/1944489540,v1:192.168.1.203:6801/1944489540] compat {c=[1],r=[1],i=[7ff]}]

5.5.3、Set the number of active MDS daemons

There are currently four MDS servers, but only one is active and three are standby. The layout can be improved by running two active and two standby.

Some production environments simply set all MDS daemons to active and run without dedicated standbys.

#Set the maximum number of simultaneously active mds daemons to 2
cephadmin@ceph-deploy:~/ceph-cluster$ ceph fs set mycephfs max_mds 2
cephadmin@ceph-deploy:~/ceph-cluster$ ceph mds stat
mycephfs:2 {0=ceph-mds1=up:active,1=ceph-mgr2=up:active} 2 up:standby

cephadmin@ceph-deploy:~/ceph-cluster$ ceph fs status
mycephfs - 0 clients
========
RANK  STATE      MDS        ACTIVITY     DNS    INOS   DIRS   CAPS  
 0    active  ceph-mds1  Reqs:    0 /s    10     13     12      0   
 1    active  ceph-mgr2  Reqs:    0 /s    10     13     11      0   
      POOL         TYPE     USED  AVAIL  
cephfs-metadata  metadata   168k  37.9G  
  cephfs-data      data       0   37.9G  
STANDBY MDS  
 ceph-mds2   
 ceph-mds3   
MDS version: ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)
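
Optionally, the file system can also be told how many standby daemons it should always have available, so the cluster reports a warning when there are too few; a small sketch matching the two standbys left in this setup:

#Optional: warn when fewer than 2 standby mds daemons are available
cephadmin@ceph-deploy:~/ceph-cluster$ ceph fs set mycephfs standby_count_wanted 2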

5.5.4、MDS high-availability tuning

Currently ceph-mds1 and ceph-mgr2 are active, while ceph-mds2 and ceph-mds3 are standby. Now ceph-mds2 can be set as the standby for ceph-mds1, and ceph-mds3 as the standby for ceph-mgr2, so that every active MDS has a fixed standby. Modify the configuration file as follows:

[global]
fsid = 164ae280-496b-4272-8d79-04f94b9a3b5a
public_network = 192.168.1.0/24
cluster_network = 192.168.88.0/24
mon_initial_members = ceph-mon1
mon_host = 192.168.1.203
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx

[mds.ceph-mds2]
mds_standby_for_name = ceph-mds1
mds_standby_replay = true
[mds.ceph-mds3]
mds_standby_for_name = ceph-mgr2
mds_standby_replay = true

Distribute the configuration file and restart the mds services:

#Push the config file so that each mds service picks up the new settings on restart
cephadmin@ceph-deploy:/etc/ceph$ ceph-deploy --overwrite-conf config push ceph-mon3
cephadmin@ceph-deploy:/etc/ceph$ ceph-deploy --overwrite-conf config push ceph-mon2
cephadmin@ceph-deploy:/etc/ceph$ ceph-deploy --overwrite-conf config push ceph-mon1
cephadmin@ceph-deploy:/etc/ceph$ ceph-deploy --overwrite-conf config push ceph-mgr2

#Restart the mds daemons: first the active ones (ceph-mds1 and ceph-mgr2), then the standby ones (ceph-mds2 and ceph-mds3)
cephadmin@ceph-mon1:~$ systemctl restart ceph-mds@ceph-mds1.service
cephadmin@ceph-mgr2:~$ systemctl restart ceph-mds@ceph-mgr2.service
cephadmin@ceph-mon2:~$ systemctl restart ceph-mds@ceph-mds2.service
cephadmin@ceph-mon3:~$ systemctl restart ceph-mds@ceph-mds3.service

Verify the mds

cephadmin@ceph-deploy:~/ceph-cluster$ ceph fs status
mycephfs - 0 clients
========
RANK  STATE      MDS        ACTIVITY     DNS    INOS   DIRS   CAPS  
 0    active  ceph-mds2  Reqs:    0 /s    10     13     12      0   
 1    active  ceph-mds1  Reqs:    0 /s    10     13     11      0   
      POOL         TYPE     USED  AVAIL  
cephfs-metadata  metadata   168k  37.9G  
  cephfs-data      data       0   37.9G  
STANDBY MDS  
 ceph-mgr2   
 ceph-mds3   
MDS version: ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)

Check the mapping between active and standby MDS daemons:

cephadmin@ceph-deploy:~/ceph-cluster$ ceph fs get mycephfs
Filesystem 'mycephfs' (1)
fs_name	mycephfs
epoch	31
flags	12
created	2022-03-31T18:50:41.454911+0800
modified	2022-03-31T19:16:16.871263+0800
tableserver	0
root	0
session_timeout	60
session_autoclose	300
max_file_size	1099511627776
required_client_features	{}
last_failure	0
last_failure_osd_epoch	139
compat	compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,7=mds uses inline data,8=no anchor table,9=file layout v2,10=snaprealm v2}
max_mds	2
in	0,1
up	{0=54477,1=54459}
failed	
damaged	
stopped	
data_pools	[5]
metadata_pool	4
inline_data	disabled
balancer	
standby_count_wanted	1
[mds.ceph-mds2{0:54477} state up:active seq 6 addr [v2:192.168.1.204:6800/1714341319,v1:192.168.1.204:6801/1714341319] compat {c=[1],r=[1],i=[7ff]}]
[mds.ceph-mds1{1:54459} state up:active seq 7 addr [v2:192.168.1.203:6800/579540118,v1:192.168.1.203:6801/579540118] compat {c=[1],r=[1],i=[7ff]}]

6、Deploy RadosGW

Deploy the ceph-mgr1 and ceph-mgr2 servers as a highly available radosGW service.

6.1、Install radosgw on ceph-mgr1 and ceph-mgr2

root@ceph-mgr1:~# apt install radosgw -y
root@ceph-mgr2:~# apt install radosgw -y

6.2、Initialize ceph-mgr1 and ceph-mgr2 as radosGW services from the ceph-deploy server

#Note: if ceph.conf was changed earlier, push the updated ceph.conf to ceph-mgr1 and ceph-mgr2 again before running these commands
cephadmin@ceph-deploy:~/ceph-cluster$ ceph-deploy --overwrite-conf rgw create ceph-mgr1
cephadmin@ceph-deploy:~/ceph-cluster$ ceph-deploy --overwrite-conf rgw create ceph-mgr2
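
By default radosgw listens on port 7480, which is the port used in the curl tests in 6.4. If a different port or frontend is wanted, it can be set per rgw instance in ceph.conf and pushed out before restarting the gateways. This is only a sketch, under the assumption that the instance names match the client.rgw.* names shown in the ps output in 6.3:

#Optional ceph.conf snippet: change the radosgw frontend/port (default is beast on 7480)
[client.rgw.ceph-rgw1]
rgw_frontends = "beast port=8080"
[client.rgw.ceph-rgw2]
rgw_frontends = "beast port=8080"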

6.3、Verify the radosgw status

Check the radosgw processes on the mgr nodes

root@ceph-mgr1:~# ps aux | grep radosgw
ceph      2782  0.5  2.8 6277308 57248 ?       Ssl  19:22   0:00 /usr/bin/radosgw -f --cluster ceph --name client.rgw.ceph-rgw1 --setuser ceph --setgroup ceph
root      3478  0.0  0.0  14436  1068 pts/0    R+   19:25   0:00 grep --color=auto radosgw


root@ceph-mgr2:~# ps aux | grep radosgw
ceph      2907  0.4  2.7 6277280 56888 ?       Ssl  19:22   0:00 /usr/bin/radosgw -f --cluster ceph --name client.rgw.ceph-rgw2 --setuser ceph --setgroup ceph
root      3567  0.0  0.0  14436  1048 pts/0    S+   19:26   0:00 grep --color=auto radosgw

Check the cluster's radosgw status from ceph-deploy

cephadmin@ceph-deploy:~/ceph-cluster$ ceph -s
  cluster:
    id:     164ae280-496b-4272-8d79-04f94b9a3b5a
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph-mon1,ceph-mon2,ceph-mon3 (age 2h)
    mgr: ceph-mgr2(active, since 2h), standbys: ceph-mgr1
    mds: 2/2 daemons up, 2 standby
    osd: 12 osds: 12 up (since 119m), 12 in (since 24h)
    rgw: 2 daemons active (2 hosts, 1 zones)
 
  data:
    volumes: 1/1 healthy
    pools:   8 pools, 233 pgs
    objects: 230 objects, 8.5 KiB
    usage:   203 MiB used, 120 GiB / 120 GiB avail
    pgs:     233 active+clean
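
Part of the 8 pools shown above were created automatically by radosgw when it started (.rgw.root plus several default.rgw.* pools); they can be listed as a quick sanity check:

#Optional: list the pools created automatically by radosgw
cephadmin@ceph-deploy:~/ceph-cluster$ ceph osd pool ls | grep rgw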

6.4、Test access to the radosgw service

 

cephadmin@ceph-deploy:~/ceph-cluster$ curl 192.168.1.201:7480
<?xml version="1.0" encoding="UTF-8"?><ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>anonymous</ID><DisplayName></DisplayName></Owner><Buckets></Buckets></ListAllMyBucketsResult>cephadmin@ceph-deploy:~/ceph-cluster$ 
cephadmin@ceph-deploy:~/ceph-cluster$ 
cephadmin@ceph-deploy:~/ceph-cluster$ curl 192.168.1.202:7480
<?xml version="1.0" encoding="UTF-8"?><ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>anonymous</ID><DisplayName></DisplayName></Owner><Buckets></Buckets></ListAllMyBucketsResult>cephadmin@ceph-deploy:~/ceph-cluster$

 

  

 

 
