Ceph API and deployment documentation references
hostnamectl set-hostname ceph1.cn   # run on node 1
hostnamectl set-hostname ceph2.cn   # run on node 2
hostnamectl set-hostname ceph3.cn   # run on node 3
vi /etc/hosts
192.168.130.81 ceph1
192.168.130.82 ceph2
192.168.130.83 ceph3
scp /etc/hosts 192.168.130.82:/etc/hosts
scp /etc/hosts 192.168.130.83:/etc/hosts
ssh-keygen
ssh-copy-id ceph1
ssh-copy-id ceph2
ssh-copy-id ceph3
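A quick sanity check (assuming the /etc/hosts entries above are already in place) that passwordless SSH now works to every node:
for h in ceph1 ceph2 ceph3; do ssh $h hostname; done   # should print each hostname with no password prompt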
vi /etc/yum.repos.d/ceph.repo
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/x86_64/
gpgcheck=0
priority=1
[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/noarch/
gpgcheck=0
priority=1
[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/SRPMS
gpgcheck=0
priority=1
scp /etc/yum.repos.d/ceph.repo ceph2:/etc/yum.repos.d/ceph.repo
scp /etc/yum.repos.d/ceph.repo ceph3:/etc/yum.repos.d/ceph.repo
yum clean all ; yum makecache
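Optionally confirm the new repos are registered before installing anything:
yum repolist enabled | grep -i ceph   # the three [ceph*] repos above should appear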
systemctl stop firewalld; systemctl disable firewalld; iptables -F; setenforce 0
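setenforce 0 only lasts until reboot; to keep SELinux off permanently, also edit its config (the standard CentOS 7 approach):
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config   # persists across reboots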
rm /etc/localtime   # remove the existing link
ln -vs /usr/share/zoneinfo/Asia/Shanghai /etc/localtime   # symlink to the Shanghai timezone
yum install ntpdate -y
ntpdate time.nist.gov
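ntpdate is a one-shot sync, so clocks will drift again; one simple option (a sketch only, the schedule and NTP source are arbitrary) is a cron job:
( crontab -l 2>/dev/null; echo "*/30 * * * * /usr/sbin/ntpdate time.nist.gov >/dev/null 2>&1" ) | crontab -   # resync every 30 minutes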
# On ceph1:
mkfs.xfs /dev/sdb -f
mkdir /var/data1
mount /dev/sdb /var/data1
echo "/dev/sdb /var/data1 xfs defaults 0 0" >> /etc/fstab   # mount at boot
mount -a
# On ceph2:
mkfs.xfs /dev/sdb -f
mkdir /var/data2
mount /dev/sdb /var/data2
echo "/dev/sdb /var/data2 xfs defaults 0 0" >> /etc/fstab
mount -a
# On ceph3:
mkfs.xfs /dev/sdb -f
mkdir /var/data3.1
mount /dev/sdb /var/data3.1
echo "/dev/sdb /var/data3.1 xfs defaults 0 0" >> /etc/fstab
mount -a
[root@ceph1 ~]# yum -y install ceph-deploy
Create the monitor service
[root@ceph1 ~]# mkdir /etc/ceph && cd /etc/ceph
[root@ceph1 ceph]# ceph-deploy new ceph1   # make ceph1 the initial monitor / admin node
[root@ceph1 ceph]# vi /etc/ceph/ceph.conf  # add both lines below under [global]
osd_pool_default_size = 2   # allow normal reads/writes with only two replicas
public_network = 192.168.130.0/24   # groundwork for running multiple monitors later
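For reference, after ceph-deploy new plus the two edits above, /etc/ceph/ceph.conf should look roughly like this sketch (the fsid is generated by ceph-deploy new and will differ; the auth lines are its defaults):
[global]
fsid = <generated by ceph-deploy new>
mon_initial_members = ceph1
mon_host = 192.168.130.81
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
osd_pool_default_size = 2
public_network = 192.168.130.0/24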
ceph-deploy install ceph1 ceph2 ceph3
cd /etc/ceph
ceph-deploy mon create ceph1
ceph-deploy gatherkeys ceph1
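Sanity check before preparing OSDs (assumes the working directory is still /etc/ceph, where gatherkeys drops the keyrings):
ceph mon stat            # ceph1 should be the single monitor, in quorum
ls /etc/ceph/*.keyring   # admin and bootstrap keyrings fetched by gatherkeys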
ceph-deploy osd prepare ceph1:/var/data1 ceph2:/var/data2 ceph3:/var/data3.1   # prepare the directories that will back the OSDs
# on each respective node, make the OSD data directory writable
chmod 777 -R /var/data1
chmod 777 -R /var/data2
chmod 777 -R /var/data3.1
cd /etc/ceph/
ceph-deploy admin ceph1 ceph2 ceph3
On each node, grant read permission on the admin keyring
chmod +r /etc/ceph/ceph.client.admin.keyring
Activate the OSDs
ceph-deploy osd activate ceph1:/var/data1 ceph2:/var/data2 ceph3:/var/data3.1
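After activation, all three OSDs should be up and in:
ceph osd tree   # one OSD per host, each reporting "up"
ceph osd stat   # expect something like: 3 osds: 3 up, 3 in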
ceph-deploy mds create ceph2 ceph3
ceph mds stat
ceph -s
rados df   # check usage
ceph osd pool create cephdata 128    # metadata pool with 128 PGs
ceph osd pool create cephdata2 128   # data pool with 128 PGs
ceph fs new cephfs cephdata cephdata2   # syntax: ceph fs new <fs name> <metadata pool> <data pool>
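To try the new filesystem, mount it with the kernel CephFS client (a sketch; assumes the client can reach the monitor, cephx is enabled, and /mnt/cephfs is an arbitrary mount point):
mkdir -p /mnt/cephfs
mount -t ceph 192.168.130.81:6789:/ /mnt/cephfs -o name=admin,secret=$(ceph auth print-key client.admin)
df -h /mnt/cephfs   # capacity should reflect the cluster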
ceph-deploy install --rgw ceph1
ceph-deploy admin ceph1
ceph-deploy rgw create ceph1
ceph-deploy --overwrite-conf config push ceph1 ceph2 ceph3
systemctl list-unit-files | grep ceph
systemctl restart ceph-radosgw.target
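A quick check that the gateway is answering; on jewel the civetweb frontend listens on port 7480 by default (unless rgw_frontends was changed):
curl http://192.168.130.81:7480/   # anonymous request; returns a ListAllMyBuckets XML document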
http://192.168.130.81:5000/admin/usage?format=json
ceph-rest-api -c /etc/ceph/ceph.conf -n client.admin --cluster ceph     # run in the foreground
ceph-rest-api -c /etc/ceph/ceph.conf -n client.admin --cluster ceph &   # run in the background
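The REST endpoints mirror the ceph CLI commands; two examples taken from the links below (assumes the API is listening on the default 0.0.0.0:5000):
curl http://192.168.130.81:5000/api/v0.1/health
curl "http://192.168.130.81:5000/api/v0.1/osd/pool/get?pool=rbd&var=size"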
API endpoints and related reference documentation
http://docs.ceph.org.cn/start/quick-rgw/#id1
https://www.cnblogs.com/zengzhihua/p/9829472.html
https://cloud.tencent.com/developer/article/1032867
http://docs.ceph.org.cn/man/8/ceph-rest-api/
http://192.168.130.81:5000/api/v0.1/
http://192.168.130.81:5000/api/v0.1/osd/pool/get?pool=rbd&var=size
http://docs.ceph.org.cn/radosgw/
Enabling the ceph-rest-api
1. On the Ceph monitor server, edit /etc/ceph/ceph.conf and add:
[client.admin]   # the section matching the -n name used in step 3; scoping it here avoids setting public addr globally
public addr = 1.2.0.0:40000
2. If nothing is changed in step 1, the API defaults to listening on 0.0.0.0:5000.
3. Start the ceph-rest-api:
ceph-rest-api -n client.admin
Object storage upload and download
https://blog.csdn.net/lwl971963866/article/details/76833826
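A minimal upload/download sketch via the S3 API (testuser and testbucket are hypothetical names; s3cmd must first be pointed at the gateway via s3cmd --configure, using the printed keys and host_base 192.168.130.81:7480):
radosgw-admin user create --uid=testuser --display-name="Test User"   # prints access_key and secret_key
s3cmd mb s3://testbucket                          # create a bucket
s3cmd put /etc/hosts s3://testbucket/hosts        # upload
s3cmd get s3://testbucket/hosts /tmp/hosts.copy   # download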