docker 部署ceph

https://zhang.ge/5136.html
https://m.linuxidc.com/Linux/2017-10/148041.htm

# Disable the firewall and switch SELinux to permissive mode (otherwise
# Ceph/Docker networking is blocked).  The original chained everything with
# "&&" (and had "&&setenforce" glued together), so a single failure skipped
# the rest; run the steps independently instead.
systemctl stop firewalld.service
systemctl disable firewalld.service
iptables -F
# setenforce only lasts until reboot; make it persistent as well.
setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config
# Set this node's hostname (adjust per node: node66/node67/node68).
hostnamectl set-hostname node67.cn
# Register all three cluster nodes in /etc/hosts.
cat >> /etc/hosts <<EOF
192.168.1.66 node66
192.168.1.67 node67
192.168.1.68 node68
EOF

# Run on 192.168.1.66 (node66): generate an SSH key and push it to the
# other nodes so later scp/ssh steps are password-less.
ssh-keygen
ssh-copy-id node67
ssh-copy-id node68

docker 安装
# Install Docker CE from the Aliyun mirror repository.
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

yum install docker-ce docker-ce-cli containerd.io -y
systemctl start docker && systemctl enable docker.service && systemctl status docker

# Configure a registry mirror.  Use '>' (overwrite), NOT '>>': appending to
# an existing daemon.json produces invalid JSON and the docker daemon then
# refuses to start.  Also make sure the directory exists first.
mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<EOF
{ "registry-mirrors": ["https://rncxm540.mirror.aliyuncs.com"] }
EOF

systemctl daemon-reload && systemctl restart docker

以下操作需在三台节点上执行:
# Pull the ceph/daemon image (run on all three nodes).
docker pull ceph/daemon:latest
时间同步
# NOTE(review): sj.sh is not shown in this document — presumably a local
# time-sync helper; confirm its contents before running.
vi sj.sh
sh sj.sh

# Set the timezone to Asia/Shanghai and sync the clock once with ntpdate.
# Use rm -f so a missing/protected /etc/localtime does not abort or prompt.
rm -f /etc/localtime                                    # remove the old symlink
ln -vs /usr/share/zoneinfo/Asia/Shanghai /etc/localtime # create the new symlink
yum install ntpdate -y
# NOTE(review): time.nist.gov is rate-limited and often unreachable from CN;
# an NTP pool server (e.g. ntp.aliyun.com) is usually a better choice.
ntpdate time.nist.gov

调整内核参数
# Kernel tuning commonly recommended for Ceph nodes: raise the PID limit
# (many OSD threads) and avoid swapping.
cat >> /etc/sysctl.conf << EOF
kernel.pid_max=4194303
vm.swappiness = 0
EOF
sysctl -p

# read_ahead: prefetch data into RAM to speed up sequential disk reads;
# per public Ceph tuning talks, 8192 is a reasonable value.
echo "8192" > /sys/block/sda/queue/read_ahead_kb

# I/O scheduler: in short, use noop for SSDs and deadline for SATA/SAS.
# Replace [x] with the actual drive letter before running (as written,
# sd[x] is a glob that only matches a literal "sdx").
echo "deadline" > /sys/block/sd[x]/queue/scheduler
echo "noop" > /sys/block/sd[x]/queue/scheduler

编辑别名
# Shell alias so "ceph ..." on the host transparently runs inside the mon
# container started below.
echo 'alias ceph="docker exec mon ceph"' >> /etc/profile
source /etc/profile

1.启动mon
# Create the mon startup script with the content below.
vi /root/start_mon.sh

#!/bin/bash
# Start the Ceph monitor in a container.  Host networking lets the mon bind
# the node's real IP; /etc/ceph and /var/lib/ceph are bind-mounted so the
# generated config/keyrings survive container re-creation and can be copied
# to the other nodes.
# NOTE(review): ceph-container conventionally expects MON_IP to be THIS
# node's own IP; a comma-separated list only works with some image
# versions — confirm against the ceph/daemon documentation.
docker run -d --net=host \
--name=mon \
--restart=always \
-v /etc/localtime:/etc/localtime \
-v /etc/ceph:/etc/ceph \
-v /var/lib/ceph:/var/lib/ceph \
-e MON_IP=192.168.1.66,192.168.1.67,192.168.1.68 \
-e CEPH_PUBLIC_NETWORK=192.168.1.0/24 \
ceph/daemon:latest mon

 

# Verify the monitor is up (uses the "ceph" alias defined earlier).
ceph -s

修改配置
# Append cluster tweaks to ceph.conf (this file is copied to the other
# nodes below).  The original wrote "mgr modules = da" — a truncation of
# "dashboard".  NOTE(review): on recent Ceph releases, prefer
# "ceph mgr module enable dashboard" over a conf entry.
cat >> /etc/ceph/ceph.conf <<EOF
# 容忍更多的时钟误差
mon clock drift allowed = 2
mon clock drift warn backoff = 30
# 允许删除pool
mon_allow_pool_delete = true

[mgr]
# 开启WEB仪表盘
mgr modules = dashboard
EOF
在另外两台节点(node67、node68)上重复以下操作,把命令中的主机名换成对应节点即可
# Distribute the cluster config, bootstrap keyrings, and the mon startup
# script to the remaining nodes.  The original only showed node67 and asked
# the reader to repeat for node68; a loop covers both.
for node in node67 node68; do
  ssh "root@${node}" mkdir -p /var/lib/ceph
  scp -r /etc/ceph "root@${node}:/etc"
  scp -r /var/lib/ceph/bootstrap* "root@${node}:/var/lib/ceph"
  scp start_mon.sh "root@${node}:/root"
done

# Then ON node67 and node68, start their monitors and check quorum:
sh start_mon.sh
ceph -s

mon: 3 daemons, quorum node66,node67,node68 (age 9s)

osd创建

# Prepare the OSD data disk: format /dev/sdb as XFS and mount it at /data1.
# WARNING: mkfs.xfs -f destroys any existing data on /dev/sdb.
mkfs.xfs -f /dev/sdb
mkdir -p /data1
cat >> /etc/fstab <<EOF
/dev/sdb /data1 xfs defaults 0 0
EOF
mount -a

# Export the bootstrap-osd keyring; without it the OSD container below
# fails to authenticate.  (The original glued the '#' comment directly onto
# the file name — "ceph.keyring#不执行..." — so '#' was NOT a comment and the
# keyring was written to a wrong path.)
ceph auth get client.bootstrap-osd -o /var/lib/ceph/bootstrap-osd/ceph.keyring

# Create the OSD startup script with the content below.
vi osd.sh

# Run the OSD in a container.  --privileged and the /dev mount let the
# daemon access block devices; --pid=host and --net=host expose host
# processes/network; OSD data lives on the XFS mount created above
# (/data1/osd is mapped to the container's OSD directory).
docker run -d \
--name=osd \
--net=host \
--restart=always \
--privileged=true \
--pid=host \
-v /etc/localtime:/etc/localtime \
-v /etc/ceph:/etc/ceph \
-v /var/lib/ceph:/var/lib/ceph \
-v /dev/:/dev/ \
-v /data1/osd:/var/lib/ceph/osd \
ceph/daemon osd

 

# Check pool/usage statistics from inside the mon container.
docker exec mon rados df

# Start the OSD on this node, then copy the script to the next node
# (repeat the disk-prep + osd.sh steps there as well).
sh osd.sh
scp osd.sh root@node67:/root

 

#!/bin/bash
# Start the Ceph manager (mgr) daemon in a container; it reuses the same
# /etc/ceph config and /var/lib/ceph state as the mon/osd containers.
sudo docker run -d --net=host \
--name=mgr \
--restart=always \
--privileged=true \
-v /etc/localtime:/etc/localtime \
-v /etc/ceph:/etc/ceph \
-v /var/lib/ceph:/var/lib/ceph \
ceph/daemon:latest mgr

 

#!/bin/bash
# Start the CephFS metadata server (mds).  CEPHFS_CREATE=0 tells the
# container NOT to auto-create a filesystem, so the *_POOL_PG variables are
# effectively unused here — the pools and the fs are created manually below.
sudo docker run -d \
--net=host \
--name=mds \
--restart=always \
--privileged=true \
-v /etc/localtime:/etc/localtime \
-v /etc/ceph:/etc/ceph \
-v /var/lib/ceph/:/var/lib/ceph/ \
-e CEPHFS_CREATE=0 \
-e CEPHFS_METADATA_POOL_PG=512 \
-e CEPHFS_DATA_POOL_PG=512 \
ceph/daemon:latest mds
# Create the data pool
ceph osd pool create cephfs_data 128 128

# Create the metadata pool
ceph osd pool create cephfs_metadata 32 32

# Create the cephfs.  ("iyuwnei" in the original was a typo: the status
# output later in this document reports the fs name as "iyunwei".)
ceph fs new iyunwei cephfs_metadata cephfs_data

# List filesystems
ceph fs ls

 


name: iyunwei, metadata pool: cephfs_metadata, data pools: [cephfs_data ]


#!/bin/bash
# Start the RADOS gateway (S3/Swift API).  Mount the same /etc/ceph and
# /var/lib/ceph used by every other daemon in this document — the original
# mounted /data/ceph/etc and /data/ceph/lib, which were never populated
# with this cluster's config/keyrings, so rgw could not join the cluster.
docker run -d \
--net=host \
--name=rgw \
--restart=always \
--privileged=true \
-v /etc/localtime:/etc/localtime \
-v /etc/ceph:/etc/ceph \
-v /var/lib/ceph:/var/lib/ceph \
ceph/daemon:latest rgw

#!/bin/bash
# Start the rbd-mirror daemon (asynchronous RBD image replication between
# Ceph clusters); reuses the shared config/state mounts.
docker run -d \
--net=host \
--name=rbd \
--restart=always \
--privileged=true \
-v /etc/localtime:/etc/localtime \
-v /etc/ceph:/etc/ceph \
-v /var/lib/ceph:/var/lib/ceph \
ceph/daemon:latest rbd_mirror

 

单台快速部署docker参考

# 1. Create a dedicated bridge network for the single-host cluster
docker network create --driver bridge --subnet 172.20.0.0/16 ceph-network
docker network inspect ceph-network
# 2. Remove any old ceph containers.  xargs -r skips docker rm entirely when
#    grep matches nothing — the original "docker rm -f $(...)" errored out
#    with no arguments in that case.
docker ps -a | grep ceph | awk '{print $1}' | xargs -r docker rm -f
# 3. Clean up old ceph directories, if any exist
rm -rf /www/ceph /var/lib/ceph/  /www/osd/
# 4. Create the directories used as volumes and fix their ownership
#    (64045 is the ceph uid/gid inside these images — TODO confirm for the
#    image version you pull)
mkdir -p /www/ceph /var/lib/ceph/osd /www/osd/
chown -R 64045:64045 /var/lib/ceph/osd/
chown -R 64045:64045 /www/osd/
# 5. Start the monitor node
docker run -itd --name monnode --network ceph-network --ip 172.20.0.10 -e MON_NAME=monnode -e MON_IP=172.20.0.10 -v /www/ceph:/etc/ceph ceph/mon
# 6. Register three OSD ids on the monitor
docker exec monnode ceph osd create
docker exec monnode ceph osd create
docker exec monnode ceph osd create
# 7. Start the OSD nodes
docker run -itd --name osdnode0 --network ceph-network -e CLUSTER=ceph -e WEIGHT=1.0 -e MON_NAME=monnode -e MON_IP=172.20.0.10 -v /www/ceph:/etc/ceph -v /www/osd/0:/var/lib/ceph/osd/ceph-0 ceph/osd
docker run -itd --name osdnode1 --network ceph-network -e CLUSTER=ceph -e WEIGHT=1.0 -e MON_NAME=monnode -e MON_IP=172.20.0.10 -v /www/ceph:/etc/ceph -v /www/osd/1:/var/lib/ceph/osd/ceph-1 ceph/osd
docker run -itd --name osdnode2 --network ceph-network -e CLUSTER=ceph -e WEIGHT=1.0 -e MON_NAME=monnode -e MON_IP=172.20.0.10 -v /www/ceph:/etc/ceph -v /www/osd/2:/var/lib/ceph/osd/ceph-2 ceph/osd
# 8. Add two more monitors to form a quorum
docker run -itd --name monnode_1 --network ceph-network --ip 172.20.0.11 -e MON_NAME=monnode_1 -e MON_IP=172.20.0.11 -v /www/ceph:/etc/ceph ceph/mon
docker run -itd --name monnode_2 --network ceph-network --ip 172.20.0.12 -e MON_NAME=monnode_2 -e MON_IP=172.20.0.12 -v /www/ceph:/etc/ceph ceph/mon
# 9. Start the RADOS gateway (host port 9080 -> container port 80)
docker run -itd --name gwnode --network ceph-network --ip 172.20.0.9 -p 9080:80 -e RGW_NAME=gwnode -v /www/ceph:/etc/ceph ceph/radosgw
# 10. Check cluster status
sleep 10 && docker exec monnode ceph -s

 

查看状态

# Check cluster usage and status.  (The leading "18"/"19" in the original
# were shell-history numbers pasted by mistake and made the lines unrunnable.)
docker exec monnode rados df
docker exec monnode ceph -s

 

posted @ 2019-05-01 17:44  夜辰雪扬  阅读(1237)  评论(0)    收藏  举报