Continuing from the previous post: next we create the storage pool and then set up the Volumes.
4. Create the pool:
$ gluster peer probe gluster2
$ gluster peer probe gluster3
# Check the pool
$ gluster pool list
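For reference, the output should look roughly like the lines below once both probes succeed (the UUIDs are placeholders and will differ on a real cluster):

UUID        Hostname    State
<uuid-1>    gluster2    Connected
<uuid-2>    gluster3    Connected
<uuid-3>    localhost   Connected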
5. Directory preparation before creating the Volumes
$ ansible glusterfs -m file -a "path=/bricks/ovirt/brick state=directory"
$ ansible glusterfs -m file -a "path=/bricks/openshift/brick state=directory"
$ ansible glusterfs -m file -a "path=/bricks/openstack/brick state=directory"
# This package makes SELinux management easier
$ ansible glusterfs -m yum -a "name=policycoreutils-python"
# Change the SELinux labels
$ ansible glusterfs -m shell -a "semanage fcontext -a -t glusterd_brick_t /bricks/ovirt/brick"
$ ansible glusterfs -m shell -a "semanage fcontext -a -t glusterd_brick_t /bricks/openshift/brick"
$ ansible glusterfs -m shell -a "semanage fcontext -a -t glusterd_brick_t /bricks/openstack/brick"
$ ansible glusterfs -m shell -a "restorecon -Rv /bricks"
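If you want to confirm that the labels actually landed on every node, a quick check (not part of the original steps) is to list the SELinux contexts of the brick directories:

# Verify the glusterd_brick_t context on all three brick directories
$ ansible glusterfs -m shell -a "ls -Zd /bricks/*/brick"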
6. Create the Volumes; all three volume types are used here
# replica: 2 data copies plus 1 arbiter brick
# The rdma transport may not be supported inside virtual machines; specifying it here is optional
$ gluster volume create ovirt replica 3 arbiter 1 transport tcp,rdma gluster1:/bricks/ovirt/brick gluster2:/bricks/ovirt/brick gluster3:/bricks/ovirt/brick
# distribute
$ gluster volume create openstack gluster1:/bricks/openstack/brick gluster2:/bricks/openstack/brick gluster3:/bricks/openstack/brick
# disperse
$ gluster volume create openshift disperse 3 disperse-data 2 transport tcp gluster1:/bricks/openshift/brick gluster2:/bricks/openshift/brick gluster3:/bricks/openshift/brick
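One detail worth calling out: newly created volumes are not started automatically, and the mounts in the next step will fail until they are. Starting them looks like this:

$ gluster volume start ovirt
$ gluster volume start openstack
$ gluster volume start openshift
# Confirm type, bricks and status of each volume
$ gluster volume info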
7. Do a quick verification on the DNS host
$ yum -y install glusterfs-fuse
$ cd /mnt && mkdir {ovirt,openstack,openshift}
$ mount -t glusterfs gluster1:/ovirt /mnt/ovirt/
$ mount -t glusterfs gluster1:/openshift /mnt/openshift/
$ mount -t glusterfs gluster1:/openstack /mnt/openstack/
$ df -hT
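Optionally, if these test mounts should survive a reboot, entries along the following lines could be added to /etc/fstab on the client (a sketch; _netdev delays the mount until the network is up):

# /etc/fstab
gluster1:/ovirt     /mnt/ovirt     glusterfs defaults,_netdev 0 0
gluster1:/openshift /mnt/openshift glusterfs defaults,_netdev 0 0
gluster1:/openstack /mnt/openstack glusterfs defaults,_netdev 0 0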