Using GlusterFS Storage with Kubernetes

References:

https://www.cnblogs.com/breezey/p/8849466.html

https://www.cnblogs.com/zhaojiedi1992/p/zhaojiedi_liunx_54_kubernates_glusterfs-heketi.html

https://www.cnblogs.com/panwenbin-logs/p/10231859.html

https://www.cnblogs.com/huangyanqi/p/8406534.html

Deploying Heketi and Gluster in k8s, part 1: https://blog.51cto.com/newfly/2134514

Deploying Heketi and Gluster in k8s, part 2: https://blog.51cto.com/newfly/2139393

Environment

IP address      Deployed components
192.168.10.11   GlusterFS, Heketi
192.168.10.12   GlusterFS
192.168.10.13   GlusterFS
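
Heketi manages the raw disks itself through LVM, so each node's /dev/sdb must be a bare block device with no partitions or filesystem on it. A minimal prep sketch, assuming /dev/sdb is dedicated to GlusterFS:

wipefs -a /dev/sdb	# WARNING: destroys all signatures on the disk
lsblk /dev/sdb		# should show no partitions or filesystems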

Installing GlusterFS

# Install GlusterFS with yum (on all three nodes)
yum install -y centos-release-gluster 
yum install -y glusterfs glusterfs-server glusterfs-fuse glusterfs-rdma

# Start glusterd
systemctl start glusterd && systemctl enable glusterd && systemctl status glusterd
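
If firewalld is running, the Gluster ports must also be open between the nodes. A sketch, assuming the glusterfs service definition shipped with CentOS 7's firewalld package:

firewall-cmd --add-service=glusterfs --permanent
firewall-cmd --reload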

From any one node, probe the other nodes

gluster peer probe 192.168.10.12
gluster peer probe 192.168.10.13

gluster peer status		# check cluster status
Number of Peers: 2

Hostname: 192.168.10.12
Uuid: 80d5879f-ba7a-4a9b-817c-9ebb05d7086e
State: Peer in Cluster (Connected)

Hostname: 192.168.10.13
Uuid: 7b0f53c9-296b-4924-b347-3dabfc224118
State: Peer in Cluster (Connected)
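
gluster pool list is a more compact check that also includes the local node:

gluster pool list	# one line per node (UUID, hostname, state), including localhost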

Installing Heketi (on 192.168.10.11)

yum install heketi heketi-client -y

# Set up SSH key-based authentication so heketi can connect to every GlusterFS node

ssh-keygen -f /etc/heketi/heketi_key -t rsa -N ''
chown heketi:heketi  /etc/heketi/heketi*
ssh-copy-id -i /etc/heketi/heketi_key.pub root@192.168.10.11
ssh-copy-id -i /etc/heketi/heketi_key.pub root@192.168.10.12
ssh-copy-id -i /etc/heketi/heketi_key.pub root@192.168.10.13

# Verify: ssh -i /etc/heketi/heketi_key root@192.168.10.13

# Heketi's main config file /etc/heketi/heketi.json defines the listen port, authentication, and how it connects to the Gluster cluster
cat <<EOF> /etc/heketi/heketi.json
{
  "port": "18080",
  "use_auth": false,

  "jwt": {
    "admin": {
      "key": "adminSecret"
    },
    "user": {
      "key": "userSecret"
    }
  },

  "glusterfs": {
    "executor": "ssh",
    "sshexec": {
      "keyfile": "/etc/heketi/heketi_key",
      "user": "root",
      "port": "22",
      "fstab": "/etc/fstab"
    },
    "db": "/var/lib/heketi/heketi.db",
    "loglevel" : "debug"
  }
}
EOF

# Start the heketi service
systemctl start heketi && systemctl enable heketi && systemctl status heketi

# Test
curl http://192.168.10.11:18080/hello
# Returns: Hello from Heketi
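
Since use_auth is false above, no credentials are required. If authentication were enabled, heketi-cli would need the matching JWT key from heketi.json, along these lines:

heketi-cli --server http://192.168.10.11:18080 --user admin --secret adminSecret cluster list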

Setting Up the Heketi Topology

The topology file declares which nodes form a cluster and which block devices each node contributes as storage.

cat <<EOF> /etc/heketi/topology_demo.json
{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": ["192.168.10.11"],
              "storage": ["192.168.10.11"]
            },
            "zone": 1
          },
          "devices": ["/dev/sdb"]
        },
        {
          "node": {
            "hostnames": {
              "manage": ["192.168.10.12"],
              "storage": ["192.168.10.12"]
            },
            "zone": 1
          },
          "devices": ["/dev/sdb"]
        },
        {
          "node": {
            "hostnames": {
              "manage": ["192.168.10.13"],
              "storage": ["192.168.10.13"]
            },
            "zone": 1
          },
          "devices": ["/dev/sdb"]
        }
      ]
    }
  ]
}
EOF

# Load the topology
export HEKETI_CLI_SERVER=http://192.168.10.11:18080
echo $HEKETI_CLI_SERVER

heketi-cli topology load --json=/etc/heketi/topology_demo.json
# Output:
Creating cluster ... ID: d1475a413956ce3187ad83299ec39ef7
        Allowing file volumes on cluster.
        Allowing block volumes on cluster.
        Creating node 192.168.10.11 ... ID: 8ab4d5f236904f353c8acd4a7a721a76
                Adding device /dev/sdb ... OK
        Creating node 192.168.10.12 ... ID: 82377f431888e99ac2a2460257ee97a6
                Adding device /dev/sdb ... OK
        Creating node 192.168.10.13 ... ID: 8dc8e9eb476f23c49d7f22b78a660cc6
                Adding device /dev/sdb ... OK

# Check the cluster with the generated cluster ID
heketi-cli cluster info d1475a413956ce3187ad83299ec39ef7
# Output:
Cluster id: d1475a413956ce3187ad83299ec39ef7
Nodes:
82377f431888e99ac2a2460257ee97a6
8ab4d5f236904f353c8acd4a7a721a76
8dc8e9eb476f23c49d7f22b78a660cc6
Volumes:

Block: true

File: true
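
For the full picture (clusters, nodes, devices, and bricks), heketi-cli topology info dumps the entire topology and is a quick way to confirm every /dev/sdb was registered:

heketi-cli topology info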

Testing

# Create a test volume (size in GB)
heketi-cli volume create --size=5
# Output:
Name: vol_ada73b178c205db2bea66bca91d578c3
Size: 5
Volume Id: ada73b178c205db2bea66bca91d578c3
Cluster Id: d1475a413956ce3187ad83299ec39ef7
Mount: 192.168.10.12:vol_ada73b178c205db2bea66bca91d578c3	# the address clients mount
Mount Options: backup-volfile-servers=192.168.10.11,192.168.10.13
Block: false
Free Size: 0
Reserved Size: 0
Block Hosting Restriction: (none)
Block Volumes: []
Durability Type: replicate
Distribute Count: 1
Replica Count: 3
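
The volume Heketi created is an ordinary Gluster volume, so it can be cross-checked with the gluster CLI on any storage node (volume name taken from the output above):

gluster volume info vol_ada73b178c205db2bea66bca91d578c3	# Type: Replicate, Number of Bricks: 1 x 3 = 3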

# Mount the volume from a client
yum install centos-release-gluster -y
yum install glusterfs glusterfs-fuse -y

mount -t glusterfs 192.168.10.12:vol_ada73b178c205db2bea66bca91d578c3 /mnt	# the Mount address from the volume info above
# the mount can also be made persistent via /etc/fstab (see the sketch below)
df -h	# the mounted volume now shows up
Filesystem                                                                              Size  Used Avail Use% Mounted on
192.168.10.12:vol_ada73b178c205db2bea66bca91d578c3                                      5.0G   84M  5.0G    2% /mnt
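
A persistent mount via /etc/fstab could look like this (a sketch; _netdev defers mounting until the network is up, and backup-volfile-servers provides volfile-server failover):

192.168.10.12:vol_ada73b178c205db2bea66bca91d578c3 /mnt glusterfs defaults,_netdev,backup-volfile-servers=192.168.10.11:192.168.10.13 0 0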

heketi-cli volume delete ada73b178c205db2bea66bca91d578c3	# delete the test volume when finished

Using GlusterFS in k8s

When k8s uses GlusterFS this way, a matching PV is created and bound automatically for each PVC request, so pods simply reference the PVC.

# Create a StorageClass
cat <<EOF> gluster-storage-class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: glusterfs                       #------------- name of the StorageClass
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://192.168.10.11:18080"      #------------- heketi's IP and port
  restuser: "admin"                           #------------- heketi auth user; any value works here since use_auth is disabled
  gidMin: "40000"
  gidMax: "50000"
  volumetype: "replicate:3"                 #------------- default to 3 replicas, matching the three gluster nodes
EOF
kubectl apply -f  gluster-storage-class.yaml
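
kubectl get storageclass glusterfs should now list the class. If Heketi authentication were enabled, the provisioner would also need the admin key, conventionally kept in a Secret referenced from the StorageClass via the restauthenabled, secretNamespace and secretName parameters. A sketch of creating such a Secret (name and namespace are examples):

kubectl create secret generic heketi-secret \
  --namespace=kube-system --type=kubernetes.io/glusterfs \
  --from-literal=key=adminSecret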

# Create a PVC (assumes the myapp namespace already exists)
cat <<EOF> gluster-pvc.yaml 
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: gluster1
  namespace: myapp
  annotations:
    volume.beta.kubernetes.io/storage-class: glusterfs    #---------- the StorageClass created above; newer clusters use spec.storageClassName instead
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
EOF
kubectl apply -f gluster-pvc.yaml 
# each PVC gets one brick per replica node, and each brick is backed by its own LV
kubectl get pvc -n myapp
NAME       STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
gluster1   Bound    pvc-f07c6e74-2790-11ea-bf97-000c29d937a0   5Gi        RWX            glusterfs      15s
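
The provisioner also created and bound a PV automatically; its name is the VOLUME column above:

kubectl get pv pvc-f07c6e74-2790-11ea-bf97-000c29d937a0	# STATUS Bound, STORAGECLASS glusterfs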

vgs	# on a GlusterFS node; heketi created the vg_* volume group
  VG                                  #PV #LV #SN Attr   VSize   VFree  
  centos                                1   2   0 wz--n- <49.00g      0 
  vg_800718e6161e0bfbcf2e0105efcdb664   1   2   0 wz--n-  19.87g <14.82g
    
lvs
 LV                                     VG                                  Attr       LSize   Pool                                Origin Data%  Meta%  Move Log Cpy%Sync Convert
  root                                   centos                              -wi-ao---- <47.00g                                                                                   
  swap                                   centos                              -wi-ao----   2.00g                                                                                   
  brick_1cb2402e5d74c004cffd40f57f5cb2c7 vg_800718e6161e0bfbcf2e0105efcdb664 Vwi-aotz--   5.00g tp_dca1f5af68eeb304921c5bcf8f382308        0.24                                   
  tp_dca1f5af68eeb304921c5bcf8f382308    vg_800718e6161e0bfbcf2e0105efcdb664 twi-aotz--   5.00g                                            0.24   10.14 

Verifying with a Pod

# Create a pod for verification
cat <<EOF> heketi-nginx.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod1
  namespace: myapp
  labels:
    name: nginx-pod1
spec:
  containers:
  - name: nginx-pod1
    image: nginx:latest
    ports:
    - name: web
      containerPort: 80
    volumeMounts:
    - name: gluster-vol1
      mountPath: /usr/share/nginx/html
  volumes:
  - name: gluster-vol1
    persistentVolumeClaim:
      claimName: gluster1   # the PVC created above
EOF

kubectl apply -f heketi-nginx.yaml 

kubectl get pod -n myapp -o wide
NAME                        READY   STATUS    RESTARTS   AGE     IP             NODE            NOMINATED NODE   READINESS GATES
nginx-pod1                  1/1     Running   0          18s     172.30.98.14   192.168.10.11   <none>           <none>

kubectl exec -it nginx-pod1 -n myapp -- /bin/sh

cd /usr/share/nginx/html/
echo '192.168.10.11' >> index.html
touch fana.txt	# create a file; it will shortly be visible on every glusterfs node

# Access from another machine
curl http://172.30.98.14
# Returns: 192.168.10.11

# Log in to another glusterfs node
df -h  # a brick mounted under /var/lib/heketi/mounts/ appears
/dev/mapper/vg_6e72843352751e3822bb13b55464f7cc-brick_2434b52740c2a485d057f5f39ca54628  5.0G   33M  5.0G    1% /var/lib/heketi/mounts/vg_6e72843352751e3822bb13b55464f7cc/brick_2434b52740c2a485d057f5f39ca54628

# cd into it and you will find index.html and fana.txt
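
A quick way to confirm replication from the Heketi node is to list the brick contents on all three storage nodes. A sketch, reusing the SSH key set up earlier; heketi stores data in a brick/ subdirectory under each mount, and the vg_*/brick_* names differ per volume:

for n in 192.168.10.11 192.168.10.12 192.168.10.13; do
  ssh -i /etc/heketi/heketi_key root@$n 'ls /var/lib/heketi/mounts/vg_*/brick_*/brick'
done
# each node should list: fana.txt  index.html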