ceph 008 Ceph multi-site gateway (object storage disaster recovery) and the CephFS file system

clienta acts as an admin node for the cluster, so it needs the keyring and the configuration file.
A real client of the object gateway, by contrast, only needs HTTP access to the RGW endpoint (curl is enough).
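For example, an anonymous request to a running gateway (here the serverc endpoint configured later in this lab) should get an S3-style XML response back, with no keyring involved:

curl http://serverc:80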

Ceph multi-site gateway

A disaster-recovery solution for object storage.

Zones synchronize data with each other.
The RGWs that synchronize with each other are put into one group (a zonegroup).
Synchronization has a direction:
there is a master zone,
and the master zone syncs to the other zones.
Data within a realm can be synchronized (including metadata):
create a bucket in the realm, and the zonegroups under that realm will see it.
A zone cannot span clusters.
Metadata changes must go through the master zone and are then synced to the other zones.


Changes only propagate when the period is committed (the period update --commit steps below).
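A quick way to inspect the current hierarchy and see which zone is the master is to dump the period; this is a sketch and assumes a realm has already been configured:

radosgw-admin period get    # shows the epoch, the zonegroups, and master_zone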

Hands-on

Official documentation:
https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/5/html/object_gateway_guide/advanced-configuration#migrating-a-single-site-system-to-multi-site-rgw

[student@workstation ~]$ lab start object-radosgw

Remove the pre-existing RGW service first.
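The lab setup script takes care of this, but done by hand it would look roughly like the following; the service name rgw.default is hypothetical, take the real one from ceph orch ls:

ceph orch ls rgw           # find the name of the existing RGW service
ceph orch rm rgw.default   # remove it (hypothetical service name)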

1. Create a realm

[root@clienta ~]# radosgw-admin realm create --rgw-realm=myrealm --default
{
    "id": "084b1291-061d-4501-8ddd-bebccd4183bc",
    "name": "myrealm",
    "current_period": "b6d342c0-ae0b-4c59-9594-75e2cba281e0",
    "epoch": 1
}

[root@clienta ~]# radosgw-admin realm list
{
    "default_info": "084b1291-061d-4501-8ddd-bebccd4183bc",
    "realms": [
        "myrealm"
    ]
}
[root@clienta ~]# 

A zone and a zonegroup named default exist out of the box; we will not use the defaults.

2. Create a zonegroup

[root@clienta ~]# radosgw-admin zonegroup create --rgw-realm=myrealm --rgw-zonegroup=myzonegroup --master  --default
{
    "id": "543c6500-a4d8-4653-9329-88d8e6b70fe3",
    "name": "myzonegroup",
    "api_name": "myzonegroup",
    "is_master": "true",
    "endpoints": [],
    "hostnames": [],
    "hostnames_s3website": [],
    "master_zone": "",
    "zones": [],
    "placement_targets": [],
    "default_placement": "",
    "realm_id": "084b1291-061d-4501-8ddd-bebccd4183bc",
    "sync_policy": {
        "groups": []
    }
}
[root@clienta ~]# 

From now on, radosgw-admin commands will default to this myzonegroup.

3. Create a zone

[root@clienta ~]# radosgw-admin zone create --rgw-realm=myrealm --rgw-zonegroup=myzonegroup --rgw-zone=myzone1 --master --default
{
    "id": "6d3b4396-1966-4b1d-a49a-3c48f908a286",
    "name": "myzone1",
    "domain_root": "myzone1.rgw.meta:root",
    "control_pool": "myzone1.rgw.control",
    "gc_pool": "myzone1.rgw.log:gc",
    "lc_pool": "myzone1.rgw.log:lc",
    "log_pool": "myzone1.rgw.log",
    "intent_log_pool": "myzone1.rgw.log:intent",
    "usage_log_pool": "myzone1.rgw.log:usage",
    "roles_pool": "myzone1.rgw.meta:roles",
    "reshard_pool": "myzone1.rgw.log:reshard",
    "user_keys_pool": "myzone1.rgw.meta:users.keys",
    "user_email_pool": "myzone1.rgw.meta:users.email",
    "user_swift_pool": "myzone1.rgw.meta:users.swift",
    "user_uid_pool": "myzone1.rgw.meta:users.uid",
    "otp_pool": "myzone1.rgw.otp",
    "system_key": {
        "access_key": "",
        "secret_key": ""
    },
    "placement_pools": [
        {
            "key": "default-placement",
            "val": {
                "index_pool": "myzone1.rgw.buckets.index",
                "storage_classes": {
                    "STANDARD": {
                        "data_pool": "myzone1.rgw.buckets.data"
                    }
                },
                "data_extra_pool": "myzone1.rgw.buckets.non-ec",
                "index_type": 0
            }
        }
    ],
    "realm_id": "084b1291-061d-4501-8ddd-bebccd4183bc",
    "notif_pool": "myzone1.rgw.log:notif"
}
[root@clienta ~]# 

Verify:

[root@clienta ~]# radosgw-admin realm list
{
    "default_info": "084b1291-061d-4501-8ddd-bebccd4183bc",
    "realms": [
        "myrealm"
    ]
}
[root@clienta ~]# radosgw-admin zone list
{
    "default_info": "6d3b4396-1966-4b1d-a49a-3c48f908a286",
    "zones": [
        "myzone1",
        "default"
    ]
}
[root@clienta ~]# radosgw-admin zonegroup list
{
    "default_info": "543c6500-a4d8-4653-9329-88d8e6b70fe3",
    "zonegroups": [
        "myzonegroup",
        "default"
    ]
}
[root@clienta ~]# radosgw-admin zonegroup get --rgw-realm=myrealm
{
    "id": "543c6500-a4d8-4653-9329-88d8e6b70fe3",
    "name": "myzonegroup",
    "api_name": "myzonegroup",
    "is_master": "true",
    "endpoints": [],
    "hostnames": [],
    "hostnames_s3website": [],
    "master_zone": "6d3b4396-1966-4b1d-a49a-3c48f908a286",
    "zones": [
        {
            "id": "6d3b4396-1966-4b1d-a49a-3c48f908a286",
            "name": "myzone1",
            "endpoints": [],
            "log_meta": "false",
            "log_data": "false",
            "bucket_index_max_shards": 11,
            "read_only": "false",
            "tier_type": "",
            "sync_from_all": "true",
            "sync_from": [],
            "redirect_zone": ""
        }
    ],
    "placement_targets": [
        {
            "name": "default-placement",
            "tags": [],
            "storage_classes": [
                "STANDARD"
            ]
        }
    ],
    "default_placement": "default-placement",
    "realm_id": "084b1291-061d-4501-8ddd-bebccd4183bc",
    "sync_policy": {
        "groups": []
    }
}
[root@clienta ~]# 

4. Commit the changes

[root@clienta ~]# radosgw-admin period update --rgw-realm=myrealm --commit

5. Deploy the RGW service

[root@clienta ~]# ceph orch apply rgw test  --realm=myrealm --zone=myzone1  --placement="2 serverc.lab.example.com serverd.lab.example.com" --port=80
Scheduled rgw.test update...
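Before continuing, it is worth confirming that both daemons actually came up (daemon names vary per deployment):

ceph orch ps | grep rgw    # both rgw.test daemons should show 'running'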

Set the zonegroup endpoint (modify the metadata):

[root@clienta ~]# radosgw-admin zonegroup modify  --rgw-realm=myrealm --rgw-zonegroup=myzonegroup --endpoints http://serverc:80 --master --default
{
    "id": "543c6500-a4d8-4653-9329-88d8e6b70fe3",
    "name": "myzonegroup",
    "api_name": "myzonegroup",
    "is_master": "true",
    "endpoints": [
        "http://serverc:80"
    ],
    ...

6. Create a system user

Synchronization requires credentials: when the secondary cluster pulls data, it must authenticate as this user.

[root@clienta ~]# radosgw-admin user create --uid=user1 --display-name="user1" --access-key=123 --secret=456 --system 
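The command prints the new user as JSON; you can redisplay it at any time and check that the system flag was set:

radosgw-admin user info --uid=user1    # should show "system": "true" and the 123/456 key pair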

Register the system account's keys on the zone:

[root@clienta ~]# radosgw-admin zone modify  --rgw-realm=myrealm --rgw-zonegroup=myzonegroup --endpoints http://serverc:80 --access-key=123 --secret=456 --rgw-zone=myzone1 --master --default

7. Update the ceph config database and restart the service

[root@clienta ~]# ceph config set client.rgw.rgw.test.serverc.kpidkj rgw_realm myrealm 
[root@clienta ~]# ceph config set client.rgw.rgw.test.serverc.kpidkj rgw_zonegroup myzonegroup
[root@clienta ~]# ceph config set client.rgw.rgw.test.serverc.kpidkj rgw_zone myzone1
[root@clienta ~]# radosgw-admin period update --rgw-realm=myrealm --commit
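The serverd instance of rgw.test presumably needs the same three settings; its daemon name, rgw.test.serverd.zhtcld, shows up in the restart output below:

ceph config set client.rgw.rgw.test.serverd.zhtcld rgw_realm myrealm
ceph config set client.rgw.rgw.test.serverd.zhtcld rgw_zonegroup myzonegroup
ceph config set client.rgw.rgw.test.serverd.zhtcld rgw_zone myzone1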

Restart the service on the node in the cluster that hosts an RGW (the RGW deployed on serverc):

[root@serverc ~]# systemctl list-units | grep ceph | grep rgw
ceph-2ae6d05a-229a-11ec-925e-52540000fa0c@rgw.test.serverc.kpidkj.service                                        loaded    active running   Ceph rgw.test.serverc.kpidkj for 2ae6d05a-229a-11ec-925e-52540000fa0c                                           
[root@serverc ~]# systemctl restart ceph-2ae6d05a-229a-11ec-925e-52540000fa0c@rgw.test.serverc.kpidkj.service 
[root@serverc ~]# 
This restarts just the RGW instance I added on serverc.

[root@clienta ~]# ceph orch restart rgw.test
Scheduled to restart rgw.test.serverc.kpidkj on host 'serverc.lab.example.com'
Scheduled to restart rgw.test.serverd.zhtcld on host 'serverd.lab.example.com'
[root@clienta ~]# 
This restarts the entire rgw.test service (all of its instances).

Secondary zone

8. Pull the realm (synchronizing its metadata)

[root@serverf ~]# radosgw-admin realm pull --url=http://serverc:80 --access-key=123 --secret-key=456
2022-08-16T11:25:54.082-0400 7fe54eade380  1 error read_lastest_epoch .rgw.root:periods.7d9b13b1-e755-4b7b-908a-3deb39567dc9.latest_epoch
2022-08-16T11:25:54.183-0400 7fe54eade380  1 Set the period's master zonegroup 543c6500-a4d8-4653-9329-88d8e6b70fe3 as the default
{
    "id": "084b1291-061d-4501-8ddd-bebccd4183bc",
    "name": "myrealm",
    "current_period": "7d9b13b1-e755-4b7b-908a-3deb39567dc9",
    "epoch": 2
}
[root@serverf ~]# 
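If the pulled realm does not become the default on its own, set it explicitly before going on:

radosgw-admin realm default --rgw-realm=myrealm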

Verify:

[root@serverf ~]# radosgw-admin realm list
{
    "default_info": "084b1291-061d-4501-8ddd-bebccd4183bc",
    "realms": [
        "myrealm"
    ]
}
[root@serverf ~]# 


[root@serverf ~]# radosgw-admin zonegroup list
{
    "default_info": "543c6500-a4d8-4653-9329-88d8e6b70fe3",
    "zonegroups": [
        "myzonegroup",
        "default"
    ]
}

A zone cannot span clusters, so myzone1 is not visible here:

[root@serverf ~]# radosgw-admin zone list
{
    "default_info": "",
    "zones": [
        "default"
    ]
}
[root@serverf ~]# 

9. Pull the period configuration

[root@serverf ~]# radosgw-admin period pull --url=http://serverc:80 --access-key=123 --secret-key=456

10. Create the zone on serverf

[root@serverf ~]# radosgw-admin zone create --rgw-zonegroup=myzonegroup --rgw-zone=myzone2  --endpoints=http://serverf:80  --access-key=123 --secret-key=456

Verify:
ceph orch ps    # ideally none of these should be in an error state
rgw.realm.zone.serverf.qihrkc serverf.lab.example.com running (69m) 3m ago 10M *:80 16.2.0-117.el8cp 2142b60d7974 9820eb665dde
[root@serverf ~]# radosgw-admin zone list
{
    "default_info": "bcb8d59c-66e6-423d-8a5b-8d5b3c2fa62b",
    "zones": [
        "myzone2",
        "default"
    ]
}

11. Configure the ceph config database

[root@serverf ~]# ceph config set client.rgw.rgw.realm.zone.serverf.qihrkc  rgw_realm myrealm
[root@serverf ~]# ceph config set client.rgw.rgw.realm.zone.serverf.qihrkc  rgw_zonegroup myzonegroup
[root@serverf ~]# ceph config set client.rgw.rgw.realm.zone.serverf.qihrkc  rgw_zone myzone2
[root@serverf ~]# radosgw-admin period update  --commit

The client.rgw prefix is the fixed format from the documentation; the rgw.realm.zone.serverf.qihrkc part must match the RGW daemon name shown by ceph orch ps.
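A small sketch for fishing that daemon name out of ceph orch ps (the column layout is as in the output above):

ceph orch ps | awk '/^rgw/ {print $1}'    # e.g. rgw.realm.zone.serverf.qihrkc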

[root@serverf ~]# systemctl restart ceph-0bf7c358-25e1-11ec-ae02-52540000fa0f@rgw.realm.zone.serverf.qihrkc.service     
[root@serverf ~]# systemctl enable ceph-0bf7c358-25e1-11ec-ae02-52540000fa0f@rgw.realm.zone.serverf.qihrkc.service     
[root@serverf ~]# 
Use systemctl list-units | grep ceph to find the unit name.

Test

After configuration, the users on both sides are in sync:

[root@serverf ~]# radosgw-admin user list
[
    "user1"
]

Install the AWS S3 command-line tool on both nodes:
yum -y install awscli

[root@clienta ~]# radosgw-admin user create --uid=bob --display-name=bob --access-key=abc --secret=def
Create the user on the primary; the secondary ends up with it too:
[root@serverf yum.repos.d]# radosgw-admin user list
[
    "user1",
    "bob"
]

[root@clienta ~]# aws configure
AWS Access Key ID [None]: abc
AWS Secret Access Key [None]: def
Default region name [None]: 
Default output format [None]: 
[root@clienta ~]# 

Create a bucket:
[root@clienta ~]# aws --endpoint=http://serverc:80 s3 mb s3://test1
make_bucket: test1
[root@clienta ~]# aws --endpoint=http://serverc:80 s3 ls 
2022-08-17 09:37:29 test1

Check on the secondary cluster whether the bucket shows up:
[root@serverf yum.repos.d]# aws configure
AWS Access Key ID [None]: abc
AWS Secret Access Key [None]: def
Default region name [None]: 
Default output format [None]: 
[root@serverf yum.repos.d]# aws --endpoint=http://serverf:80 s3 ls
2022-08-17 09:37:29 test1
[root@serverf yum.repos.d]# 
The bucket has been synchronized over.

[root@clienta ~]# aws --endpoint=http://serverc:80 s3 cp /etc/profile s3://test1/
upload: ../etc/profile to s3://test1/profile  
Upload an object:

[root@serverf yum.repos.d]# aws --endpoint=http://serverf:80 s3 ls s3://test1
2022-08-17 09:42:04       2123 profile
[root@serverf yum.repos.d]# 
The secondary node has a replica of the object as well.

[root@serverf yum.repos.d]# aws --endpoint=http://serverf:80 s3 mb s3://test2
make_bucket: test2
Create a bucket on the secondary:

[root@clienta ~]# aws --endpoint=http://serverc:80 s3 ls 
2022-08-17 09:37:29 test1
2022-08-17 09:45:34 test2
[root@clienta ~]# 
The primary gets it as well.

When you create a bucket on the secondary cluster, the request is forwarded to the master zone. If the master fails, another zone can be promoted to master.
It is similar to ZooKeeper: only one node takes writes, but all of them can serve reads.
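You can watch the replication from either cluster; sync status reports which realm/zonegroup/zone the local gateway belongs to and the state of metadata and data sync against the peer zones:

radosgw-admin sync status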

[root@serverf yum.repos.d]# radosgw-admin user create --uid=boba --display-name=boba --access-key=abc --secret=def
Please run the command on master zone. Performing this operation on non-master zone leads to inconsistent metadata between zones
Are you sure you want to go ahead? (requires --yes-i-really-mean-it)
[root@serverf yum.repos.d]# 
The secondary cluster cannot create users or perform similar metadata operations.

Failover

You can see that myzone1 is currently the master zone:

[root@clienta ~]# radosgw-admin zonegroup get --rgw-realm=myrealm --rgw-zonegroup=myzonegroup
{
    "id": "543c6500-a4d8-4653-9329-88d8e6b70fe3",
    "name": "myzonegroup",
    "api_name": "myzonegroup",
    "is_master": "true",
    "endpoints": [
        "http://serverc:80"
    ],
    "hostnames": [],
    "hostnames_s3website": [],
    "master_zone": "6d3b4396-1966-4b1d-a49a-3c48f908a286",
    "zones": [
        {
            "id": "6d3b4396-1966-4b1d-a49a-3c48f908a286",
            "name": "myzone1",
            "endpoints": [
                "http://serverc:80"
            ],
            "log_meta": "false",
            "log_data": "true",
            "bucket_index_max_shards": 11,
            "read_only": "false",
            "tier_type": "",
            "sync_from_all": "true",
            "sync_from": [],
            "redirect_zone": ""
        },
        {
            "id": "bcb8d59c-66e6-423d-8a5b-8d5b3c2fa62b",
            "name": "myzone2",
            "endpoints": [
                "http://serverf:80"
            ],
            "log_meta": "false",
            "log_data": "true",
            "bucket_index_max_shards": 11,
            "read_only": "false",
            "tier_type": "",
            "sync_from_all": "true",
            "sync_from": [],
            "redirect_zone": ""
        }
    ],
    "placement_targets": [
        {
            "name": "default-placement",
            "tags": [],
            "storage_classes": [
                "STANDARD"
            ]
        }
    ],
    "default_placement": "default-placement",
    "realm_id": "084b1291-061d-4501-8ddd-bebccd4183bc",
    "sync_policy": {
        "groups": []
    }
}



[root@serverf yum.repos.d]# radosgw-admin zone modify --rgw-zone=myzone2 --master --default --read-only=false 
[root@serverf yum.repos.d]# radosgw-admin period update --commit
After changing the master zone, commit the period so the other clusters learn about the change.
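After the commit, the gateways typically need a restart to pick up the new period; the service name here is inferred from the daemon name rgw.realm.zone.serverf.qihrkc seen earlier:

ceph orch restart rgw.realm.zone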

The period is the realm's configuration metadata:
it records the realm name, its zonegroups and zones, and which zone is the master.

CephFS file system

The MDS manages the file system's metadata.
The MDS service keeps an in-memory cache to speed up metadata access,
but the metadata itself is stored in the cluster's OSDs.

Hands-on

[root@clienta ~]# ceph osd pool create mycephfs_data
pool 'mycephfs_data' created
Create the data pool.
[root@clienta ~]# ceph osd pool create mycephfs_metadata
pool 'mycephfs_metadata' created
Create the metadata pool.
[root@clienta ~]# ceph fs new mycephfs mycephfs_metadata mycephfs_data
new fs with metadata pool 13 and data pool 12
Create the file system from the two pools.
[root@clienta ~]# ceph orch apply mds mycephfs --placement="2 serverc.lab.example.com serverd.lab.example.com"
Scheduled mds.mycephfs update...
[root@clienta ~]# ceph mds stat
mycephfs:1 {0=mycephfs.serverc.lkwfwl=up:active} 1 up:standby
[root@clienta ~]# 
One active MDS and one standby.

[root@clienta ~]# ceph fs status
mycephfs - 0 clients
========
RANK  STATE             MDS               ACTIVITY     DNS    INOS   DIRS   CAPS  
0    active  mycephfs.serverc.lkwfwl  Reqs:    0 /s    10     13     12      0   
    POOL          TYPE     USED  AVAIL  
mycephfs_metadata  metadata  96.0k  28.3G  
mycephfs_data      data       0   28.3G  
    STANDBY MDS        
mycephfs.serverd.epyssp  
MDS version: ceph version 16.2.0-117.el8cp (0e34bb74700060ebfaa22d99b7d2cdc037b28a57) pacific (stable)
[root@clienta ~]# 

Two ways to mount

Method 1: the Linux kernel client

mount -t ceph serverc.lab.example.com:/  /mnt -o name=admin,fs=mycephfs
The device is a MON host address; the path after the colon is inside the file system, starting from the root /.

[root@clienta /]# cd /mnt/
[root@clienta mnt]# ls
[root@clienta mnt]# mkdir dir1
[root@clienta mnt]# mkdir dir33
[root@clienta mnt]# cd
[root@clienta ~]# umount /mnt
[root@clienta ~]# ceph fs authorize mycephfs client.mqy / r /dir33 rw > /etc/ceph/ceph.client.mqy.keyring 
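The authorize command writes the new keyring to a file; you can review the caps it granted (read-only on /, read-write on /dir33):

ceph auth get client.mqy    # shows the mon/mds/osd caps generated for this client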

[root@clienta ~]# mount -t ceph serverc.lab.example.com:/  /mnt -o name=mqy,fs=mycephfs
[root@clienta ~]# cd /mnt/
[root@clienta mnt]# ls
dir1  dir33
[root@clienta mnt]# touch flag
touch: cannot touch 'flag': Permission denied
[root@clienta mnt]# cd dir33
[root@clienta dir33]# ls
[root@clienta dir33]# touch flag

Persistent mount

[root@clienta ~]# ceph auth get-key client.mqy > /root/secret
[root@clienta ~]# mount -a
[root@clienta ~]# cat /etc/fstab  | tail -n 1
serverc.lab.example.com:/  /mnt  ceph   name=mqy,fs=mycephfs,secretfile=/root/secret,_netdev 0 0
[root@clienta ~]# df -h  
172.25.250.12:/   29G     0   29G   0% /mnt

Mounting a subdirectory of the file system directly:

[root@clienta ~]# mount -a
[root@clienta ~]# cd /mnt/
[root@clienta mnt]# ls
flag
[root@clienta mnt]# cat /etc/fstab  | tail -n 1
serverc.lab.example.com:/dir33  /mnt  ceph   name=mqy,fs=mycephfs,secretfile=/root/secret,_netdev 0 0
[root@clienta mnt]# 

Method 2: the ceph-fuse client (FUSE driver)

Manual mount

[root@clienta ~]# ceph-fuse -n client.mqy --client_fs mycephfs  /mnt
2022-08-17T10:53:23.822-0400 7f8f88d6d200 -1 init, newargv = 0x5596b7c3f9d0 newargc=15
ceph-fuse[19250]: starting ceph client
ceph-fuse[19250]: starting fuse
[root@clienta ~]# df -h /mnt/
Filesystem      Size  Used Avail Use% Mounted on
ceph-fuse        29G     0   29G   0% /mnt
[root@clienta ~]# 

Persistent mount

[root@clienta ~]# tail -n 1  /etc/fstab 
serverc.lab.example.com:/  /mnt/  fuse.ceph  ceph.id=mqy,_netdev 0 0

Creating multiple CephFS file systems for mounting

[root@clienta ~]# ceph osd pool create mycephfs2_data
pool 'mycephfs2_data' created
[root@clienta ~]# ceph osd pool create mycephfs2_metadata
pool 'mycephfs2_metadata' created
[root@clienta ~]# ceph fs new mycephfs2 mycephfs2_metadata mycephfs2_data
new fs with metadata pool 15 and data pool 14
[root@clienta ~]# ceph orch apply mds mycephfs2 --placement='1 servere.lab.example.com'
Scheduled mds.mycephfs2 update...
Multiple MDS daemons keep the service reliable.
[root@clienta ~]# ceph fs status
mycephfs - 1 clients
========
RANK  STATE             MDS               ACTIVITY     DNS    INOS   DIRS   CAPS  
0    active  mycephfs.serverc.lkwfwl  Reqs:    0 /s    13     16     14      1   
    POOL          TYPE     USED  AVAIL  
mycephfs_metadata  metadata   132k  28.3G  
mycephfs_data      data       0   28.3G  
mycephfs2 - 0 clients
=========
RANK  STATE             MDS                ACTIVITY     DNS    INOS   DIRS   CAPS  
0    active  mycephfs2.servere.cbglhs  Reqs:    0 /s    10     13     12      0   
    POOL           TYPE     USED  AVAIL  
mycephfs2_metadata  metadata  96.0k  28.3G  
mycephfs2_data      data       0   28.3G  
    STANDBY MDS        
mycephfs.serverd.epyssp  
MDS version: ceph version 16.2.0-117.el8cp (0e34bb74700060ebfaa22d99b7d2cdc037b28a57) pacific (stable)
[root@clienta ~]# 
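The second file system mounts the same way as before, selected with the fs= option; the /mnt2 mount point is just for illustration:

mkdir -p /mnt2
mount -t ceph serverc.lab.example.com:/ /mnt2 -o name=admin,fs=mycephfs2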