Ceph Storage Cluster Configuration: Sample CEPH.CONF

[global]
fsid = {cluster-id}
mon_initial_members = {hostname}[, {hostname}]
mon_host = {ip-address}[, {ip-address}]
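
#A filled-in sketch of the block above (hypothetical values for a
#three-monitor cluster; generate the fsid with a tool such as uuidgen):
#  fsid = a7f64266-0894-4f1e-a635-d0aeaca0e993
#  mon_initial_members = node1, node2, node3
#  mon_host = 192.168.0.1, 192.168.0.2, 192.168.0.3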

#All clusters have a front-side public network.
#If you have two network interfaces, you can configure a private (cluster)
#network for RADOS object replication, heartbeats, backfill,
#recovery, etc.
public_network = {network}[, {network}]
#cluster_network = {network}[, {network}] 
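
#Sketch, assuming two interfaces and hypothetical CIDR ranges: clients
#talk on the public subnet while replication stays on the cluster subnet.
#  public_network = 192.168.0.0/24
#  cluster_network = 10.10.0.0/24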

#Clusters require authentication by default.
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
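
#Each option accepts cephx or none. On a disposable test cluster (never
#in production) you could disable authentication instead:
#  auth_cluster_required = none
#  auth_service_required = none
#  auth_client_required = none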

#Choose reasonable numbers for journals, number of replicas
#and placement groups.
osd_journal_size = {n}
osd_pool_default_size = {n}  # Write an object n times.
osd_pool_default_min_size = {n}  # Allow writing n copies in a degraded state.
osd_pool_default_pg_num = {n}
osd_pool_default_pgp_num = {n}
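
#Illustrative starting values, assuming a small cluster with 3x
#replication (tune pg_num/pgp_num to your OSD count and keep them equal):
#  osd_journal_size = 10240         # 10 GB journal (value is in MB)
#  osd_pool_default_size = 3        # three replicas per object
#  osd_pool_default_min_size = 2    # accept writes with one replica down
#  osd_pool_default_pg_num = 128
#  osd_pool_default_pgp_num = 128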

#Choose a reasonable crush leaf type.
#0 for a 1-node cluster.
#1 for a multi-node cluster in a single rack.
#2 for a multi-node, multi-chassis cluster with multiple hosts in a chassis.
#3 for a multi-node cluster with hosts across racks, etc.
osd_crush_chooseleaf_type = {n}
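
#Example: a multi-node cluster whose hosts all sit in one rack replicates
#across hosts (leaf type 1), so losing a host never loses every copy:
#  osd_crush_chooseleaf_type = 1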