MongoDB 8 Sharded Cluster Deployment
Cluster Planning
Host Plan
mongodb-01 192.168.174.100
mongodb-02 192.168.174.101
mongodb-03 192.168.174.102
Port Plan
config server: 37013
shard1: 37014
shard2: 37015
shard3: 37016
mongos: 37017
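If firewalld is active on these RHEL 8 hosts (an assumption; the steps below do not configure a firewall), the planned ports have to be opened on every node, for example:
firewall-cmd --permanent --add-port=37013-37017/tcp
firewall-cmd --reload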
Directory Plan
mkdir -pv /data/disk1/mongodb/shard1 /data/disk2/mongodb/shard2 /data/disk3/mongodb/shard3 /usr/local/mongodb/etc
mkdir -pv /data/mongodb/config/data /data/mongodb/mongos
chown -R mongodb:mongodb /data/disk1/mongodb /data/disk2/mongodb /data/disk3/mongodb /data/mongodb
Create the service user
groupadd mongodb && useradd -M -N -g mongodb -s /bin/false -c "mongodb Server" mongodb
Download MongoDB
wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-rhel8-8.0.4.tgz
tar xf mongodb-linux-x86_64-rhel8-8.0.4.tgz -C /usr/local/ && ln -sv /usr/local/mongodb-linux-x86_64-rhel8-8.0.4 /usr/local/mongodb
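The commands that follow call mongod, mongos, and mongosh by name, so this assumes /usr/local/mongodb/bin is on PATH; one way to arrange that on every node:
echo 'export PATH=/usr/local/mongodb/bin:$PATH' > /etc/profile.d/mongodb.sh
source /etc/profile.d/mongodb.sh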
Create the Key File
openssl rand -base64 654 > /usr/local/mongodb/etc/mongo-keyfile
chmod 600 /usr/local/mongodb/etc/mongo-keyfile
chown mongodb:mongodb /usr/local/mongodb/etc/mongo-keyfile
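The key file must be identical on all three nodes; assuming root SSH access between the hosts, it can be distributed from mongodb-01 like this:
for host in 192.168.174.101 192.168.174.102; do
  scp /usr/local/mongodb/etc/mongo-keyfile ${host}:/usr/local/mongodb/etc/
  ssh ${host} 'chown mongodb:mongodb /usr/local/mongodb/etc/mongo-keyfile && chmod 600 /usr/local/mongodb/etc/mongo-keyfile'
done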
config server
Add the following configuration on all nodes. (Note: the shard configs later in this document secure intra-cluster traffic with the key file; if that is intended for the whole cluster, the config servers and mongos need the same security.keyFile setting as well.)
mongod_config.conf
cat > /usr/local/mongodb/etc/mongod_config.conf <<EOF
sharding:
  clusterRole: configsvr
replication:
  replSetName: "configReplSet"
net:
  bindIp: 0.0.0.0
  port: 37013
  unixDomainSocket:
    # the values below are the MongoDB defaults
    enabled: true
    pathPrefix: /tmp
    filePermissions: 0700
storage:
  dbPath: /data/mongodb/config/data
systemLog:
  destination: file
  path: /data/mongodb/config/config.log
processManagement:
  fork: true
  pidFilePath: /data/mongodb/config/configsrv.pid
  timeZoneInfo: /usr/share/zoneinfo
EOF
Start the config server on all three nodes
mongod -f /usr/local/mongodb/etc/mongod_config.conf
or
cat > /lib/systemd/system/mongod-config.service <<EOF
[Unit]
Description=MongoDB Database Server
Documentation=https://docs.mongodb.org/manual
After=network-online.target
Wants=network-online.target
[Service]
User=mongodb
Group=mongodb
EnvironmentFile=-/etc/default/mongod
Environment="MONGODB_CONFIG_OVERRIDE_NOFORK=1"
Environment="GLIBC_TUNABLES=glibc.pthread.rseq=0"
ExecStart=/usr/local/mongodb/bin/mongod --config /usr/local/mongodb/etc/mongod_config.conf
RuntimeDirectory=mongodb
# file size
LimitFSIZE=infinity
# cpu time
LimitCPU=infinity
# virtual memory size
LimitAS=infinity
# open files
LimitNOFILE=64000
# processes/threads
LimitNPROC=64000
# locked memory
LimitMEMLOCK=infinity
# total threads (user+kernel)
TasksMax=infinity
TasksAccounting=false
# Recommended limits for mongod as specified in
# https://docs.mongodb.com/manual/reference/ulimit/#recommended-ulimit-settings
[Install]
WantedBy=multi-user.target
EOF
systemctl enable mongod-config --now
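Optionally, confirm on each node that the config server is running and listening:
systemctl status mongod-config --no-pager
ss -lntp | grep 37013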
Connect to the config server
mongosh --host 192.168.174.100 --port 37013
Current Mongosh Log ID: 675963ad963eb6c207e94969
Connecting to: mongodb://192.168.174.100:37013/?directConnection=true&appName=mongosh+2.3.4
Using MongoDB: 8.0.4
Using Mongosh: 2.3.4
For mongosh info see: https://www.mongodb.com/docs/mongodb-shell/
To help improve our products, anonymous usage data is collected and sent to MongoDB periodically (https://www.mongodb.com/legal/privacy-policy).
You can opt-out by running the disableTelemetry() command.
test>
Initiate the config server replica set
rs.initiate(
  {
    _id: "configReplSet",
    configsvr: true,
    members: [
      { _id: 0, host: "192.168.174.100:37013" },
      { _id: 1, host: "192.168.174.101:37013" },
      { _id: 2, host: "192.168.174.102:37013" }
    ]
  }
)
{
ok: 1,
'$clusterTime': {
clusterTime: Timestamp({ t: 1733913835, i: 1 }),
signature: {
hash: Binary.createFromBase64('AAAAAAAAAAAAAAAAAAAAAAAAAAA=', 0),
keyId: Long('0')
}
},
operationTime: Timestamp({ t: 1733913835, i: 1 })
}
configReplSet [direct: secondary] test>
Check the replica set status
rs.status()
{
set: 'configReplSet',
date: ISODate('2024-12-11T10:44:51.643Z'),
myState: 1,
term: Long('1'),
syncSourceHost: '',
syncSourceId: -1,
configsvr: true,
heartbeatIntervalMillis: Long('2000'),
majorityVoteCount: 2,
writeMajorityCount: 2,
votingMembersCount: 3,
writableVotingMembersCount: 3,
optimes: {
lastCommittedOpTime: { ts: Timestamp({ t: 1733913891, i: 1 }), t: Long('1') },
lastCommittedWallTime: ISODate('2024-12-11T10:44:51.483Z'),
readConcernMajorityOpTime: { ts: Timestamp({ t: 1733913891, i: 1 }), t: Long('1') },
appliedOpTime: { ts: Timestamp({ t: 1733913891, i: 1 }), t: Long('1') },
durableOpTime: { ts: Timestamp({ t: 1733913891, i: 1 }), t: Long('1') },
writtenOpTime: { ts: Timestamp({ t: 1733913891, i: 1 }), t: Long('1') },
lastAppliedWallTime: ISODate('2024-12-11T10:44:51.483Z'),
lastDurableWallTime: ISODate('2024-12-11T10:44:51.483Z'),
lastWrittenWallTime: ISODate('2024-12-11T10:44:51.483Z')
},
lastStableRecoveryTimestamp: Timestamp({ t: 1733913835, i: 1 }),
electionCandidateMetrics: {
lastElectionReason: 'electionTimeout',
lastElectionDate: ISODate('2024-12-11T10:44:05.398Z'),
electionTerm: Long('1'),
lastCommittedOpTimeAtElection: { ts: Timestamp({ t: 1733913835, i: 1 }), t: Long('-1') },
lastSeenWrittenOpTimeAtElection: { ts: Timestamp({ t: 1733913835, i: 1 }), t: Long('-1') },
lastSeenOpTimeAtElection: { ts: Timestamp({ t: 1733913835, i: 1 }), t: Long('-1') },
numVotesNeeded: 2,
priorityAtElection: 1,
electionTimeoutMillis: Long('10000'),
numCatchUpOps: Long('0'),
newTermStartDate: ISODate('2024-12-11T10:44:05.412Z'),
wMajorityWriteAvailabilityDate: ISODate('2024-12-11T10:44:05.905Z')
},
members: [
{
_id: 0,
name: '192.168.174.100:37013',
health: 1,
state: 1,
stateStr: 'PRIMARY',
uptime: 837,
optime: { ts: Timestamp({ t: 1733913891, i: 1 }), t: Long('1') },
optimeDate: ISODate('2024-12-11T10:44:51.000Z'),
optimeWritten: { ts: Timestamp({ t: 1733913891, i: 1 }), t: Long('1') },
optimeWrittenDate: ISODate('2024-12-11T10:44:51.000Z'),
lastAppliedWallTime: ISODate('2024-12-11T10:44:51.483Z'),
lastDurableWallTime: ISODate('2024-12-11T10:44:51.483Z'),
lastWrittenWallTime: ISODate('2024-12-11T10:44:51.483Z'),
syncSourceHost: '',
syncSourceId: -1,
infoMessage: 'Could not find member to sync from',
electionTime: Timestamp({ t: 1733913845, i: 1 }),
electionDate: ISODate('2024-12-11T10:44:05.000Z'),
configVersion: 1,
configTerm: 1,
self: true,
lastHeartbeatMessage: ''
},
{
_id: 1,
name: '192.168.174.101:37013',
health: 1,
state: 2,
stateStr: 'SECONDARY',
uptime: 56,
optime: { ts: Timestamp({ t: 1733913890, i: 1 }), t: Long('1') },
optimeDurable: { ts: Timestamp({ t: 1733913890, i: 1 }), t: Long('1') },
optimeWritten: { ts: Timestamp({ t: 1733913890, i: 1 }), t: Long('1') },
optimeDate: ISODate('2024-12-11T10:44:50.000Z'),
optimeDurableDate: ISODate('2024-12-11T10:44:50.000Z'),
optimeWrittenDate: ISODate('2024-12-11T10:44:50.000Z'),
lastAppliedWallTime: ISODate('2024-12-11T10:44:51.483Z'),
lastDurableWallTime: ISODate('2024-12-11T10:44:51.483Z'),
lastWrittenWallTime: ISODate('2024-12-11T10:44:51.483Z'),
lastHeartbeat: ISODate('2024-12-11T10:44:51.404Z'),
lastHeartbeatRecv: ISODate('2024-12-11T10:44:50.403Z'),
pingMs: Long('0'),
lastHeartbeatMessage: '',
syncSourceHost: '192.168.174.100:37013',
syncSourceId: 0,
infoMessage: '',
configVersion: 1,
configTerm: 1
},
{
_id: 2,
name: '192.168.174.102:37013',
health: 1,
state: 2,
stateStr: 'SECONDARY',
uptime: 56,
optime: { ts: Timestamp({ t: 1733913890, i: 1 }), t: Long('1') },
optimeDurable: { ts: Timestamp({ t: 1733913890, i: 1 }), t: Long('1') },
optimeWritten: { ts: Timestamp({ t: 1733913890, i: 1 }), t: Long('1') },
optimeDate: ISODate('2024-12-11T10:44:50.000Z'),
optimeDurableDate: ISODate('2024-12-11T10:44:50.000Z'),
optimeWrittenDate: ISODate('2024-12-11T10:44:50.000Z'),
lastAppliedWallTime: ISODate('2024-12-11T10:44:51.483Z'),
lastDurableWallTime: ISODate('2024-12-11T10:44:51.483Z'),
lastWrittenWallTime: ISODate('2024-12-11T10:44:51.483Z'),
lastHeartbeat: ISODate('2024-12-11T10:44:51.404Z'),
lastHeartbeatRecv: ISODate('2024-12-11T10:44:50.404Z'),
pingMs: Long('0'),
lastHeartbeatMessage: '',
syncSourceHost: '192.168.174.100:37013',
syncSourceId: 0,
infoMessage: '',
configVersion: 1,
configTerm: 1
}
],
ok: 1,
'$clusterTime': {
clusterTime: Timestamp({ t: 1733913891, i: 1 }),
signature: {
hash: Binary.createFromBase64('AAAAAAAAAAAAAAAAAAAAAAAAAAA=', 0),
keyId: Long('0')
}
},
operationTime: Timestamp({ t: 1733913891, i: 1 })
}
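Instead of reading the full output, a mongosh one-liner (a convenience, not part of the original steps) summarizes member health:
rs.status().members.map(m => ({ name: m.name, state: m.stateStr, health: m.health }))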
Create the Shard Replica Sets
Add the following configurations on all nodes
mongod_shard1.conf
cat > /usr/local/mongodb/etc/mongod_shard1.conf <<EOF
security:
  authorization: enabled
  keyFile: /usr/local/mongodb/etc/mongo-keyfile
sharding:
  clusterRole: shardsvr
replication:
  replSetName: shard1
storage:
  dbPath: /data/disk1/mongodb/shard1
net:
  bindIp: 0.0.0.0
  port: 37014
systemLog:
  destination: file
  # fork: true requires a log destination; the path here is an example
  path: /data/disk1/mongodb/shard1.log
processManagement:
  fork: true
EOF
mongod_shard2.conf
cat > /usr/local/mongodb/etc/mongod_shard2.conf <<EOF
security:
  authorization: enabled
  keyFile: /usr/local/mongodb/etc/mongo-keyfile
sharding:
  clusterRole: shardsvr
replication:
  replSetName: shard2
storage:
  dbPath: /data/disk2/mongodb/shard2
systemLog:
  destination: file
  # fork: true requires a log destination; the path here is an example
  path: /data/disk2/mongodb/shard2.log
processManagement:
  fork: true
net:
  bindIp: 0.0.0.0
  port: 37015
EOF
mongod_shard3.conf
cat > /usr/local/mongodb/etc/mongod_shard3.conf <<EOF
security:
  authorization: enabled
  keyFile: /usr/local/mongodb/etc/mongo-keyfile
sharding:
  clusterRole: shardsvr
replication:
  replSetName: shard3
systemLog:
  destination: file
  # fork: true requires a log destination; the path here is an example
  path: /data/disk3/mongodb/shard3.log
processManagement:
  fork: true
storage:
  dbPath: /data/disk3/mongodb/shard3
net:
  bindIp: 0.0.0.0
  port: 37016
EOF
Start the three shard mongod processes on every node
mongod --config /usr/local/mongodb/etc/mongod_shard1.conf
mongod --config /usr/local/mongodb/etc/mongod_shard2.conf
mongod --config /usr/local/mongodb/etc/mongod_shard3.conf
or
cat > /lib/systemd/system/mongod-shard1.service <<EOF
[Unit]
Description=MongoDB Database Server
Documentation=https://docs.mongodb.org/manual
After=network-online.target
Wants=network-online.target
[Service]
User=mongodb
Group=mongodb
EnvironmentFile=-/etc/default/mongod
Environment="MONGODB_CONFIG_OVERRIDE_NOFORK=1"
Environment="GLIBC_TUNABLES=glibc.pthread.rseq=0"
ExecStart=/usr/local/mongodb/bin/mongod --config /usr/local/mongodb/etc/mongod_shard1.conf
RuntimeDirectory=mongodb
# file size
LimitFSIZE=infinity
# cpu time
LimitCPU=infinity
# virtual memory size
LimitAS=infinity
# open files
LimitNOFILE=64000
# processes/threads
LimitNPROC=64000
# locked memory
LimitMEMLOCK=infinity
# total threads (user+kernel)
TasksMax=infinity
TasksAccounting=false
# Recommended limits for mongod as specified in
# https://docs.mongodb.com/manual/reference/ulimit/#recommended-ulimit-settings
[Install]
WantedBy=multi-user.target
EOF
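The unit above only covers shard1. Before enabling mongod-shard2 and mongod-shard3 below, matching unit files are needed; a minimal sketch that derives them from the shard1 unit, assuming the only difference is the config file path:
for n in 2 3; do
  sed "s/mongod_shard1.conf/mongod_shard${n}.conf/" /lib/systemd/system/mongod-shard1.service > /lib/systemd/system/mongod-shard${n}.service
done
systemctl daemon-reload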
systemctl enable mongod-shard1 --now
systemctl enable mongod-shard2 --now
systemctl enable mongod-shard3 --now
Initiate the shard replica sets
mongosh --host 192.168.174.100 --port 37014
// Initiate the replica set for each shard
rs.initiate({
  _id: "shard1",
  members: [
    { _id: 0, host: "192.168.174.100:37014" },
    { _id: 1, host: "192.168.174.101:37014" },
    { _id: 2, host: "192.168.174.102:37014" }
  ]
});
mongosh --host 192.168.174.100 --port 37015
rs.initiate({
  _id: "shard2",
  members: [
    { _id: 0, host: "192.168.174.100:37015" },
    { _id: 1, host: "192.168.174.101:37015" },
    { _id: 2, host: "192.168.174.102:37015" }
  ]
});
mongosh --host 192.168.174.100 --port 37016
rs.initiate({
  _id: "shard3",
  members: [
    { _id: 0, host: "192.168.174.100:37016" },
    { _id: 1, host: "192.168.174.101:37016" },
    { _id: 2, host: "192.168.174.102:37016" }
  ]
});
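Optionally, confirm that each shard replica set has elected a primary (adjust the host or add credentials if authentication already blocks unauthenticated reads):
for port in 37014 37015 37016; do
  mongosh --host 192.168.174.100 --port ${port} --quiet --eval 'rs.status().members.map(m => m.name + " " + m.stateStr)'
done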
mongos
Add the following configuration on all nodes
mongos.conf
cat > /usr/local/mongodb/etc/mongos.conf <<EOF
sharding:
  configDB: configReplSet/192.168.174.100:37013,192.168.174.101:37013,192.168.174.102:37013
net:
  bindIp: 0.0.0.0
  port: 37017
systemLog:
  destination: file
  path: /data/mongodb/mongos/mongos.log
processManagement:
  pidFilePath: /data/mongodb/mongos/mongos.pid
  fork: true
EOF
Start mongos
mongos -f /usr/local/mongodb/etc/mongos.conf
or
cat > /lib/systemd/system/mongos.service <<EOF
[Unit]
Description=MongoDB Sharded Cluster Router (mongos)
Documentation=https://docs.mongodb.org/manual
After=network-online.target
Wants=network-online.target
[Service]
User=mongodb
Group=mongodb
EnvironmentFile=-/etc/default/mongod
Environment="MONGODB_CONFIG_OVERRIDE_NOFORK=1"
Environment="GLIBC_TUNABLES=glibc.pthread.rseq=0"
ExecStart=/usr/local/mongodb/bin/mongos --config /usr/local/mongodb/etc/mongos.conf
RuntimeDirectory=mongodb
# file size
LimitFSIZE=infinity
# cpu time
LimitCPU=infinity
# virtual memory size
LimitAS=infinity
# open files
LimitNOFILE=64000
# processes/threads
LimitNPROC=64000
# locked memory
LimitMEMLOCK=infinity
# total threads (user+kernel)
TasksMax=infinity
TasksAccounting=false
# Recommended limits for mongod as specified in
# https://docs.mongodb.com/manual/reference/ulimit/#recommended-ulimit-settings
[Install]
WantedBy=multi-user.target
EOF
systemctl enable mongos --now
Add the shards to the cluster
mongosh --host localhost --port 37017
sh.addShard( "shard1/192.168.174.100:37014,192.168.174.101:37014,192.168.174.102:37014")
sh.addShard( "shard2/192.168.174.100:37015,192.168.174.101:37015,192.168.174.102:37015")
sh.addShard( "shard3/192.168.174.100:37016,192.168.174.101:37016,192.168.174.102:37016")
Check the cluster status
sh.status();
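To verify that data actually gets distributed across the shards, a test database and a hashed-sharded collection can be created from mongos; testdb and testcoll are made-up names for illustration:
sh.enableSharding("testdb")
sh.shardCollection("testdb.testcoll", { _id: "hashed" })
for (let i = 0; i < 1000; i++) { db.getSiblingDB("testdb").testcoll.insertOne({ i: i }) }
db.getSiblingDB("testdb").testcoll.getShardDistribution()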
Install the MongoDB Shell (mongosh)
wget https://downloads.mongodb.com/compass/mongosh-2.3.4-linux-x64.tgz
tar xf mongosh-2.3.4-linux-x64.tgz
./mongosh-2.3.4-linux-x64/bin/mongosh --version
2.3.4
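To make mongosh callable by name as in the commands above (an assumption about the preferred location; adjust as needed), copy the binaries next to the server tools:
cp mongosh-2.3.4-linux-x64/bin/* /usr/local/mongodb/bin/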
References
https://www.mongodb.com/zh-cn/docs/manual/administration/deploy-manage-self-managed-sharded-clusters/