MongoDB-6.0.2 Sharded Cluster Deployment: One Primary, Multiple Secondaries

1. Initialization Configuration

CentOS 8.3 XXX Cluster Initialization Configuration: https://www.cnblogs.com/huaxiayuyi/p/16862622.html

 

2. Installing MongoDB

Installing MongoDB-6.0.2 on CentOS 8.3: https://www.cnblogs.com/huaxiayuyi/p/16871771.html

(Addendum: the previous attempt ran into too many bugs and the installation never completed properly.)

Plan: prepare 10 nodes in total. One node runs the mongos router; 3 nodes form a replica set that serves as the Config Server; the remaining 6 nodes form replica sets that serve as the data-bearing shards, each shard being a replica set of 3 nodes (1 primary, 1 secondary, 1 arbiter). The shards make up the sharded data store and, together with the config servers and the router, form the complete sharded cluster.
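For reference, the layout that the steps below actually build is slightly larger than the plan above (three shards rather than two, and shard1 has one extra secondary); hosts and ports as used later on this page:

192.168.80.31 (master01): config server 27223; shard1 members 27218, 27219, 27220 + arbiter 27221; mongos router 27230
192.168.80.32 (slave01):  config server 27223; shard2 members 27218, 27219 + arbiter 27220
192.168.80.33 (slave02):  config server 27223; shard3 members 27218, 27219 + arbiter 27220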

Data directories for the shard1 replica-set data nodes on this host (1 primary, 2 secondaries; the arbiter's directory is created further below):
mkdir /opt/mongodb-602/cluster/rs/n1
mkdir /opt/mongodb-602/cluster/rs/n2
mkdir /opt/mongodb-602/cluster/rs/n3

mkdir /opt/mongodb-602/cluster/pids
mkdir /opt/mongodb-602/cluster/logs
touch /opt/mongodb-602/cluster/logs/config.log

Data directory for the config server node (the three config servers, one per host, form replica set config0 with 1 primary and 2 secondaries, listening on port 27223):
mkdir /opt/mongodb-602/cluster/config
mkdir /opt/mongodb-602/cluster/config/data

cat > /opt/mongodb-602/cluster/config/config.conf << EOF
dbpath = /opt/mongodb-602/cluster/config/data
logpath = /opt/mongodb-602/cluster/logs/config.log
logappend = true
port = 27223
fork = true
bind_ip_all=true
replSet=config0
pidfilepath=/opt/mongodb-602/cluster/pids/config.pid
oplogSize=1000
configsvr=true
EOF

mongod -f /opt/mongodb-602/cluster/config/config.conf

[root@master01 ~]# ps -ef |grep mongod
root        1669       1  2 13:32 ?        00:00:39 mongod -f /opt/mongodb-602/cluster/config/config.conf
root        2017    1519  0 13:56 pts/0    00:00:00 grep --color=auto mongod
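The same directory layout and config.conf are needed on 192.168.80.32 and 192.168.80.33, and mongod must be running there before the replica set is initiated below (the ps output on slave01/slave02 further down shows the same config.conf process). A minimal sketch of pushing the configuration out, assuming root SSH access and that MongoDB 6.0.2 is already installed on those hosts:

for h in 192.168.80.32 192.168.80.33; do
    ssh root@$h 'mkdir -p /opt/mongodb-602/cluster/{config/data,pids,logs,rs/n1,rs/n2,rs/n3}; touch /opt/mongodb-602/cluster/logs/config.log'
    scp /opt/mongodb-602/cluster/config/config.conf root@$h:/opt/mongodb-602/cluster/config/
    ssh root@$h 'mongod -f /opt/mongodb-602/cluster/config/config.conf'
done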

[root@master01 ~]# tree /opt/mongodb-602/cluster
/opt/mongodb-602/cluster
├── config
│   ├── config.conf
│   ├── data
│   │   ├── collection-0-5674976236851998639.wt
│   │   ├── collection-10-5674976236851998639.wt
│   │   ├── collection-2-5674976236851998639.wt
│   │   ├── collection-4-5674976236851998639.wt
│   │   ├── collection-6-5674976236851998639.wt
│   │   ├── collection-8-5674976236851998639.wt
│   │   ├── diagnostic.data
│   │   │   ├── metrics.2022-11-10T15-14-42Z-00000
│   │   │   └── metrics.interim
│   │   ├── index-11-5674976236851998639.wt
│   │   ├── index-1-5674976236851998639.wt
│   │   ├── index-3-5674976236851998639.wt
│   │   ├── index-5-5674976236851998639.wt
│   │   ├── index-7-5674976236851998639.wt
│   │   ├── index-9-5674976236851998639.wt
│   │   ├── journal
│   │   │   ├── WiredTigerLog.0000000001
│   │   │   ├── WiredTigerPreplog.0000000001
│   │   │   └── WiredTigerPreplog.0000000002
│   │   ├── _mdb_catalog.wt
│   │   ├── mongod.lock
│   │   ├── sizeStorer.wt
│   │   ├── storage.bson
│   │   ├── WiredTiger
│   │   ├── WiredTigerHS.wt
│   │   ├── WiredTiger.lock
│   │   ├── WiredTiger.turtle
│   │   └── WiredTiger.wt
│   ├── router.conf
│   ├── rs-n1.conf
│   ├── rs-n2.conf
│   └── rs-n3.conf
├── logs
│   └── config.log
├── pids
│   └── config.pid
└── rs
    ├── n1
    ├── n2
    └── n3

10 directories, 33 files



mongosh --port 27223
test> use admin
switched to db admin
admin> rs.initiate({
...     "_id": "configserver",
...     "members": [
...         {"_id": 0, "host": "192.168.80.31:27223", "priority": 3},
...         {"_id": 1, "host": "192.168.80.32:27223", "priority": 1},
...         {"_id": 2, "host": "192.168.80.33:27223", "priority": 1}
...     ]
... })
MongoServerError: Rejecting initiate with a set name that differs from command line set name, initiate set name: configserver, command line set name: config0

test> use admin
switched to db admin
admin> rs.initiate({
"_id": "config0",
"members": [
{"_id": 1, "host": "192.168.80.31:27223", "priority": 3},
{"_id": 2, "host": "192.168.80.32:27223", "priority": 1},
{"_id": 3, "host": "192.168.80.33:27223", "priority": 1}
  ]
});
{ ok: 1, lastCommittedOpTime: Timestamp({ t: 1668093442, i: 1 }) }
config0 [direct: other] admin> 

config0 [direct: primary] admin> rs.status()
{
  set: 'config0',
  date: ISODate("2022-11-10T15:17:38.361Z"),
  myState: 1,
  term: Long("1"),
  syncSourceHost: '',
  syncSourceId: -1,
  configsvr: true,
  heartbeatIntervalMillis: Long("2000"),
  majorityVoteCount: 2,
  writeMajorityCount: 2,
  votingMembersCount: 3,
  writableVotingMembersCount: 3,
  optimes: {
    lastCommittedOpTime: { ts: Timestamp({ t: 1668093458, i: 1 }), t: Long("1") },
    lastCommittedWallTime: ISODate("2022-11-10T15:17:38.110Z"),
    readConcernMajorityOpTime: { ts: Timestamp({ t: 1668093458, i: 1 }), t: Long("1") },
    appliedOpTime: { ts: Timestamp({ t: 1668093458, i: 1 }), t: Long("1") },
    durableOpTime: { ts: Timestamp({ t: 1668093458, i: 1 }), t: Long("1") },
    lastAppliedWallTime: ISODate("2022-11-10T15:17:38.110Z"),
    lastDurableWallTime: ISODate("2022-11-10T15:17:38.110Z")
  },
  lastStableRecoveryTimestamp: Timestamp({ t: 1668093442, i: 1 }),
  electionCandidateMetrics: {
    lastElectionReason: 'electionTimeout',
    lastElectionDate: ISODate("2022-11-10T15:17:33.581Z"),
    electionTerm: Long("1"),
    lastCommittedOpTimeAtElection: { ts: Timestamp({ t: 1668093442, i: 1 }), t: Long("-1") },
    lastSeenOpTimeAtElection: { ts: Timestamp({ t: 1668093442, i: 1 }), t: Long("-1") },
    numVotesNeeded: 2,
    priorityAtElection: 3,
    electionTimeoutMillis: Long("10000"),
    numCatchUpOps: Long("0"),
    newTermStartDate: ISODate("2022-11-10T15:17:33.678Z"),
    wMajorityWriteAvailabilityDate: ISODate("2022-11-10T15:17:34.628Z")
  },
  members: [
    {
      _id: 1,
      name: '192.168.80.31:27223',
      health: 1,
      state: 1,
      stateStr: 'PRIMARY',
      uptime: 180,
      optime: { ts: Timestamp({ t: 1668093458, i: 1 }), t: Long("1") },
      optimeDate: ISODate("2022-11-10T15:17:38.000Z"),
      lastAppliedWallTime: ISODate("2022-11-10T15:17:38.110Z"),
      lastDurableWallTime: ISODate("2022-11-10T15:17:38.110Z"),
      syncSourceHost: '',
      syncSourceId: -1,
      infoMessage: '',
      electionTime: Timestamp({ t: 1668093453, i: 1 }),
      electionDate: ISODate("2022-11-10T15:17:33.000Z"),
      configVersion: 1,
      configTerm: 1,
      self: true,
      lastHeartbeatMessage: ''
    },
    {
      _id: 2,
      name: '192.168.80.32:27223',
      health: 1,
      state: 2,
      stateStr: 'SECONDARY',
      uptime: 15,
      optime: { ts: Timestamp({ t: 1668093457, i: 1 }), t: Long("1") },
      optimeDurable: { ts: Timestamp({ t: 1668093457, i: 1 }), t: Long("1") },
      optimeDate: ISODate("2022-11-10T15:17:37.000Z"),
      optimeDurableDate: ISODate("2022-11-10T15:17:37.000Z"),
      lastAppliedWallTime: ISODate("2022-11-10T15:17:38.110Z"),
      lastDurableWallTime: ISODate("2022-11-10T15:17:38.110Z"),
      lastHeartbeat: ISODate("2022-11-10T15:17:37.602Z"),
      lastHeartbeatRecv: ISODate("2022-11-10T15:17:36.619Z"),
      pingMs: Long("0"),
      lastHeartbeatMessage: '',
      syncSourceHost: '192.168.80.31:27223',
      syncSourceId: 1,
      infoMessage: '',
      configVersion: 1,
      configTerm: 1
    },
    {
      _id: 3,
      name: '192.168.80.33:27223',
      health: 1,
      state: 2,
      stateStr: 'SECONDARY',
      uptime: 15,
      optime: { ts: Timestamp({ t: 1668093457, i: 1 }), t: Long("1") },
      optimeDurable: { ts: Timestamp({ t: 1668093457, i: 1 }), t: Long("1") },
      optimeDate: ISODate("2022-11-10T15:17:37.000Z"),
      optimeDurableDate: ISODate("2022-11-10T15:17:37.000Z"),
      lastAppliedWallTime: ISODate("2022-11-10T15:17:38.110Z"),
      lastDurableWallTime: ISODate("2022-11-10T15:17:38.110Z"),
      lastHeartbeat: ISODate("2022-11-10T15:17:37.602Z"),
      lastHeartbeatRecv: ISODate("2022-11-10T15:17:36.614Z"),
      pingMs: Long("1"),
      lastHeartbeatMessage: '',
      syncSourceHost: '192.168.80.31:27223',
      syncSourceId: 1,
      infoMessage: '',
      configVersion: 1,
      configTerm: 1
    }
  ],
  ok: 1,
  lastCommittedOpTime: Timestamp({ t: 1668093458, i: 1 }),
  '$clusterTime': {
    clusterTime: Timestamp({ t: 1668093458, i: 1 }),
    signature: {
      hash: Binary(Buffer.from("0000000000000000000000000000000000000000", "hex"), 0),
      keyId: Long("0")
    }
  },
  operationTime: Timestamp({ t: 1668093458, i: 1 })
}
config0 [direct: primary] admin> 


cat /opt/mongodb-602/cluster/logs/config.log

-----------------------------   shard1   ----------------------------------------------
cat > /opt/mongodb-602/cluster/config/rs-n1.conf << EOF
dbpath = /opt/mongodb-602/cluster/rs/n1
logpath = /opt/mongodb-602/cluster/logs/rs-n1.log
logappend = true
port = 27218
fork = true
bind_ip_all=true
replSet=shard1
pidfilepath=/opt/mongodb-602/cluster/pids/rs-n1.pid
oplogSize=1000
shardsvr=true
EOF

cat > /opt/mongodb-602/cluster/config/rs-n2.conf << EOF
dbpath = /opt/mongodb-602/cluster/rs/n2
logpath = /opt/mongodb-602/cluster/logs/rs-n2.log
logappend = true
port = 27219
fork = true
bind_ip_all=true
replSet=shard1
pidfilepath=/opt/mongodb-602/cluster/pids/rs-n2.pid
oplogSize=1000
shardsvr=true
EOF

cat > /opt/mongodb-602/cluster/config/rs-n3.conf << EOF
dbpath = /opt/mongodb-602/cluster/rs/n3
logpath = /opt/mongodb-602/cluster/logs/rs-n3.log
logappend = true
port = 27220
fork = true
bind_ip_all=true
replSet=shard1
pidfilepath=/opt/mongodb-602/cluster/pids/rs-n3.pid
oplogSize=1000
shardsvr=true
EOF

mkdir /opt/mongodb-602/cluster/rs/n4    # data directory for the arbiter (not created in the earlier mkdir list)
cat > /opt/mongodb-602/cluster/config/rs-n4.conf << EOF
dbpath = /opt/mongodb-602/cluster/rs/n4
logpath = /opt/mongodb-602/cluster/logs/rs-n4.log
logappend = true
port = 27221
fork = true
bind_ip_all=true
replSet=shard1
pidfilepath=/opt/mongodb-602/cluster/pids/rs-n4.pid
oplogSize=1000
shardsvr=true
EOF

mongod -f /opt/mongodb-602/cluster/config/rs-n1.conf
mongod -f /opt/mongodb-602/cluster/config/rs-n2.conf
mongod -f /opt/mongodb-602/cluster/config/rs-n3.conf
mongod -f /opt/mongodb-602/cluster/config/rs-n4.conf
ps -ef |grep mongod

[root@master01 cluster]# mongod -f /opt/mongodb-602/cluster/config/rs-n1.conf
about to fork child process, waiting until server is ready for connections.
forked process: 1683
child process started successfully, parent exiting
[root@master01 cluster]# mongod -f /opt/mongodb-602/cluster/config/rs-n2.conf
about to fork child process, waiting until server is ready for connections.
forked process: 1745
child process started successfully, parent exiting
[root@master01 cluster]# mongod -f /opt/mongodb-602/cluster/config/rs-n3.conf
about to fork child process, waiting until server is ready for connections.
forked process: 1807
child process started successfully, parent exiting
[root@master01 cluster]# mongod -f /opt/mongodb-602/cluster/config/rs-n4.conf
about to fork child process, waiting until server is ready for connections.
forked process: 1869
child process started successfully, parent exiting
[root@master01 cluster]# ps -ef |grep mongod
root        1525       1  4 23:36 ?        00:00:07 mongod -f /opt/mongodb-602/cluster/config/config.conf
root        1683       1 18 23:39 ?        00:00:02 mongod -f /opt/mongodb-602/cluster/config/rs-n1.conf
root        1745       1 22 23:39 ?        00:00:02 mongod -f /opt/mongodb-602/cluster/config/rs-n2.conf
root        1807       1 29 23:39 ?        00:00:02 mongod -f /opt/mongodb-602/cluster/config/rs-n3.conf
root        1869       1 45 23:39 ?        00:00:02 mongod -f /opt/mongodb-602/cluster/config/rs-n4.conf
root        1930    1500  0 23:39 pts/0    00:00:00 grep --color=auto mongod
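If any of these mongod instances fails to start, the corresponding log under /opt/mongodb-602/cluster/logs is the place to look, e.g.:

tail -n 50 /opt/mongodb-602/cluster/logs/rs-n1.log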


[root@master01 ~]# mongosh --port 27218

test> use admin
switched to db admin
admin> 

admin> rs.initiate({
    "_id": "shard1",
    "members": [
        {"_id": 1, "host": "192.168.80.31:27218", "priority": 3},
        {"_id": 2, "host": "192.168.80.31:27219", "priority": 2},
		{"_id": 3, "host": "192.168.80.31:27220", "priority": 2},
        {"_id": 4, "host": "192.168.80.31:27221", "arbiterOnly": true}
    ]
});
{ ok: 1 }
shard1 [direct: other] admin> 

shard1 [direct: secondary] admin> 

shard1 [direct: primary] admin> rs.isMaster()
{
  topologyVersion: {
    processId: ObjectId("636d1b2656c3218b6a3c6494"),
    counter: Long("7")
  },
  hosts: [
    '192.168.80.31:27218',
    '192.168.80.31:27219',
    '192.168.80.31:27220'
  ],
  arbiters: [ '192.168.80.31:27221' ],
  setName: 'shard1',
  setVersion: 1,
  ismaster: true,
  secondary: false,
  primary: '192.168.80.31:27218',
  me: '192.168.80.31:27218',
  electionId: ObjectId("7fffffff0000000000000001"),
  lastWrite: {
    opTime: { ts: Timestamp({ t: 1668094842, i: 6 }), t: Long("1") },
    lastWriteDate: ISODate("2022-11-10T15:40:42.000Z"),
    majorityOpTime: { ts: Timestamp({ t: 1668094842, i: 6 }), t: Long("1") },
    majorityWriteDate: ISODate("2022-11-10T15:40:42.000Z")
  },
  isImplicitDefaultMajorityWC: false,
  maxBsonObjectSize: 16777216,
  maxMessageSizeBytes: 48000000,
  maxWriteBatchSize: 100000,
  localTime: ISODate("2022-11-10T15:40:49.435Z"),
  logicalSessionTimeoutMinutes: 30,
  connectionId: 6,
  minWireVersion: 0,
  maxWireVersion: 17,
  readOnly: false,
  ok: 1,
  '$clusterTime': {
    clusterTime: Timestamp({ t: 1668094842, i: 6 }),
    signature: {
      hash: Binary(Buffer.from("0000000000000000000000000000000000000000", "hex"), 0),
      keyId: Long("0")
    }
  },
  operationTime: Timestamp({ t: 1668094842, i: 6 }),
  isWritablePrimary: true
}

---------------------------------------------------------------------------------------


cat > /opt/mongodb-602/cluster/config/rs-n1.conf << EOF
dbpath = /opt/mongodb-602/cluster/rs/n1
logpath = /opt/mongodb-602/cluster/logs/rs-n1.log
logappend = true
port = 27218
fork = true
bind_ip_all=true
replSet=shard2
pidfilepath=/opt/mongodb-602/cluster/pids/rs-n1.pid
oplogSize=1000
shardsvr=true
EOF

cat > /opt/mongodb-602/cluster/config/rs-n2.conf << EOF
dbpath = /opt/mongodb-602/cluster/rs/n2
logpath = /opt/mongodb-602/cluster/logs/rs-n2.log
logappend = true
port = 27219
fork = true
bind_ip_all=true
replSet=shard2
pidfilepath=/opt/mongodb-602/cluster/pids/rs-n2.pid
oplogSize=1000
shardsvr=true
EOF

cat > /opt/mongodb-602/cluster/config/rs-n3.conf << EOF
dbpath = /opt/mongodb-602/cluster/rs/n3
logpath = /opt/mongodb-602/cluster/logs/rs-n3.log
logappend = true
port = 27220
fork = true
bind_ip_all=true
replSet=shard2
pidfilepath=/opt/mongodb-602/cluster/pids/rs-n3.pid
oplogSize=1000
shardsvr=true
EOF




[root@slave01 ~]# mongod -f /opt/mongodb-602/cluster/config/rs-n1.conf 
about to fork child process, waiting until server is ready for connections.
forked process: 2289
child process started successfully, parent exiting
[root@slave01 ~]# mongod -f /opt/mongodb-602/cluster/config/rs-n2.conf 
about to fork child process, waiting until server is ready for connections.
forked process: 2351
child process started successfully, parent exiting
[root@slave01 ~]# mongod -f /opt/mongodb-602/cluster/config/rs-n3.conf 
about to fork child process, waiting until server is ready for connections.
forked process: 2414
child process started successfully, parent exiting
[root@slave01 ~]# ps -ef |grep mongod
root        1993       1  2 13:48 ?        00:01:16 mongod -f /opt/mongodb-602/cluster/config/config.conf
root        2289       1 19 14:34 ?        00:00:03 mongod -f /opt/mongodb-602/cluster/config/rs-n1.conf
root        2351       1 33 14:34 ?        00:00:03 mongod -f /opt/mongodb-602/cluster/config/rs-n2.conf
root        2414       1 70 14:34 ?        00:00:03 mongod -f /opt/mongodb-602/cluster/config/rs-n3.conf
root        2475    2229  0 14:34 pts/1    00:00:00 grep --color=auto mongod


[root@slave01 ~]# mongosh --port 27218
use admin
rs.initiate({
    "_id": "shard2",
    "members": [
        {"_id": 1, "host": "192.168.80.32:27218", "priority": 3},
        {"_id": 2, "host": "192.168.80.32:27219", "priority": 1},
        {"_id": 3, "host": "192.168.80.32:27220", "arbiterOnly": true}
    ]
})
shard2 [direct: other] admin> 

shard2 [direct: secondary] admin> 
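A quick way to confirm the new replica set has reached the expected state (an extra check, not part of the original session; run from the OS shell on the same host):

mongosh --port 27218 --quiet --eval 'rs.status().members.forEach(m => print(m.name, m.stateStr))'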

-------------------------------------------------------------------------



cat > /opt/mongodb-602/cluster/config/rs-n1.conf << EOF
dbpath = /opt/mongodb-602/cluster/rs/n1
logpath = /opt/mongodb-602/cluster/logs/rs-n1.log
logappend = true
port = 27218
fork = true
bind_ip_all=true
replSet=shard3
pidfilepath=/opt/mongodb-602/cluster/pids/rs-n1.pid
oplogSize=1000
shardsvr=true
EOF

cat > /opt/mongodb-602/cluster/config/rs-n2.conf << EOF
dbpath = /opt/mongodb-602/cluster/rs/n2
logpath = /opt/mongodb-602/cluster/logs/rs-n2.log
logappend = true
port = 27219
fork = true
bind_ip_all=true
replSet=shard3
pidfilepath=/opt/mongodb-602/cluster/pids/rs-n2.pid
oplogSize=1000
shardsvr=true
EOF

cat > /opt/mongodb-602/cluster/config/rs-n3.conf << EOF
dbpath = /opt/mongodb-602/cluster/rs/n3
logpath = /opt/mongodb-602/cluster/logs/rs-n3.log
logappend = true
port = 27220
fork = true
bind_ip_all=true
replSet=shard3
pidfilepath=/opt/mongodb-602/cluster/pids/rs-n3.pid
oplogSize=1000
shardsvr=true
EOF

[root@slave02 ~]# mongod -f /opt/mongodb-602/cluster/config/rs-n1.conf 
about to fork child process, waiting until server is ready for connections.
forked process: 2244
child process started successfully, parent exiting
[root@slave02 ~]# mongod -f /opt/mongodb-602/cluster/config/rs-n2.conf 
about to fork child process, waiting until server is ready for connections.
forked process: 2306
child process started successfully, parent exiting
[root@slave02 ~]#  mongod -f /opt/mongodb-602/cluster/config/rs-n3.conf 
about to fork child process, waiting until server is ready for connections.
forked process: 2368
child process started successfully, parent exiting

[root@slave02 ~]#  ps -ef |grep mongod
root        1882       1  2 13:48 ?        00:01:16 mongod -f /opt/mongodb-602/cluster/config/config.conf
root        2244       1 18 14:46 ?        00:00:02 mongod -f /opt/mongodb-602/cluster/config/rs-n1.conf
root        2306       1 31 14:46 ?        00:00:03 mongod -f /opt/mongodb-602/cluster/config/rs-n2.conf
root        2368       1 67 14:46 ?        00:00:04 mongod -f /opt/mongodb-602/cluster/config/rs-n3.conf
root        2429    2129  0 14:46 pts/1    00:00:00 grep --color=auto mongod

[root@slave02 ~]# mongosh --port 27218
use admin
rs.initiate({
    "_id": "shard3",
    "members": [
        {"_id": 1, "host": "192.168.80.33:27218", "priority": 3},
        {"_id": 2, "host": "192.168.80.33:27219", "priority": 1},
        {"_id": 3, "host": "192.168.80.33:27220", "arbiterOnly": true}
    ]
})
shard3 [direct: other] admin> 

shard3 [direct: secondary] admin> 



----------------------------------------------------


cat > /opt/mongodb-602/cluster/config/router.conf << EOF
logpath = /opt/mongodb-602/cluster/logs/router.log
logappend = true
port = 27230
fork = true
bind_ip_all=true
pidfilepath=/opt/mongodb-602/cluster/pids/router.pid
configdb=config0/192.168.80.31:27223,192.168.80.32:27223,192.168.80.33:27223
EOF
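The router is started with mongos rather than mongod, i.e. mongos -f /opt/mongodb-602/cluster/config/router.conf (the full start-up transcript is shown a bit further below); mongosh can only connect to port 27230 once the router is running.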




[root@master01 ~]# mongosh --port 27230
Current Mongosh Log ID:	636cbe161c7f8d06ab7c85bf
Connecting to:		mongodb://127.0.0.1:27230/?directConnection=true&serverSelectionTimeoutMS=2000&appName=mongosh+1.6.0
Using MongoDB:		6.0.2
Using Mongosh:		1.6.0

For mongosh info see: https://docs.mongodb.com/mongodb-shell/

------
   The server generated these startup warnings when booting
   2022-11-10T14:57:32.450+08:00: Access control is not enabled for the database. Read and write access to data and configuration is unrestricted
   2022-11-10T14:57:32.450+08:00: You are running this process as the root user, which is not recommended
------

[direct: mongos] test> use admin
switched to db admin
[direct: mongos] admin> db.runCommand({"addShard":"shard1/192.168.80.31:27218,192.168.80.31:27219,192.168.80.31:27220"});
MongoServerError: Cannot add shard1/192.168.80.31:27218,192.168.80.31:27219,192.168.80.31:27220 as a shard since the implicit default write concern on this shard is set to {w : 1}, because number of arbiters in the shard's configuration caused the number of writable voting members not to be strictly more than the voting majority. Change the shard configuration or set the cluster-wide write concern using the setDefaultRWConcern command and try again.
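As the error message suggests, one way to proceed is to set a cluster-wide default write concern from the mongos before retrying addShard. A minimal sketch; choosing {w: 1} is an assumption that matches the primary-secondary-arbiter layout of these shards ({w: "majority"} also works but blocks writes whenever a data-bearing member is down):

mongosh --port 27230 --quiet --eval 'db.adminCommand({ setDefaultRWConcern: 1, defaultWriteConcern: { w: 1 } })'

After this, the addShard commands further below should be accepted.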

Status inspection

rs.status()              // full description of the replica set
rs.isMaster()            // list the set members and whether the current node is the primary
rs.conf()                // replica-set configuration: member priorities, voting members, etc.
rs.remove("host:port")   // remove a member from the set


{"t":{"$date":"2022-11-10T21:12:52.701+08:00"},"s":"I",  "c":"CONTROL",  "id":20714,   "ctx":"LogicalSessionCacheRefresh","msg":"Failed to refresh session cache, will try again at the next refresh interval","attr":{"error":"ShardingStateNotInitialized: sharding state is not yet initialized"}}


{"t":{"$date":"2022-11-10T21:12:52.737+08:00"},"s":"I",  "c":"CONTROL",  "id":20714,   "ctx":"LogicalSessionCacheRefresh","msg":"Failed to refresh session cache, will try again at the next refresh interval","attr":{"error":"ShardNotFound: Failed to create config.system.sessions: cannot create the collection until there are shards"}}


Router
[root@master01 cluster]# mongos -f /opt/mongodb-602/cluster/config/router.conf
about to fork child process, waiting until server is ready for connections.
forked process: 2078
child process started successfully, parent exiting

[root@master01 ~]# mongosh --port 27230
use admin

db.runCommand({"addShard":"shard1/192.168.80.31:27218,192.168.80.31:27219,192.168.80.31:27220,192.168.80.31:27221"});

db.runCommand({"addShard":"shard2/192.168.80.32:27218,192.168.80.32:27219,192.168.80.32:27220"})
db.runCommand({"addShard":"shard3/192.168.80.33:27218,192.168.80.33:27219,192.168.80.33:27220"})

db.runCommand({"enablesharding": "data"})
db.createCollection("col")
db.tr.createIndex({"name": 1})
db.runCommand({"shardcollection": "data.col", "key": {"name": "hashed"}})
sh.status()

use data
for (i = 0; i < 200; i++) {
    db.col.insert({"name": "user" + i})
}
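To see how the 200 documents ended up spread across shard1/shard2/shard3, the collection's shard distribution can be printed (an extra check for illustration, run from the OS shell against the mongos):

mongosh --port 27230 --quiet --eval 'db.getSiblingDB("data").col.getShardDistribution()'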

// reading from secondaries: rs.slaveOk() is the legacy mongo-shell helper; in mongosh, db.getMongo().setReadPref() below is the supported way
rs.slaveOk()
rs.isMaster()
db.getMongo().setReadPref("nearest")
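Read preference can also be set in the connection string when connecting through the router; a hypothetical example using the mongos address from above:

mongosh "mongodb://192.168.80.31:27230/data?readPreference=nearest"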

{"t":{"$date":"2022-11-10T23:44:26.797+08:00"},"s":"I",  "c":"CONTROL",  "id":20712,   "ctx":"LogicalSessionCacheReap","msg":"Sessions collection is not set up; waiting until next sessions reap interval","attr":{"error":"ShardingStateNotInitialized: sharding state is not yet initialized"}}
{"t":{"$date":"2022-11-10T23:44:26.797+08:00"},"s":"I",  "c":"CONTROL",  "id":20714,   "ctx":"LogicalSessionCacheRefresh","msg":"Failed to refresh session cache, will try again at the next refresh interval","attr":{"error":"ShardingStateNotInitialized: sharding state is not yet initialized"}}
{"t":{"$date":"2022-11-10T23:48:25.699+08:00"},"s":"I",  "c":"NETWORK",  "id":22944,   "ctx":"conn19","msg":"Connection ended","attr":{"remote":"192.168.80.31:55128","uuid":"22e11327-ede3-42af-ae47-90cb4baf663c","connectionId":19,"connectionCount":3}}
{"t":{"$date":"2022-11-10T23:49:26.798+08:00"},"s":"I",  "c":"CONTROL",  "id":20714,   "ctx":"LogicalSessionCacheRefresh","msg":"Failed to refresh session cache, will try again at the next refresh interval","attr":{"error":"ShardingStateNotInitialized: sharding state is not yet initialized"}}
{"t":{"$date":"2022-11-10T23:49:26.798+08:00"},"s":"I",  "c":"CONTROL",  "id":20712,   "ctx":"LogicalSessionCacheReap","msg":"Sessions collection is not set up; waiting until next sessions reap interval","attr":{"error":"ShardingStateNotInitialized: sharding state is not yet initialized"}}

{"t":{"$date":"2022-11-10T23:44:21.388+08:00"},"s":"I",  "c":"CONTROL",  "id":20712,   "ctx":"LogicalSessionCacheReap","msg":"Sessions collection is not set up; waiting until next sessions reap interval","attr":{"error":"ShardingStateNotInitialized: sharding state is not yet initialized"}}
{"t":{"$date":"2022-11-10T23:47:25.704+08:00"},"s":"I",  "c":"NETWORK",  "id":22944,   "ctx":"conn30","msg":"Connection ended","attr":{"remote":"192.168.80.31:43662","uuid":"89c53d6b-7052-4ea3-9baf-881d29d2627c","connectionId":30,"connectionCount":8}}
{"t":{"$date":"2022-11-10T23:48:25.704+08:00"},"s":"I",  "c":"NETWORK",  "id":22944,   "ctx":"conn29","msg":"Connection ended","attr":{"remote":"192.168.80.31:43660","uuid":"58f81716-2744-4361-b8af-ec98af0d7bf4","connectionId":29,"connectionCount":7}}
{"t":{"$date":"2022-11-10T23:49:21.387+08:00"},"s":"I",  "c":"CONTROL",  "id":20714,   "ctx":"LogicalSessionCacheRefresh","msg":"Failed to refresh session cache, will try again at the next refresh interval","attr":{"error":"ShardingStateNotInitialized: sharding state is not yet initialized"}}
{"t":{"$date":"2022-11-10T23:49:21.388+08:00"},"s":"I",  "c":"CONTROL",  "id":20712,   "ctx":"LogicalSessionCacheReap","msg":"Sessions collection is not set up; waiting until next sessions reap interval","attr":{"error":"ShardingStateNotInitialized: sharding state is not yet initialized"}}


[root@master01 ~]# ps -ef |grep mong
root        1525       1  2 23:36 ?        00:00:26 mongod -f /opt/mongodb-602/cluster/config/config.conf
root        1683       1  1 23:39 ?        00:00:15 mongod -f /opt/mongodb-602/cluster/config/rs-n1.conf
root        1745       1  1 23:39 ?        00:00:14 mongod -f /opt/mongodb-602/cluster/config/rs-n2.conf
root        1807       1  1 23:39 ?        00:00:14 mongod -f /opt/mongodb-602/cluster/config/rs-n3.conf
root        1869       1  1 23:39 ?        00:00:12 mongod -f /opt/mongodb-602/cluster/config/rs-n4.conf
root        2094       1  0 23:41 ?        00:00:04 mongos -f /opt/mongodb-602/cluster/config/router.conf
root        2133    1500  1 23:42 pts/0    00:00:08 mongosh mongodb://12
root        2259    2176  0 23:52 pts/1    00:00:00 grep --color=auto mong

 
