MongoDB

var cfg ={"_id":"shard3",
"protocolVersion" : 1,
"members":[
{"_id":1,"host":"192.168.211.136:38017"},
{"_id":2,"host":"192.168.211.136:38018","arbiterOnly":true}
]
};

Replica set (primary/secondary) setup

Create a working directory: mkdir replica_sets

Move the tarball into it: mv mongodb-linux-x86_64-rhel70-4.2.19.tgz replica_sets/

Enter the directory and extract the tarball: tar -xvf mongodb-linux-x86_64-rhel70-4.2.19.tgz

Enter the extracted directory: cd mongodb-linux-x86_64-rhel70-4.2.19

Create the startup configuration file: vi mongo_37017.conf

# Primary node configuration: mongo_37017.conf
dbpath=/data/mongo/data/server1
bind_ip=0.0.0.0
port=37017
fork=true
logpath=/data/mongo/logs/server1.log
replSet=lagouCluster

# Secondary node 1 configuration: mongo_37018.conf
dbpath=/data/mongo/data/server2
bind_ip=0.0.0.0
port=37018
fork=true
logpath=/data/mongo/logs/server2.log
replSet=lagouCluster

# Secondary node 2 configuration: mongo_37019.conf
dbpath=/data/mongo/data/server3
bind_ip=0.0.0.0
port=37019
fork=true
logpath=/data/mongo/logs/server3.log
replSet=lagouCluster

Create the data and log directories referenced by the config files: mkdir /data/mongo/data/server1 -p

mkdir /data/mongo/data/server2 -p

mkdir /data/mongo/data/server3 -p

mkdir /data/mongo/logs -p

Start all three mongod instances:

./bin/mongod -f mongo_37017.conf

./bin/mongod -f mongo_37018.conf

./bin/mongod -f mongo_37019.conf

Connect to any one of the nodes: ./bin/mongo --port 37017, then build the replica set configuration (_id must match the replSet name in the config files; members lists the cluster members; a higher priority makes a node more likely to be elected primary):

var cfg ={"_id":"lagouCluster",
 "protocolVersion" : 1,
 "members":[
 {"_id":1,"host":"192.168.211.136:37017","priority":10},
 {"_id":2,"host":"192.168.211.136:37018"}
 ]
 }
rs.initiate(cfg)   // initialize the replica set
rs.status()        // check the replica set status
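A quick sanity check (a minimal sketch; the test database and collection names are arbitrary) is to print each member's role and try a write on the primary, which should then replicate to the secondaries:

rs.status().members.forEach(function (m) {
    print(m.name + " -> " + m.stateStr);   // expect one PRIMARY and the rest SECONDARY
});
db.getSiblingDB("test").demo.insert({msg: "replication check", at: new Date()});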

  

Add a node on the primary:
rs.add("192.168.211.136:37019")
Remove a secondary node on the primary:
rs.remove("192.168.211.136:37019")

Allow reads on a secondary (so the synced data can be queried there): rs.slaveOk()
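For example, after connecting to a secondary with ./bin/mongo --port 37018 (the collection name here is only illustrative):

rs.slaveOk()                          // allow read operations on this secondary
use test
db.demo.find()                        // should return the documents written on the primary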

Add an arbiter node: its installation and configuration are the same as the nodes above.

Run on the primary:
var cfg ={"_id":"lagouCluster", "protocolVersion" : 1, "members":[ {"_id":1,"host":"192.168.211.136:37017","priority":10}, {"_id":2,"host":"192.168.211.136:37018","priority":0}, {"_id":3,"host":"192.168.211.136:37019","priority":5}, {"_id":4,"host":"192.168.211.136:37020","arbiterOnly":true}//仲裁节点 ] }; // 重新装载配置,并重新⽣成集群节点。 rs.reconfig(cfg) //重新查看集群状态 rs.status()
Alternatively, the arbiter can be added directly:
rs.addArb("192.168.211.136:37020")
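To confirm the arbiter joined (assuming the hosts above):

rs.status().members
    .filter(function (m) { return m.stateStr === "ARBITER"; })
    .forEach(function (m) { print("arbiter: " + m.name); });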
 
Config server setup (for sharding)
In the directory that contains bin, create a config directory: mkdir config
Enter config and create the configuration file: vi config-17017.conf
# Database file location
dbpath=config/config1
# Log file location
logpath=config/logs/config1.log
# Append to the log file
logappend=true
# Run as a daemon
fork = true
bind_ip=0.0.0.0
port = 17017
# This instance is a config server
configsvr=true
# Config server replica set name
replSet=configsvr

Also create the directories referenced by the config file: mkdir config1 logs

Copy config-17017.conf to create the other two config servers, then change port, dbpath, and logpath in each copy (for example to 17018/config2 and 17019/config3, creating the matching data directories):

cp config-17017.conf config-17018.conf

cp config-17017.conf config-17019.conf

Start the three config servers (./bin/mongod -f config/config-17017.conf, and likewise for 17018 and 17019), then connect to any one of them: ./bin/mongo --port 17017

Initialize the config server replica set (note: switch to the admin database first with use admin):
use admin
var cfg ={"_id":"configsvr",
"members":[
{"_id":1,"host":"192.168.211.136:17017"},
{"_id":2,"host":"192.168.211.136:17018"},
{"_id":3,"host":"192.168.211.136:17019"}]
};
rs.initiate(cfg)   // initialize the config server replica set
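A quick check that the config server replica set is up, run in the same shell:

rs.isMaster().primary   // shows which config server is currently primary
rs.status().ok          // 1 means the replica set is healthy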

Shard node 1: shard1 setup

In the directory that contains bin, create a shard directory: mkdir shard

Enter shard and create the per-shard directories: cd shard, then mkdir shard1 shard2

Enter shard1 and create the data directories used by the config files: cd shard1, then mkdir shard1-37017 shard1-37018 shard1-37019 (also create a logs directory if your logpath points to shard/shard1/logs/)

vi shard1-37017.conf

Copy it for the other two members and adjust port, dbpath, and logpath in each copy:

cp shard1-37017.conf shard1-37018.conf

cp shard1-37017.conf shard1-37019.conf

# shard1-37017.conf
dbpath=shard/shard1/shard1-37017
bind_ip=0.0.0.0
port=37017
fork=true
logpath=shard/shard1/shard1-37017.log
replSet=shard1
shardsvr=true

# shard1-37018.conf
dbpath=shard/shard1/shard1-37018
bind_ip=0.0.0.0
port=37018
fork=true
logpath=shard/shard1/logs/shard1-37018.log
replSet=shard1
shardsvr=true 
 
# shard1-37019.conf
dbpath=shard/shard1/shard1-37019
bind_ip=0.0.0.0
port=37019
fork=true
logpath=shard/shard1/logs/shard1-37019.log
replSet=shard1
shardsvr=true

  

Start each mongod, then connect to one of them and configure the shard1 replica set (the fourth member on port 37020 is an arbiter and needs its own configuration file, which is not shown here):
var cfg ={"_id":"shard1",
"protocolVersion" : 1,
"members":[
{"_id":1,"host":"192.168.211.136:37017"},
{"_id":2,"host":"192.168.211.136:37018"},
{"_id":3,"host":"192.168.211.136:37019"},
{"_id":4,"host":"192.168.211.136:37020","arbiterOnly":true}
]
};
rs.initiate(cfg)
rs.reconfig(cfg)   // only needed when changing an existing replica set configuration
rs.status()

Shard node 2: shard2 setup

Build the shard2 replica set on ports 47017 to 47019 (same procedure as shard1).
In shard/shard2, create the directories: mkdir shard2-47017 shard2-47018 shard2-47019 logs
# shard2-47017.conf
dbpath=shard/shard2/shard2-47017
bind_ip=0.0.0.0
port=47017
fork=true
logpath=shard/shard2/logs/shard2-47017.log
replSet=shard2
shardsvr=true 

# shard2-47018.conf
dbpath=shard/shard2/shard2-47018
bind_ip=0.0.0.0
port=47018
fork=true
logpath=shard/shard2/logs/shard2-47018.log
replSet=shard2
shardsvr=true 

# shard2-47019.conf
dbpath=shard/shard2/shard2-47019
bind_ip=0.0.0.0
port=47019
fork=true
logpath=shard/shard2/logs/shard2-47019.log
replSet=shard2
shardsvr=true

  

Start each mongod, then connect to one of them and configure the shard2 replica set:
var cfg ={"_id":"shard2",
"protocolVersion" : 1,
"members":[
{"_id":1,"host":"192.168.211.136:47017"},
{"_id":2,"host":"192.168.211.136:47018"},
{"_id":3,"host":"192.168.211.136:47019"}
]
};
rs.initiate(cfg)
rs.status()

Router (mongos) setup:

In the directory that contains bin, create the log directory: mkdir route/logs -p

Enter route and create the config file: cd route, then vi route-27017.conf

port=27017
bind_ip=0.0.0.0
fork=true
logpath=route/logs/route.log
configdb=configsvr/192.168.211.136:17017,192.168.211.136:17018,192.168.211.136:17019

Start the router: ./bin/mongos -f route/route-27017.conf

Register the shard replica sets with the router:

sh.status() 
sh.addShard("shard1/192.168.211.136:37017,192.168.211.136:37018,192.168.211.136:37019,192.168.211.136:37020"); 
sh.addShard("shard2/192.168.211.136:47017,192.168.211.136:47018,192.168.211.136:47019");
sh.status()
Enable sharding for a database:
sh.enableSharding("<database name>")
Shard a specific collection:
sh.shardCollection("<database>.<collection>", {"<shard key field, e.g. name>": <index spec>})   // for a hashed shard key: "name":"hashed"
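A concrete run-through, assuming a database named lagou_resume and a collection named resume, hashed on name (both names are only examples):

sh.enableSharding("lagou_resume")
sh.shardCollection("lagou_resume.resume", {"name": "hashed"})
use lagou_resume
// insert some test documents through mongos
for (var i = 0; i < 1000; i++) {
    db.resume.insert({name: "user" + i, city: "city" + (i % 3)});
}
// check how the documents are spread across shard1 and shard2
db.resume.getShardDistribution()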
Enable security authentication
Create an administrator (root) user:
use admin
db.createUser( { user:"root", pwd:"123456", roles:[{role:"root",db:"admin"}] })

Grant a user read/write access to a specific database:

use lagou_resume
db.createUser({
 user:"xx",
 pwd:"123456",
 roles:[{role:"readWrite",db:"集合名"}]
 })
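Once authentication is enabled (see below), this user can be tested through mongos, for example (the collection name is only illustrative):

use lagou_resume
db.auth("xx", "123456")                // returns 1 on success
db.resume.insert({name: "auth test"})  // permitted by the readWrite role
db.resume.find()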

Kill multiple processes at once

Install psmisc:
yum install psmisc
After it is installed, the killall command can quickly stop multiple processes:
killall mongod
Generate a key file
Create the directory: mkdir data/mongodb -p
Generate the key into testKeyFile.file: openssl rand -base64 756 > data/mongodb/testKeyFile.file
Restrict the key file permissions to 600: chmod 600 data/mongodb/testKeyFile.file
In all 9 config files of the config server and shard replica sets (3 + 3 + 3), enable authentication and point to the key file:
auth=true
keyFile=data/mongodb/testKeyFile.file

In the router (mongos) config file, set the key file:

keyFile=data/mongodb/testKeyFile.file
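After restarting the whole cluster with these settings (see the startup script below), clients must authenticate; for example, connect to mongos with ./bin/mongo --port 27017 and then:

use admin
db.auth("root", "123456")   // the root user created earlier; returns 1 on success
sh.status()                 // now permitted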
Create a shell startup script: vi startup.sh
./bin/mongod -f config/config-17017.conf
./bin/mongod -f config/config-17018.conf
./bin/mongod -f config/config-17019.conf
./bin/mongod -f shard/shard1/shard1-37017.conf
./bin/mongod -f shard/shard1/shard1-37018.conf
./bin/mongod -f shard/shard1/shard1-37019.conf
./bin/mongod -f shard/shard2/shard2-47017.conf
./bin/mongod -f shard/shard2/shard2-47018.conf
./bin/mongod -f shard/shard2/shard2-47019.conf
./bin/mongos -f route/route-27017.conf

Make the script executable: chmod +x startup.sh
