Installing Redis 3.2.1 on Linux
 
#tar zxvf redis-3.2.1.tar.gz
#cd redis-3.2.1
#make MALLOC=libc
#cd src
#make test
#make install
After installation, make install copies the six redis-* binaries from ./redis-3.2.1/src/ to /usr/local/bin/.
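A quick sanity check (optional) to confirm the binaries landed where expected:

#ls /usr/local/bin/redis-*
#redis-server --version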
 
#cp ./redis-3.2.1/redis.conf /etc/redis.conf    # copy the config file to /etc and edit it as below
 
bind 127.0.0.1 10.132.6.118
protected-mode yes
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile /var/run/redis_6379.pid
loglevel notice
logfile "/var/run/redis_6379.log"
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /app/redis-3.2.1/data
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
maxclients 50000
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
maxmemory-policy volatile-ttl
 
#cluster-enabled yes
#cluster-config-file "/opt/nodes.conf"
#cluster-node-timeout 5000
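With the server running, any of the settings above can be checked (and most of them changed) at runtime through redis-cli, for example:

#redis-cli config get maxmemory-policy
1) "maxmemory-policy"
2) "volatile-ttl"
#redis-cli config set loglevel verbose
OK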
 
 
Start Redis: #redis-server /etc/redis.conf &
[root@iZ28b5ymck0Z src]# redis-server /etc/redis.conf &
[1] 28671
[root@iZ28b5ymck0Z src]# ps -ef |grep redis
root 28671 22836 0 10:47 pts/0 00:00:00 redis-server 0.0.0.0:6379
root 28675 22836 0 10:47 pts/0 00:00:00 grep redis
 
Stop Redis: redis-cli shutdown
[root@iZ28b5ymck0Z bin]# redis-cli shutdown
28671:M 24 Jul 10:56:23.219 # User requested shutdown...
28671:M 24 Jul 10:56:23.219 * Saving the final RDB snapshot before exiting.
28671:M 24 Jul 10:56:23.226 * DB saved on disk
28671:M 24 Jul 10:56:23.226 * Removing the pid file.
28671:M 24 Jul 10:56:23.226 # Redis is now ready to exit, bye bye...
[1]+ Done redis-server /etc/redis.conf (wd: ~/redis-3.2.1/src)
(wd now: /usr/local/bin)
[root@iZ28b5ymck0Z bin]# ps -ef |grep redis
root 28687 22836 0 10:56 pts/0 00:00:00 grep redis
 
Configure Redis as a service that starts on boot

Create a Redis init script:
#cd /etc/rc.d/init.d/
#vim redis
#!/bin/sh
#chkconfig: 345 86 14
#description: Startup and shutdown script for Redis

PROGDIR=/usr/local/bin    # installation path
PROGNAME=redis-server
DAEMON=$PROGDIR/$PROGNAME
CONFIG=/etc/redis.conf
PIDFILE=/var/run/redis_6379.pid    # must match the pidfile set in /etc/redis.conf
DESC="redis daemon"
SCRIPTNAME=/etc/rc.d/init.d/redis

start()
{
    if test -x $DAEMON
    then
        echo -e "Starting $DESC: $PROGNAME"
        if $DAEMON $CONFIG
        then
            echo -e "OK"
        else
            echo -e "failed"
        fi
    else
        echo -e "Couldn't find Redis Server ($DAEMON)"
    fi
}

stop()
{
    if test -e $PIDFILE
    then
        echo -e "Stopping $DESC: $PROGNAME"
        if kill `cat $PIDFILE`
        then
            echo -e "OK"
        else
            echo -e "failed"
        fi
    else
        echo -e "No Redis Server ($DAEMON) running"
    fi
}

restart()
{
    echo -e "Restarting $DESC: $PROGNAME"
    stop
    start
}

list()
{
    ps aux | grep $PROGNAME
}

case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    restart)
        restart
        ;;
    list)
        list
        ;;
    *)
        echo "Usage: $SCRIPTNAME {start|stop|restart|list}" >&2
        exit 1
        ;;
esac
exit 0
#chmod 755 redis
#chkconfig --add redis
#chkconfig redis on
#chkconfig --list redis
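If the script was registered correctly, chkconfig --list should show it enabled for its runlevels, along these lines (exact levels depend on chkconfig defaults):
redis           0:off   1:off   2:on    3:on    4:on    5:on    6:off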
 
Verify the setup:
#service redis start &
#service redis list
[root@iZ28b5ymck0Z bin]# redis-cli
127.0.0.1:6379>
#service redis stop
#service redis restart
 
 
 
Summary of common Redis commands
 
[root@iZ28elppcznZ logs]# redis-cli -h 127.0.0.1
redis 127.0.0.1:6379>
redis 127.0.0.1:6379>
redis 127.0.0.1:6379> ping
PONG
redis 127.0.0.1:6379> select 1
OK
redis 127.0.0.1:6379[1]> dbsize
(integer) 0
redis 127.0.0.1:6379[1]> select 0
OK
redis 127.0.0.1:6379> dbsize
(integer) 0
redis 127.0.0.1:6379> info
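Beyond ping/select/dbsize/info, a few everyday commands, shown here against a scratch key (all standard Redis commands):
redis 127.0.0.1:6379> set foo bar
OK
redis 127.0.0.1:6379> get foo
"bar"
redis 127.0.0.1:6379> expire foo 60
(integer) 1
redis 127.0.0.1:6379> ttl foo
(integer) 60
redis 127.0.0.1:6379> del foo
(integer) 1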
 
 
 
 
Deploying a Redis 3.x Cluster
Cluster nodes:
172.172.178.220:6379
172.172.178.221:6379
172.172.178.222:6379
172.172.178.223:6379
172.172.178.224:6379
172.172.178.225:6379
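Each node runs its own instance with cluster mode enabled; at minimum, the cluster options that were left commented out in the earlier /etc/redis.conf must be active in every node's /opt/redis.conf:

cluster-enabled yes
cluster-config-file "/opt/nodes.conf"
cluster-node-timeout 5000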
 
Start the Redis instance on each node:
redis-server /opt/redis.conf
[root@node1 ~]# cat /var/run/redis_6379.log
[Redis ASCII-art startup banner: Redis 3.2.1 (00000000/0) 64 bit, standalone mode, Port: 6379, PID: 27555]
27555:M 29 Jun 19:43:03.276 # Server started, Redis version 3.2.1
27555:M 29 Jun 19:43:03.276 * The server is now ready to accept connections on port 6379
 
Install the dependencies
yum install ruby rubygems -y
 
Install the redis gem
wget https://rubygems.global.ssl.fastly.net/gems/redis-3.2.1.gem
gem install -l redis-3.2.1.gem
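Confirm the gem is visible to Ruby:
# gem list redis

*** LOCAL GEMS ***

redis (3.2.1)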
 
Copy the cluster management tool to /usr/local/bin
scp redis-3.2.1/src/redis-trib.rb /usr/local/bin/redis-trib
 
Create the cluster
--replicas 1 creates one slave for each master in the cluster.
[root@node1 ~]# redis-trib create --replicas 1 172.172.178.220:6379 172.172.178.221:6379 172.172.178.222:6379 172.172.178.223:6379 172.172.178.224:6379 172.172.178.225:6379
>>> Creating cluster
>>> Performing hash slots allocation on 6 nodes...
Using 3 masters:
172.172.178.225:6379
172.172.178.224:6379
172.172.178.223:6379
Adding replica 172.172.178.222:6379 to 172.172.178.225:6379
Adding replica 172.172.178.221:6379 to 172.172.178.224:6379
Adding replica 172.172.178.220:6379 to 172.172.178.223:6379
S: ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379
replicates cd77344524a6ed5a4d1addfd34d7dd720c272386
S: 9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379
replicates e3e1f4f9254d6e26bf381b9d83456a62fa555f62
S: e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379
replicates fc4d4c03b2ab454f230f0af3b84930b26b6a56ac
M: cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379
slots:10923-16383 (5461 slots) master
M: e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379
slots:5461-10922 (5462 slots) master
M: fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379
slots:0-5460 (5461 slots) master
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join....
>>> Performing Cluster Check (using node 172.172.178.220:6379)
M: ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379
slots: (0 slots) master
replicates cd77344524a6ed5a4d1addfd34d7dd720c272386
M: 9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379
slots: (0 slots) master
replicates e3e1f4f9254d6e26bf381b9d83456a62fa555f62
M: e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379
slots: (0 slots) master
replicates fc4d4c03b2ab454f230f0af3b84930b26b6a56ac
M: cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379
slots:10923-16383 (5461 slots) master
M: e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379
slots:5461-10922 (5462 slots) master
M: fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379
slots:0-5460 (5461 slots) master
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
 
[root@node1 ~]# redis-cli cluster nodes
e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379 master - 0 1467208947127 5 connected 5461-10922
e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379 slave fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 0 1467208947127 6 connected
ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379 myself,slave cd77344524a6ed5a4d1addfd34d7dd720c272386 0 0 1 connected
fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379 master - 0 1467208948629 6 connected 0-5460
9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379 slave e3e1f4f9254d6e26bf381b9d83456a62fa555f62 0 1467208949131 5 connected
cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379 master - 0 1467208948128 4 connected 10923-16383
[root@node1 ~]#
[root@node1 ~]# tailf /opt/nodes.conf
e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379 master - 0 1467208714060 5 connected 5461-10922
e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379 slave fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 0 1467208716066 6 connected
ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379 myself,slave cd77344524a6ed5a4d1addfd34d7dd720c272386 0 0 1 connected
fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379 master - 0 1467208715063 6 connected 0-5460
9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379 slave e3e1f4f9254d6e26bf381b9d83456a62fa555f62 0 1467208714562 5 connected
cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379 master - 0 1467208715564 4 connected 10923-16383
vars currentEpoch 6 lastVoteEpoch 0
 
Test the cluster
[root@node1 ~]# redis-cli -c -p 6379 -h localhost
127.0.0.1:6379> get name
-> Redirected to slot [5798] located at 172.172.178.224:6379
(nil)
172.172.178.224:6379> set name cluster-test
OK
172.172.178.224:6379> get name
"cluster-test"
172.172.178.224:6379> KEYS *
1) "name"
 
Kill the master on 223 to test failover; you can see 220 has taken over as master.
[root@node4 ~]# redis-cli shutdown
[root@node1 ~]# redis-cli cluster nodes
9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379 slave e3e1f4f9254d6e26bf381b9d83456a62fa555f62 0 1467209895832 5 connected
e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379 master - 0 1467209896334 5 connected 5461-10922
e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379 slave fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 0 1467209894829 6 connected
ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379 myself,master - 0 0 7 connected 10923-16383
fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379 master - 0 1467209895832 6 connected 0-5460
cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379 master,fail - 1467209864421 1467209863721 4 disconnected
Kill the master on 224 to test failover; you can see 221 has taken over as master.
[root@node5 ~]# ps -ef|grep redis
root 3098 1 0 21:57 ? 00:00:02 redis-server 0.0.0.0:6379 [cluster]
root 4649 20237 0 22:20 pts/0 00:00:00 grep redis
[root@node5 ~]#
[root@node5 ~]# kill -9 3098
[root@node1 ~]# redis-cli -c cluster nodes
9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379 master - 0 1467210097070 8 connected 5461-10922
e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379 master,fail - 1467210059529 1467210058425 5 disconnected
e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379 slave fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 0 1467210097573 6 connected
ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379 myself,master - 0 0 7 connected 10923-16383
fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379 master - 0 1467210098576 6 connected 0-5460
cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379 master,fail - 1467209864421 1467209863721 4 disconnected
Kill the master on 225 to test failover; you can see 222 has taken over as master.
[root@node6 ~]# redis-cli shutdown
[root@node1 ~]# redis-cli -c cluster nodes
9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379 master - 0 1467210296440 8 connected 5461-10922
e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379 master,fail - 1467210059529 1467210058425 5 disconnected
e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379 master - 0 1467210297444 10 connected 0-5460
ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379 myself,master - 0 0 7 connected 10923-16383
fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379 master,fail - 1467210229709 1467210229107 6 disconnected
cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379 master,fail - 1467209864421 1467209863721 4 disconnected
 
If a master that has no slave is killed, the cluster becomes unavailable:
[root@node1 ~]# redis-cli -c
127.0.0.1:6379> get name
(error) CLUSTERDOWN The cluster is down
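CLUSTER INFO reports the same condition directly; while slot coverage is broken it should show something like:
127.0.0.1:6379> cluster info
cluster_state:fail
...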
 
 
 
Adding and removing Redis Cluster nodes
Add a master node
172.172.178.221:6380 is the new master node; 172.172.178.222:6379 is any existing node in the cluster.
[root@node1 ~]# redis-trib add-node 172.172.178.221:6380 172.172.178.222:6379
>>> Adding node 172.172.178.221:6380 to cluster 172.172.178.222:6379
>>> Performing Cluster Check (using node 172.172.178.222:6379)
M: e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
S: fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379
slots: (0 slots) slave
replicates e61cffa1d3040dd5523e5c79912b23732618b464
M: 9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379
slots:5461-10922 (5462 slots) master
1 additional replica(s)
S: cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379
slots: (0 slots) slave
replicates ff52c80b8477f50272a86893aec89dac153e9fbe
S: e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379
slots: (0 slots) slave
replicates 9b3b5256f0107585d5379de7e76f526d071e8a11
M: ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379
slots:10923-16383 (5461 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 172.172.178.221:6380 to make it join the cluster.
[OK] New node added correctly.
 
Add a slave node
172.172.178.221:6381 is the new slave node and 172.172.178.222:6379 is any existing cluster node; --master-id gives the ID of the master to replicate, and --slave marks the new node as a slave.
[root@node1 ~]# redis-trib add-node --slave --master-id f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 172.172.178.221:6381 172.172.178.222:6379
>>> Adding node 172.172.178.221:6381 to cluster 172.172.178.222:6379
>>> Performing Cluster Check (using node 172.172.178.222:6379)
M: e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
S: fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379
slots: (0 slots) slave
replicates e61cffa1d3040dd5523e5c79912b23732618b464
M: 9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379
slots:5461-10922 (5462 slots) master
1 additional replica(s)
M: f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 172.172.178.221:6380
slots: (0 slots) master
0 additional replica(s)
S: cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379
slots: (0 slots) slave
replicates ff52c80b8477f50272a86893aec89dac153e9fbe
S: e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379
slots: (0 slots) slave
replicates 9b3b5256f0107585d5379de7e76f526d071e8a11
M: ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379
slots:10923-16383 (5461 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 172.172.178.221:6381 to make it join the cluster.
Waiting for the cluster to join.
>>> Configure node as replica of 172.172.178.221:6380.
[OK] New node added correctly.
 
Check the cluster state
[root@node1 ~]# redis-cli cluster nodes
9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379 master - 0 1467216584316 8 connected 5461-10922
e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379 slave 9b3b5256f0107585d5379de7e76f526d071e8a11 0 1467216583314 8 connected
f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 172.172.178.221:6380 master - 0 1467216583314 0 connected
e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379 master - 0 1467216584316 10 connected 0-5460
ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379 myself,master - 0 0 7 connected 10923-16383
2cc2b30e09fde453fbd42a7330bbd65d216dc6cc 172.172.178.221:6381 slave f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 0 1467216584816 0 connected
fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379 slave e61cffa1d3040dd5523e5c79912b23732618b464 0 1467216585318 10 connected
cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379 slave ff52c80b8477f50272a86893aec89dac153e9fbe 0 1467216585318 7 connected
[root@node1 ~]#
[root@node1 ~]# redis-trib check 172.172.178.220:6379
>>> Performing Cluster Check (using node 172.172.178.220:6379)
M: ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379
slots:10923-16383 (5461 slots) master
1 additional replica(s)
M: 9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379
slots:5461-10922 (5462 slots) master
1 additional replica(s)
S: e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379
slots: (0 slots) slave
replicates 9b3b5256f0107585d5379de7e76f526d071e8a11
M: f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 172.172.178.221:6380
slots: (0 slots) master
1 additional replica(s)
M: e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
S: 2cc2b30e09fde453fbd42a7330bbd65d216dc6cc 172.172.178.221:6381
slots: (0 slots) slave
replicates f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
S: fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379
slots: (0 slots) slave
replicates e61cffa1d3040dd5523e5c79912b23732618b464
S: cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379
slots: (0 slots) slave
replicates ff52c80b8477f50272a86893aec89dac153e9fbe
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
 
The newly added master owns no slots yet; slots must be resharded to it, otherwise it will never be selected to store data.
[root@node1 ~]# redis-trib reshard 172.172.178.221:6380
>>> Performing Cluster Check (using node 172.172.178.221:6380)
M: f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 172.172.178.221:6380
slots: (0 slots) master
1 additional replica(s)
S: 2cc2b30e09fde453fbd42a7330bbd65d216dc6cc 172.172.178.221:6381
slots: (0 slots) slave
replicates f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
S: cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379
slots: (0 slots) slave
replicates ff52c80b8477f50272a86893aec89dac153e9fbe
M: 9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379
slots:5461-10922 (5462 slots) master
1 additional replica(s)
S: fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379
slots: (0 slots) slave
replicates e61cffa1d3040dd5523e5c79912b23732618b464
M: ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379
slots:10923-16383 (5461 slots) master
1 additional replica(s)
S: e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379
slots: (0 slots) slave
replicates 9b3b5256f0107585d5379de7e76f526d071e8a11
M: e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 1500
What is the receiving node ID? f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
Please enter all the source node IDs.
Type 'all' to use all the nodes as source nodes for the hash slots.
Type 'done' once you entered all the source nodes IDs.
Source node #1:all
Ready to move 1500 slots.
Source nodes:
M: 9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379
slots:5461-10922 (5462 slots) master
1 additional replica(s)
M: ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379
slots:10923-16383 (5461 slots) master
1 additional replica(s)
M: e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
Destination node:
M: f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 172.172.178.221:6380
slots: (0 slots) master
1 additional replica(s)
Resharding plan:
Moving slot 5461 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5462 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5463 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5464 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5465 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5466 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5467 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5468 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5469 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5470 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5471 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5472 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5473 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5474 from 9b3b5256f0107585d5379de7e76f526d071e8a11
...
 
Check the cluster state again
[root@node1 ~]# redis-cli -c cluster nodes
9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379 master - 0 1467217411735 8 connected 5962-10922
e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379 slave 9b3b5256f0107585d5379de7e76f526d071e8a11 0 1467217411735 8 connected
f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 172.172.178.221:6380 master - 0 1467217411234 12 connected 0-498 5461-5961 10923-11421
e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379 master - 0 1467217412739 10 connected 499-5460
ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379 myself,master - 0 0 7 connected 11422-16383
2cc2b30e09fde453fbd42a7330bbd65d216dc6cc 172.172.178.221:6381 slave f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 0 1467217412237 12 connected
fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379 slave e61cffa1d3040dd5523e5c79912b23732618b464 0 1467217410732 10 connected
cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379 slave ff52c80b8477f50272a86893aec89dac153e9fbe 0 1467217410732 7 connected
[root@node1 ~]#
[root@node1 ~]#
[root@node1 ~]# redis-trib check 172.172.178.221:6380
>>> Performing Cluster Check (using node 172.172.178.221:6380)
M: f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 172.172.178.221:6380
slots:0-498,5461-5961,10923-11421 (1499 slots) master
1 additional replica(s)
S: 2cc2b30e09fde453fbd42a7330bbd65d216dc6cc 172.172.178.221:6381
slots: (0 slots) slave
replicates f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
S: cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379
slots: (0 slots) slave
replicates ff52c80b8477f50272a86893aec89dac153e9fbe
M: 9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379
slots:5962-10922 (4961 slots) master
1 additional replica(s)
S: fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379
slots: (0 slots) slave
replicates e61cffa1d3040dd5523e5c79912b23732618b464
M: ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379
slots:11422-16383 (4962 slots) master
1 additional replica(s)
S: e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379
slots: (0 slots) slave
replicates 9b3b5256f0107585d5379de7e76f526d071e8a11
M: e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379
slots:499-5460 (4962 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
 
Remove a slave node
[root@node1 ~]# redis-trib del-node 172.172.178.221:6381 '2cc2b30e09fde453fbd42a7330bbd65d216dc6cc'
>>> Removing node 2cc2b30e09fde453fbd42a7330bbd65d216dc6cc from cluster 172.172.178.221:6381
>>> Sending CLUSTER FORGET messages to the cluster...
>>> SHUTDOWN the node.
 
Remove a master node
If the master has slave nodes, move them to another master first; if the master owns slots, reshard them away, then delete the master node.
[root@node1 ~]# redis-trib reshard 172.172.178.221:6380
>>> Performing Cluster Check (using node 172.172.178.221:6380)
M: f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 172.172.178.221:6380
slots:0-498,5461-5961,10923-11421 (1499 slots) master
0 additional replica(s)
S: cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379
slots: (0 slots) slave
replicates ff52c80b8477f50272a86893aec89dac153e9fbe
M: 9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379
slots:5962-10922 (4961 slots) master
1 additional replica(s)
S: fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379
slots: (0 slots) slave
replicates e61cffa1d3040dd5523e5c79912b23732618b464
M: ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379
slots:11422-16383 (4962 slots) master
1 additional replica(s)
S: e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379
slots: (0 slots) slave
replicates 9b3b5256f0107585d5379de7e76f526d071e8a11
M: e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379
slots:499-5460 (4962 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 1500
What is the receiving node ID? ff52c80b8477f50272a86893aec89dac153e9fbe
Please enter all the source node IDs.
Type 'all' to use all the nodes as source nodes for the hash slots.
Type 'done' once you entered all the source nodes IDs.
Source node #1:f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
Source node #2:done
Ready to move 1500 slots.
Source nodes:
M: f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 172.172.178.221:6380
slots:0-498,5461-5961,10923-11421 (1499 slots) master
0 additional replica(s)
Destination node:
M: ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379
slots:11422-16383 (4962 slots) master
1 additional replica(s)
Resharding plan:
Moving slot 0 from f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
Moving slot 1 from f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
Moving slot 2 from f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
Moving slot 3 from f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
Moving slot 4 from f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
Moving slot 5 from f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
Moving slot 6 from f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
Moving slot 7 from f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
Moving slot 8 from f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
Moving slot 9 from f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
...
 
[root@node1 ~]# redis-trib del-node 172.172.178.221:6380 'f8ccaffd5378d7380a0f8f57b9c8b6c825688a85'
>>> Removing node f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 from cluster 172.172.178.221:6380
>>> Sending CLUSTER FORGET messages to the cluster...
>>> SHUTDOWN the node.
[root@node1 ~]#
[root@node1 ~]# redis-cli -c cluster nodes
9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379 master - 0 1467218665980 8 connected 5962-10922
e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379 slave 9b3b5256f0107585d5379de7e76f526d071e8a11 0 1467218665478 8 connected
e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379 master - 0 1467218666480 10 connected 499-5460
ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379 myself,master - 0 0 13 connected 0-498 5461-5961 10923-16383
fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379 slave e61cffa1d3040dd5523e5c79912b23732618b464 0 1467218667484 10 connected
cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379 slave ff52c80b8477f50272a86893aec89dac153e9fbe 0 1467218666982 13 connected
 
 
 
How Redis Cluster works
Redis Cluster TCP ports
Every Redis Cluster node needs two open TCP connections: the normal TCP port used to serve clients, for example 6379, plus that port number increased by 10000, here 16379. The second, higher port is used for the cluster bus, a node-to-node communication channel that uses a binary protocol. Nodes use the cluster bus for failure detection, configuration updates, failover authorization and so on. Clients should never try to talk to the cluster bus port; they should always use the normal Redis command port. However, make sure both ports are open in your firewall, otherwise cluster nodes will not be able to communicate. The offset between the command port and the cluster bus port is always fixed at 10000. Note that for a Redis Cluster to work properly, for every node:
1. The normal client port (usually 6379) must be reachable by all clients that need to use the cluster, as well as by all other cluster nodes (which use the client port for key migrations).
2. The cluster bus port (client port + 10000) must be reachable from all other cluster nodes.
If you do not open both TCP ports, the cluster will not work as expected.
Redis Cluster data sharding
Redis Cluster does not use consistent hashing, but a different form of sharding in which every key is conceptually part of what we call a hash slot. There are 16384 hash slots in a Redis Cluster; to compute the hash slot for a given key, we simply take the CRC16 of the key modulo 16384. Every node in a Redis Cluster is responsible for a subset of the hash slots. For example, in a 3-node cluster:
- Node A contains hash slots 0 to 5500.
- Node B contains hash slots 5501 to 11000.
- Node C contains hash slots 11001 to 16383.
This makes it easy to add and remove nodes in the cluster. For example, to add a new node D, I move some hash slots from nodes A, B and C to D. Similarly, to remove node A, I move A's hash slots to B and C; once A is empty, it can be deleted from the cluster entirely. Because moving hash slots from one node to another does not require stopping operations, adding and removing nodes, or changing the percentage of hash slots a node holds, requires no downtime.
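In short, the mapping used throughout this document is:
HASH_SLOT = CRC16(key) mod 16384
which is exactly why the GET name in the cluster test earlier was redirected to slot 5798.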
Redis Cluster Architecture
[Figure: redis-cluster architecture diagram]
Architecture details:
- All redis nodes are interconnected (PING-PONG mechanism) and use a binary protocol internally to optimize transfer speed and bandwidth.
- A node is only marked as failed when more than half of the nodes in the cluster detect the failure.
- Clients connect directly to redis nodes with no intermediate proxy layer; a client does not need to connect to every node in the cluster, any single reachable node is enough.
- redis-cluster maps all physical nodes onto slots [0-16383]; the cluster maintains the node <-> slot <-> value mapping.
Redis Cluster election and fault tolerance
- Leader election involves all masters in the cluster: if more than half of the masters fail to communicate with a given master for longer than cluster-node-timeout, that master is considered down.
- When does the whole cluster become unavailable (cluster_state:fail)?
a: If any master goes down and it has no slave, the cluster enters the fail state; equivalently, the cluster fails whenever the slot mapping [0-16383] becomes incomplete. (ps: redis-3.0.0.rc1 added the cluster-require-full-coverage parameter; when disabled, the cluster keeps serving despite partial slot coverage.)
b: If more than half of the masters go down, the cluster enters the fail state whether or not they have slaves.
ps: While the cluster is unavailable, every operation against it fails with ((error) CLUSTERDOWN The cluster is down).
Redis Cluster master-slave model
To remain available when a subset of nodes fail or cannot communicate with the majority, Redis Cluster uses a master-slave model in which every hash slot has 1 (the master itself) to N replicas (N-1 additional slaves). In our example with nodes A, B and C, if node B fails the cluster cannot continue, since we no longer have a way to serve hash slots 5501-11000. However, if we add a slave for every master when the cluster is created (or later), so that the final cluster consists of masters A, B, C and slaves A1, B1, C1, the system can survive a failure of node B. B1 replicates B, so when B fails the cluster promotes B1 to master and keeps operating correctly.
Redis Cluster consistency guarantees
Redis Cluster does not guarantee strong consistency. In practice this means that under certain conditions, Redis Cluster may lose writes that the system has already acknowledged to the client.
The first reason Redis Cluster can lose writes is that it uses asynchronous replication. This means the following happens during a write:
- Your client writes to master B.
- Master B replies OK to your client.
- Master B propagates the write to its slaves B1, B2 and B3.
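So B can crash after the reply but before the propagation, and the acknowledged write is gone. If a particular write must reach the replicas before you proceed, the WAIT command (available since Redis 3.0) blocks until the given number of replicas have received it; this narrows the loss window but still does not make the system strongly consistent:
172.172.178.224:6379> set name cluster-test
OK
172.172.178.224:6379> wait 1 100
(integer) 1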
Manual failover
Sometimes it is useful to force a failover even though the master has no actual problem. For example, to upgrade the Redis process on a master node, you can fail it over so it becomes a slave, minimizing the impact on availability.
Redis Cluster supports manual failover via the CLUSTER FAILOVER command, which must be executed on one of the slaves of the master you want to fail over.
A manual failover is special: it is safer than a failover caused by an actual master failure, because it avoids any data loss during the process. Clients are switched from the original master to the new master only once the system has verified that the new master has processed the full replication stream of the old one.
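For example, to retire a master gracefully you would run, on one of its slaves (port 7005 here is just an illustrative choice):
redis-cli -p 7005 cluster failover
OK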
Adding a new node
Adding a new node is basically the process of adding an empty node and then either moving some data into it, if it is a new master, or telling it to replicate a known node, if it is a slave.
We'll cover both cases, starting with adding a new master instance.
In both cases, the first step is to add an empty node.
We start a new node on port 7006 (our six existing nodes already use ports 7000 to 7005) using the same configuration as the other nodes, except for the port number. To stay consistent with the previous node layout:
- Open a new tab in your terminal.
- Enter the cluster-test directory.
- Create a directory named 7006.
- Create a redis.conf file inside it, similar to the one used for the other nodes but using 7006 as the port number.
- Finally start the server with ../redis-server ./redis.conf
Then add the new node to the cluster:
./redis-trib.rb add-node 127.0.0.1:7006 127.0.0.1:7000
Adding a new node as a replica
Adding a new replica can be done in two ways. The obvious one is to use redis-trib again with the --slave option, like this:
./redis-trib.rb add-node --slave 127.0.0.1:7006 127.0.0.1:7000
Note that this command line is exactly the same as the one we used to add a new master, so no specific master is designated to receive the replica. In this case redis-trib adds the new node as a replica of a random master among the masters with fewer replicas.
However, you can specify exactly which master the new replica should target with the following command line:
./redis-trib.rb add-node --slave --master-id 3c3a0c74aae0b56170ccb03a76b60cfe7dc1912e 127.0.0.1:7006 127.0.0.1:7000
Removing a node
To remove a slave node, just use the del-node command of redis-trib:
./redis-trib.rb del-node 127.0.0.1:7000 <node-id>
Upgrading nodes in a Redis Cluster
Upgrading a slave node is easy: just stop the node and restart it with the updated version of Redis. If clients split read requests across slave nodes, they should be able to reconnect to a different slave when one becomes unavailable.
Upgrading a master is a bit more complex; the suggested procedure is:
1. Use CLUSTER FAILOVER to trigger a manual failover of the master (see the manual failover section of this document).
2. Wait for the master to turn into a slave.
3. Upgrade the node as you would a slave.
4. If you want the node you just upgraded to be a master again, trigger a new manual failover to make it a master once more.
 
 
 
 
Redis 3.0 Cluster Configuration Guide
Contents
Preparation
Installing the cluster
Managing the cluster
(1) Add a master node
(2) Resharding
(3) Remove nodes

Redis 3.0 overview
Redis is a high-performance key-value database. It largely makes up for the shortcomings of key-value stores such as memcached, and in some scenarios it is a good complement to a relational database. It ships clients for Python, Ruby, Erlang and PHP and is very easy to use. Version 3.0 adds the cluster feature, solving the problem that a single Redis instance cannot scale horizontally.
Architecture topology
The headline feature of version 3.0 is cluster support for distributed horizontal scaling. The topology below shows 3 master nodes and 3 slave nodes:
 
 
[Figure: cluster topology - the application (APP) connects to masters M1/M2/M3, each backed by slaves S1/S2/S3]
M1, M2 and M3 are the three Redis master nodes; S1, S2 and S3 are the three slaves that back up data for M1, M2 and M3 and handle failover. The application can use the database by connecting to any one of the masters. In a three-master cluster, at most one master may fail; when more than one master is down, Redis becomes unavailable. When a single master fails, its slave takes over the failed master's service, keeping the Redis database usable.
Preparation
(1) Installation package
http://redis.io
(2) System packages
Install gcc: yum install gcc
Install zlib: yum install zlib
Install ruby: yum install ruby
Install rubygems: yum install rubygems
Install the redis gem (download: http://rubygems.org/gems/redis/versions/3.0.7):
# gem install -l /tmp/redis-3.0.7.gem
Successfully installed redis-3.0.7
1 gem installed
Installing ri documentation for redis-3.0.7...
Installing RDoc documentation for redis-3.0.7...
(3) System parameters
Raise open files: # ulimit -n 10032 (default 1024). This sets the maximum number of open file descriptors; ulimit limits the resources available to processes started from the shell.
Add vm.overcommit_memory=1:
#vi /etc/sysctl.conf
#sysctl vm.overcommit_memory=1    # vm.overcommit_memory controls the Linux memory allocation policy; 1 means always allow overcommit, which prevents background saves from failing and data from being truncated
Disable huge pages: # echo never > /sys/kernel/mm/transparent_hugepage/enabled
This turns off transparent huge pages, which cause latency and memory-access problems for Redis.
Raise somaxconn: # echo 511 >/proc/sys/net/core/somaxconn
somaxconn caps the listen queue for new TCP connections; the default of 128 is too small for a high-load server that accepts connections frequently, and most environments should raise it to 1024 or more.
Stop the firewall: # service iptables stop
Disable selinux: # vi /etc/sysconfig/selinux and set SELINUX=disabled
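Note that ulimit and the echo commands above only change the running system. To survive a reboot, persist them (the paths below are the usual RHEL/CentOS 6 locations; adjust for your distribution):
# cat >> /etc/sysctl.conf <<EOF
vm.overcommit_memory = 1
net.core.somaxconn = 511
EOF
# echo "echo never > /sys/kernel/mm/transparent_hugepage/enabled" >> /etc/rc.local
# echo "* soft nofile 10032" >> /etc/security/limits.conf
# echo "* hard nofile 10032" >> /etc/security/limits.conf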
Installing the cluster
(1) Install the software
# cd /redis/redis-3.0.0
# make
# make install
 
(2) Copy the binaries
# cp /redis/redis-3.0.0/src/redis-trib.rb /usr/local/bin/
# cp redis-cli /usr/local/bin/
# cp redis-server /usr/local/bin/
# which redis-trib.rb
/usr/local/bin/redis-trib.rb
 
(3) Create the common config file redis-common.conf
# vi /redis/redis-3.0.0/config/redis-common.conf
Contents:
#GENERAL
daemonize yes
tcp-backlog 511
timeout 0
tcp-keepalive 0
loglevel notice
databases 16
dir /redis/redis-3.0.0/data
slave-serve-stale-data yes
slave-read-only yes
#not use default
repl-disable-tcp-nodelay yes
slave-priority 100
appendonly yes
appendfsync everysec
no-appendfsync-on-rewrite yes
auto-aof-rewrite-min-size 64mb
lua-time-limit 5000
cluster-enabled yes
cluster-node-timeout 15000
cluster-migration-barrier 1
slowlog-log-slower-than 10000
slowlog-max-len 128
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-entries 512
list-max-ziplist-value 64
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
 
(4) Node 1 config file redis-6379.conf
# vi /redis/redis-3.0.0/config/redis-6379.conf
Contents:
include /redis/redis-3.0.0/config/redis-common.conf
port 6379
logfile "/redis/redis-3.0.0/log/redis-6379.log"
maxmemory 100m
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key accordingly to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
maxmemory-policy allkeys-lru
appendfilename "appendonly-6379.aof"
dbfilename dump-6379.rdb
#dir /redis/redis-3.0.0/data
cluster-config-file nodes-6379.conf
auto-aof-rewrite-percentage 80-100
 
 
(5) Node 2 config file redis-6389.conf
# vi /redis/redis-3.0.0/config/redis-6389.conf
Contents:
include /redis/redis-3.0.0/config/redis-common.conf
port 6389
logfile "/redis/redis-3.0.0/log/redis-6389.log"
maxmemory 100m
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key accordingly to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
maxmemory-policy allkeys-lru
appendfilename "appendonly-6389.aof"
dbfilename dump-6389.rdb
cluster-config-file nodes-6389.conf
auto-aof-rewrite-percentage 80-100
 
 
(6) Node 3 config file redis-6399.conf
# vi /redis/redis-3.0.0/config/redis-6399.conf
Contents:
include /redis/redis-3.0.0/config/redis-common.conf
port 6399
logfile "/redis/redis-3.0.0/log/redis-6399.log"
maxmemory 100m
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key accordingly to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
maxmemory-policy allkeys-lru
appendfilename "appendonly-6399.aof"
dbfilename dump-6399.rdb
cluster-config-file nodes-6399.conf
auto-aof-rewrite-percentage 80-100
 
 
(7) Start the Redis nodes (the config files for ports 7379, 7389 and 7399 are created the same way as the three above)
# redis-server redis-6379.conf
# redis-server redis-6389.conf
# redis-server redis-6399.conf
# redis-server redis-7379.conf
# redis-server redis-7389.conf
# redis-server redis-7399.conf
 
# ps -ef| grep redis
root 4623 1 0 11:07 ? 00:00:00 redis-server *:6379 [cluster]
root 4627 1 0 11:07 ? 00:00:00 redis-server *:6389 [cluster]
root 4631 1 0 11:07 ? 00:00:00 redis-server *:6399 [cluster]
root 4654 1 0 11:30 ? 00:00:00 redis-server *:7379 [cluster]
root 4658 1 0 11:30 ? 00:00:00 redis-server *:7389 [cluster]
root 4662 1 0 11:30 ? 00:00:00 redis-server *:7399 [cluster]
 
 
 
Specifying masters and slaves
redis-trib.rb create --replicas 1 172.31.103.211:6379 172.31.103.210:6379 172.31.103.209:6379 172.31.103.211:6389 172.31.103.210:6389 172.31.103.209:6389
The first three addresses become masters; the last three become slaves.
 
(8) Create the cluster with redis-trib
# --replicas specifies how many slave nodes each master in the Redis Cluster gets
# redis-trib.rb create --replicas 1 192.168.3.88:6379 192.168.3.88:6389 192.168.3.88:6399 192.168.3.88:7379 192.168.3.88:7389 192.168.3.88:7399
>>> Creating cluster
Connecting to node 192.168.3.88:6379: OK
Connecting to node 192.168.3.88:6389: OK
Connecting to node 192.168.3.88:6399: OK
Connecting to node 192.168.3.88:7379: OK
Connecting to node 192.168.3.88:7389: OK
Connecting to node 192.168.3.88:7399: OK
>>> Performing hash slots allocation on 6 nodes...
Using 3 masters:
192.168.3.88:6379
192.168.3.88:6389
192.168.3.88:6399
Adding replica 192.168.3.88:7379 to 192.168.3.88:6379
Adding replica 192.168.3.88:7389 to 192.168.3.88:6389
Adding replica 192.168.3.88:7399 to 192.168.3.88:6399
M: 05fe758161e2cbe23240697f47f1cd2c937a675b 192.168.3.88:6379
slots:0-5460 (5461 slots) master
M: d1d124d35c848e9c8e726b59af669c9196557869 192.168.3.88:6389
slots:5461-10922 (5462 slots) master
M: d64223d6695fcc7e1030f219f09d7488c438cf39 192.168.3.88:6399
slots:10923-16383 (5461 slots) master
S: 7f77ec03e40d0cc9f343d783a293ae8aa6c6e090 192.168.3.88:7379
replicates 05fe758161e2cbe23240697f47f1cd2c937a675b
S: 98dae5126228dea54d1321eeb357d8773bd2ee11 192.168.3.88:7389
replicates d1d124d35c848e9c8e726b59af669c9196557869
S: d013aee7cae8163f787cb6445778ff97bf66ce17 192.168.3.88:7399
replicates d64223d6695fcc7e1030f219f09d7488c438cf39
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join......
>>> Performing Cluster Check (using node 192.168.3.88:6379)
M: 05fe758161e2cbe23240697f47f1cd2c937a675b 192.168.3.88:6379
slots:0-5460 (5461 slots) master
M: d1d124d35c848e9c8e726b59af669c9196557869 192.168.3.88:6389
slots:5461-10922 (5462 slots) master
M: d64223d6695fcc7e1030f219f09d7488c438cf39 192.168.3.88:6399
slots:10923-16383 (5461 slots) master
M: 7f77ec03e40d0cc9f343d783a293ae8aa6c6e090 192.168.3.88:7379
slots: (0 slots) master
replicates 05fe758161e2cbe23240697f47f1cd2c937a675b
M: 98dae5126228dea54d1321eeb357d8773bd2ee11 192.168.3.88:7389
slots: (0 slots) master
replicates d1d124d35c848e9c8e726b59af669c9196557869
M: d013aee7cae8163f787cb6445778ff97bf66ce17 192.168.3.88:7399
slots: (0 slots) master
replicates d64223d6695fcc7e1030f219f09d7488c438cf39
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
 
(9) Check the cluster status
Connect to any node and run redis-trib.rb check:
# redis-trib.rb check 192.168.3.88:6379
Connecting to node 192.168.3.88:6379: OK
Connecting to node 192.168.3.88:7389: OK
Connecting to node 192.168.3.88:7399: OK
Connecting to node 192.168.3.88:6389: OK
Connecting to node 192.168.3.88:6399: OK
Connecting to node 192.168.3.88:7379: OK
>>> Performing Cluster Check (using node 192.168.3.88:6379)
M: 05fe758161e2cbe23240697f47f1cd2c937a675b 192.168.3.88:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
S: 98dae5126228dea54d1321eeb357d8773bd2ee11 192.168.3.88:7389
slots: (0 slots) slave
replicates d1d124d35c848e9c8e726b59af669c9196557869
S: d013aee7cae8163f787cb6445778ff97bf66ce17 192.168.3.88:7399
slots: (0 slots) slave
replicates d64223d6695fcc7e1030f219f09d7488c438cf39
M: d1d124d35c848e9c8e726b59af669c9196557869 192.168.3.88:6389
slots:5461-10922 (5462 slots) master
1 additional replica(s)
M: d64223d6695fcc7e1030f219f09d7488c438cf39 192.168.3.88:6399
slots:10923-16383 (5461 slots) master
1 additional replica(s)
S: 7f77ec03e40d0cc9f343d783a293ae8aa6c6e090 192.168.3.88:7379
slots: (0 slots) slave
replicates 05fe758161e2cbe23240697f47f1cd2c937a675b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
 
Managing the cluster
(1) Add a master node
The master node being added lives on another server; first create its config files there:
# vi /redis/redis-3.0.0/config/redis-6379.conf
# vi /redis/redis-3.0.0/config/redis-7379.conf

Add the node with redis-trib.rb, executed on a server already in the cluster (192.168.3.88).
(Note: add-node takes new_host:new_port existing_host:existing_port; the new node's address comes first, followed by any existing node's address.)
# redis-trib.rb add-node 192.168.3.61:6379 192.168.3.88:6379
>>> Adding node 192.168.3.61:6379 to cluster 192.168.3.88:6379
Connecting to node 192.168.3.88:6379: OK
Connecting to node 192.168.3.88:7389: OK
Connecting to node 192.168.3.88:7399: OK
Connecting to node 192.168.3.88:6389: OK
Connecting to node 192.168.3.88:6399: OK
Connecting to node 192.168.3.88:7379: OK
>>> Performing Cluster Check (using node 192.168.3.88:6379)
M: 05fe758161e2cbe23240697f47f1cd2c937a675b 192.168.3.88:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
S: 98dae5126228dea54d1321eeb357d8773bd2ee11 192.168.3.88:7389
slots: (0 slots) slave
replicates d1d124d35c848e9c8e726b59af669c9196557869
S: d013aee7cae8163f787cb6445778ff97bf66ce17 192.168.3.88:7399
slots: (0 slots) slave
replicates d64223d6695fcc7e1030f219f09d7488c438cf39
M: d1d124d35c848e9c8e726b59af669c9196557869 192.168.3.88:6389
slots:5461-10922 (5462 slots) master
1 additional replica(s)
M: d64223d6695fcc7e1030f219f09d7488c438cf39 192.168.3.88:6399
slots:10923-16383 (5461 slots) master
1 additional replica(s)
S: 7f77ec03e40d0cc9f343d783a293ae8aa6c6e090 192.168.3.88:7379
slots: (0 slots) slave
replicates 05fe758161e2cbe23240697f47f1cd2c937a675b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
Connecting to node 192.168.3.61:6379: OK
>>> Send CLUSTER MEET to node 192.168.3.61:6379 to make it join the cluster.
[OK] New node added correctly.
 
Pick any node and check the cluster state: the newly added node now appears in the cluster with the master role, but it has no slots assigned; slots are assigned to it later with the reshard command.
# redis-trib.rb check 192.168.3.88:6379
Connecting to node 192.168.3.88:6379: OK
Connecting to node 192.168.3.88:7389: OK
Connecting to node 192.168.3.88:7399: OK
Connecting to node 192.168.3.88:6389: OK
Connecting to node 192.168.3.61:6379: OK
Connecting to node 192.168.3.88:6399: OK
Connecting to node 192.168.3.88:7379: OK
>>> Performing Cluster Check (using node 192.168.3.88:6379)
M: 05fe758161e2cbe23240697f47f1cd2c937a675b 192.168.3.88:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
S: 98dae5126228dea54d1321eeb357d8773bd2ee11 192.168.3.88:7389
slots: (0 slots) slave
replicates d1d124d35c848e9c8e726b59af669c9196557869
S: d013aee7cae8163f787cb6445778ff97bf66ce17 192.168.3.88:7399
slots: (0 slots) slave
replicates d64223d6695fcc7e1030f219f09d7488c438cf39
M: d1d124d35c848e9c8e726b59af669c9196557869 192.168.3.88:6389
slots:5461-10922 (5462 slots) master
1 additional replica(s)
M: 89be535ff56586dcec56f14122add80d89a57bb3 192.168.3.61:6379
slots: (0 slots) master
0 additional replica(s)
M: d64223d6695fcc7e1030f219f09d7488c438cf39 192.168.3.88:6399
slots:10923-16383 (5461 slots) master
1 additional replica(s)
S: 7f77ec03e40d0cc9f343d783a293ae8aa6c6e090 192.168.3.88:7379
slots: (0 slots) slave
replicates 05fe758161e2cbe23240697f47f1cd2c937a675b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
 
After the master is added it needs a slave node. Adding the slave follows the same procedure as adding the master; once added, it must be configured from within redis.
# redis-trib.rb add-node 192.168.3.61:7379 192.168.3.88:6379
(output omitted)
 
Connect to the database of the newly added slave and run the replicate command. The ID that follows is the ID of master 192.168.3.61:6379, which you can read from the redis-trib.rb check output.
# redis-cli -c -h 192.168.3.61 -p 7379
192.168.3.61:7379> cluster replicate 89be535ff56586dcec56f14122add80d89a57bb3
OK
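You can verify the link from the slave itself; INFO replication should list 192.168.3.61:6379 as its master:
192.168.3.61:7379> info replication
# Replication
role:slave
master_host:192.168.3.61
master_port:6379
master_link_status:up
...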
 
The check output confirms the new slave has successfully attached to its master.
# redis-trib.rb check 192.168.3.88:6379
Connecting to node 192.168.3.88:6379: OK
Connecting to node 192.168.3.88:7389: OK
Connecting to node 192.168.3.88:7399: OK
Connecting to node 192.168.3.88:6389: OK
Connecting to node 192.168.3.61:6379: OK
Connecting to node 192.168.3.61:7379: OK
Connecting to node 192.168.3.88:6399: OK
Connecting to node 192.168.3.88:7379: OK
>>> Performing Cluster Check (using node 192.168.3.88:6379)
M: 05fe758161e2cbe23240697f47f1cd2c937a675b 192.168.3.88:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
S: 98dae5126228dea54d1321eeb357d8773bd2ee11 192.168.3.88:7389
slots: (0 slots) slave
replicates d1d124d35c848e9c8e726b59af669c9196557869
S: d013aee7cae8163f787cb6445778ff97bf66ce17 192.168.3.88:7399
slots: (0 slots) slave
replicates d64223d6695fcc7e1030f219f09d7488c438cf39
M: d1d124d35c848e9c8e726b59af669c9196557869 192.168.3.88:6389
slots:5461-10922 (5462 slots) master
1 additional replica(s)
M: 89be535ff56586dcec56f14122add80d89a57bb3 192.168.3.61:6379
slots: (0 slots) master
1 additional replica(s)
S: 92017f0258675b02a7799726339efabf7d005f8c 192.168.3.61:7379
slots: (0 slots) slave
replicates 89be535ff56586dcec56f14122add80d89a57bb3
M: d64223d6695fcc7e1030f219f09d7488c438cf39 192.168.3.88:6399
slots:10923-16383 (5461 slots) master
1 additional replica(s)
S: 7f77ec03e40d0cc9f343d783a293ae8aa6c6e090 192.168.3.88:7379
slots: (0 slots) slave
replicates 05fe758161e2cbe23240697f47f1cd2c937a675b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
 
(2) Resharding
After a new node joins, hash slots must be moved to it from the other nodes to balance the load. Point redis-trib.rb reshard at any node in the cluster:
# redis-trib.rb reshard 192.168.3.88:6379
Connecting to node 192.168.3.88:6379: OK
Connecting to node 192.168.3.88:7389: OK
Connecting to node 192.168.3.88:7399: OK
Connecting to node 192.168.3.88:6389: OK
Connecting to node 192.168.3.61:6379: OK
Connecting to node 192.168.3.61:7379: OK
Connecting to node 192.168.3.88:6399: OK
Connecting to node 192.168.3.88:7379: OK
>>> Performing Cluster Check (using node 192.168.3.88:6379)
M: 05fe758161e2cbe23240697f47f1cd2c937a675b 192.168.3.88:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
S: 98dae5126228dea54d1321eeb357d8773bd2ee11 192.168.3.88:7389
slots: (0 slots) slave
replicates d1d124d35c848e9c8e726b59af669c9196557869
S: d013aee7cae8163f787cb6445778ff97bf66ce17 192.168.3.88:7399
slots: (0 slots) slave
replicates d64223d6695fcc7e1030f219f09d7488c438cf39
M: d1d124d35c848e9c8e726b59af669c9196557869 192.168.3.88:6389
slots:5461-10922 (5462 slots) master
1 additional replica(s)
M: 89be535ff56586dcec56f14122add80d89a57bb3 192.168.3.61:6379
slots: (0 slots) master
1 additional replica(s)
S: 92017f0258675b02a7799726339efabf7d005f8c 192.168.3.61:7379
slots: (0 slots) slave
replicates 89be535ff56586dcec56f14122add80d89a57bb3
M: d64223d6695fcc7e1030f219f09d7488c438cf39 192.168.3.88:6399
slots:10923-16383 (5461 slots) master
1 additional replica(s)
S: 7f77ec03e40d0cc9f343d783a293ae8aa6c6e090 192.168.3.88:7379
slots: (0 slots) slave
replicates 05fe758161e2cbe23240697f47f1cd2c937a675b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 4096    -- 16384/4 = 4096, balancing evenly across the 4 masters (an odd number of masters is recommended)
What is the receiving node ID? 89be535ff56586dcec56f14122add80d89a57bb3    -- ID of the newly added master
Please enter all the source node IDs.
Type 'all' to use all the nodes as source nodes for the hash slots.
Type 'done' once you entered all the source nodes IDs.
Source node #1:all    -- move slots evenly from all other masters to the new master
.....
.....
Moving slot 12284 from 192.168.3.88:6399 to 192.168.3.61:6379:
Moving slot 12285 from 192.168.3.88:6399 to 192.168.3.61:6379:
Moving slot 12286 from 192.168.3.88:6399 to 192.168.3.61:6379:
Moving slot 12287 from 192.168.3.88:6399 to 192.168.3.61:6379:
 
Check again: every master now holds 4096 slots
# redis-trib.rb check 192.168.3.88:6379
Connecting to node 192.168.3.88:6379: OK
Connecting to node 192.168.3.88:7389: OK
Connecting to node 192.168.3.88:7399: OK
Connecting to node 192.168.3.88:6389: OK
Connecting to node 192.168.3.61:6379: OK
Connecting to node 192.168.3.61:7379: OK
Connecting to node 192.168.3.88:6399: OK
Connecting to node 192.168.3.88:7379: OK
>>> Performing Cluster Check (using node 192.168.3.88:6379)
M: 05fe758161e2cbe23240697f47f1cd2c937a675b 192.168.3.88:6379
slots:1365-5460 (4096 slots) master
1 additional replica(s)
S: 98dae5126228dea54d1321eeb357d8773bd2ee11 192.168.3.88:7389
slots: (0 slots) slave
replicates d1d124d35c848e9c8e726b59af669c9196557869
S: d013aee7cae8163f787cb6445778ff97bf66ce17 192.168.3.88:7399
slots: (0 slots) slave
replicates d64223d6695fcc7e1030f219f09d7488c438cf39
M: d1d124d35c848e9c8e726b59af669c9196557869 192.168.3.88:6389
slots:6827-10922 (4096 slots) master
1 additional replica(s)
M: 89be535ff56586dcec56f14122add80d89a57bb3 192.168.3.61:6379
slots:0-1364,5461-6826,10923-12287 (4096 slots) master
1 additional replica(s)
S: 92017f0258675b02a7799726339efabf7d005f8c 192.168.3.61:7379
slots: (0 slots) slave
replicates 89be535ff56586dcec56f14122add80d89a57bb3
M: d64223d6695fcc7e1030f219f09d7488c438cf39 192.168.3.88:6399
slots:12288-16383 (4096 slots) master
1 additional replica(s)
S: 7f77ec03e40d0cc9f343d783a293ae8aa6c6e090 192.168.3.88:7379
slots: (0 slots) slave
replicates 05fe758161e2cbe23240697f47f1cd2c937a675b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
 
(3) Remove nodes
Before removing a master node, migrate its slots to other masters first:
# redis-trib.rb reshard 192.168.3.88:6379
Connecting to node 192.168.3.88:6379: OK
Connecting to node 192.168.3.88:7389: OK
Connecting to node 192.168.3.88:7399: OK
Connecting to node 192.168.3.88:6389: OK
Connecting to node 192.168.3.61:6379: OK
Connecting to node 192.168.3.61:7379: OK
Connecting to node 192.168.3.88:6399: OK
Connecting to node 192.168.3.88:7379: OK
>>> Performing Cluster Check (using node 192.168.3.88:6379)
M: 05fe758161e2cbe23240697f47f1cd2c937a675b 192.168.3.88:6379
slots:1365-5460,12288-16383 (8192 slots) master
1 additional replica(s)
S: 98dae5126228dea54d1321eeb357d8773bd2ee11 192.168.3.88:7389
slots: (0 slots) slave
replicates d1d124d35c848e9c8e726b59af669c9196557869
S: d013aee7cae8163f787cb6445778ff97bf66ce17 192.168.3.88:7399
slots: (0 slots) slave
replicates d64223d6695fcc7e1030f219f09d7488c438cf39
M: d1d124d35c848e9c8e726b59af669c9196557869 192.168.3.88:6389
slots:6827-10922 (4096 slots) master
1 additional replica(s)
M: 89be535ff56586dcec56f14122add80d89a57bb3 192.168.3.61:6379
slots:0-1364,5461-6826,10923-12287 (4096 slots) master
1 additional replica(s)
S: 92017f0258675b02a7799726339efabf7d005f8c 192.168.3.61:7379
slots: (0 slots) slave
replicates 89be535ff56586dcec56f14122add80d89a57bb3
M: d64223d6695fcc7e1030f219f09d7488c438cf39 192.168.3.88:6399
slots: (0 slots) master
1 additional replica(s)
S: 7f77ec03e40d0cc9f343d783a293ae8aa6c6e090 192.168.3.88:7379
slots: (0 slots) slave
replicates 05fe758161e2cbe23240697f47f1cd2c937a675b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 4096
What is the receiving node ID? 89be535ff56586dcec56f14122add80d89a57bb3
Please enter all the source node IDs.
Type 'all' to use all the nodes as source nodes for the hash slots.
Type 'done' once you entered all the source nodes IDs.
Source node #1:d1d124d35c848e9c8e726b59af669c9196557869
Source node #2:done
...
Moving slot 10920 from d1d124d35c848e9c8e726b59af669c9196557869
Moving slot 10921 from d1d124d35c848e9c8e726b59af669c9196557869
Moving slot 10922 from d1d124d35c848e9c8e726b59af669c9196557869
Do you want to proceed with the proposed reshard plan (yes/no)?yes
...
Moving slot 10920 from 192.168.3.88:6389 to 192.168.3.61:6379:
Moving slot 10921 from 192.168.3.88:6389 to 192.168.3.61:6379:
Moving slot 10922 from 192.168.3.88:6389 to 192.168.3.61:6379:
 
Check that the node's slots have all been migrated away; once it holds none, the node can be removed:
# redis-trib.rb check 192.168.3.88:6399
# redis-trib.rb del-node 192.168.3.88:6399 d64223d6695fcc7e1030f219f09d7488c438cf39
>>> Removing node d64223d6695fcc7e1030f219f09d7488c438cf39 from cluster 192.168.3.88:6399
Connecting to node 192.168.3.88:6399: OK
Connecting to node 192.168.3.61:6379: OK
Connecting to node 192.168.3.88:6379: OK
Connecting to node 192.168.3.88:7389: OK
Connecting to node 192.168.3.88:7379: OK
Connecting to node 192.168.3.88:6389: OK
Connecting to node 192.168.3.61:7379: OK
Connecting to node 192.168.3.88:7399: OK
>>> Sending CLUSTER FORGET messages to the cluster...
>>> 192.168.3.88:7399 as replica of 192.168.3.88:6399
/usr/lib/ruby/gems/1.8/gems/redis-3.0.7/lib/redis/client.rb:97:in `call': ERR Can't forget my master!(Redis::CommandError)
from /usr/lib/ruby/gems/1.8/gems/redis-3.0.7/lib/redis.rb:2432:in `method_missing'
from /usr/lib/ruby/gems/1.8/gems/redis-3.0.7/lib/redis.rb:37:in `synchronize'
from /usr/lib/ruby/1.8/monitor.rb:242:in `mon_synchronize'
from /usr/lib/ruby/gems/1.8/gems/redis-3.0.7/lib/redis.rb:37:in `synchronize'
from /usr/lib/ruby/gems/1.8/gems/redis-3.0.7/lib/redis.rb:2431:in `method_missing'
from /usr/local/bin/redis-trib.rb:1086:in `delnode_cluster_cmd'
from /usr/local/bin/redis-trib.rb:1078:in `each'
from /usr/local/bin/redis-trib.rb:1078:in `delnode_cluster_cmd'
from /usr/local/bin/redis-trib.rb:1373:in `send'
from /usr/local/bin/redis-trib.rb:1373
Before deleting a master, delete its slave node first, otherwise the command fails with the error above.
# redis-trib.rb del-node 192.168.3.88:7399 d013aee7cae8163f787cb6445778ff97bf66ce17
>>> Removing node d013aee7cae8163f787cb6445778ff97bf66ce17 from cluster 192.168.3.88:7399
Connecting to node 192.168.3.88:7399: OK
Connecting to node 192.168.3.61:6379: OK
Connecting to node 192.168.3.61:7379: OK
Connecting to node 192.168.3.88:7379: OK
Connecting to node 192.168.3.88:6399: OK
Connecting to node 192.168.3.88:6379: OK
Connecting to node 192.168.3.88:7389: OK
Connecting to node 192.168.3.88:6389: OK
>>> Sending CLUSTER FORGET messages to the cluster...
>>> SHUTDOWN the node.
# redis-trib.rb del-node 192.168.3.88:6399 d64223d6695fcc7e1030f219f09d7488c438cf39
>>> Removing node d64223d6695fcc7e1030f219f09d7488c438cf39 from cluster 192.168.3.88:6399
Connecting to node 192.168.3.88:6399: OK
Connecting to node 192.168.3.61:6379: OK
Connecting to node 192.168.3.88:6379: OK
Connecting to node 192.168.3.88:7389: OK
Connecting to node 192.168.3.88:7379: OK
Connecting to node 192.168.3.88:6389: OK
Connecting to node 192.168.3.61:7379: OK
>>> Sending CLUSTER FORGET messages to the cluster...
>>> SHUTDOWN the node.
 
 
 
 
 