redis.cluster/memcached.cluster/vmware esxi
1. 安装配置redis的cluster 集群
redis 集群高可用
实验环境
192.168.198.131 openvpn-server #42-Ubuntu SMP Mon Jun 8 14:14:24 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux
192.168.198.132 openvpn-node1 #42-Ubuntu SMP Mon Jun 8 14:14:24 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux
192.168.198.133 openvpn-node2 #42-Ubuntu SMP Mon Jun 8 14:14:24 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux
192.168.198.134 openvpn-node3 #42-Ubuntu SMP Mon Jun 8 14:14:24 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux
redis 安装
root@openvpn-server:~# apt list redis #主节点俩张网卡主网卡可以上网
正在列表... 完成
redis/focal,now 5:5.0.7-2 all [已安装]
#apt-get download redis <node节点提示缺啥包下载啥包>
#其他节点只有内网网卡这里本地安装
root@openvpn-node1:/opt# ls
libhiredis0.14_0.14.0-6_amd64.deb liblua5.1-0_5.1.5-8.1build4_amd64.deb offlinePackage.tar.gz redis-server_5%3a5.0.7-2_amd64.deb
libjemalloc2_5.2.1-1ubuntu1_amd64.deb lua-bitop_1.0.2-5_amd64.deb redis_5%3a5.0.7-2_all.deb redis-tools_5%3a5.0.7-2_amd64.deb
创建集群
#allnode
mkdir -p /var/lib/redis/redis6379
mkdir -p /var/lib/redis/redis6380
chown redis:redis /var/lib/redis/ -R
#openvpn-server
root@openvpn-server:~# cat /etc/redis/redis.conf | grep -v "#" | grep -v ";" | grep -v "^$"
bind 192.168.198.131 ::1
protected-mode no
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile /var/run/redis/redis-server6379.pid
loglevel notice
logfile /var/log/redis/redis-server6379.log
databases 16
always-show-logo yes
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error no
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /var/lib/redis/redis6379
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
replica-priority 100
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes
root@openvpn-server:~# cat /etc/redis/redis6380.conf | grep -v "#" | grep -v ";" | grep -v "^$"
bind 192.168.198.131 ::1
protected-mode no
port 6380
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile /var/run/redis/redis-server6380.pid
loglevel notice
logfile /var/log/redis/redis-server6380.log
databases 16
always-show-logo yes
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error no
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /var/lib/redis/redis6380
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
replica-priority 100
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no
appendonly yes
appendfilename "appendonly6380.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes
#openvpn-nodeall
#配置文件修改bind
redis-server /etc/redis/redis6379.conf
redis-server /etc/redis/redis6380.conf
#验证进程启动
#修改所有配置文件追加以下配置 重新启动集群
去除注释 requirepass 123123
去除注释 cluster-enabled yes
去除注释 cluster-config-file nodes-6379.conf (注意:6380 实例要改为 nodes-6380.conf,同一主机上两个实例不能共用同一个 cluster-config-file)
去除注释 masterauth 123123
关闭保护模式 protected-mode no
redis-server /etc/redis/redis6379.conf
redis-server /etc/redis/redis6380.conf
#添加node 到集群
redis-cli -a 123123 --cluster create 192.168.198.131:6379 192.168.198.131:6380 192.168.198.132:6379 192.168.198.132:6380 192.168.198.133:6379 192.168.198.133:6380 --cluster-replicas 1
# #提示这个报错需要清除key
root@openvpn-server:~# redis-cli -a 123123 --cluster create 192.168.198.131:6379 192.168.198.131:6380 192.168.198.132:6379 192.168.198.132:6380 192.168.198.133:6379 192.168.198.133:6380 --cluster-replicas 1
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
[ERR] Node 192.168.198.131:6380 is not empty. Either the node already knows other nodes (check with CLUSTER NODES) or contains some key in database 0.
# 这个时候需要确认是否配置集群模式如果CTRL+c 集群初始化就会失败,需要删除备份文件重新初始化
验证集群
root@openvpn-node1:~# redis-cli -h 192.168.198.132
192.168.198.132:6379> auth 123123
OK
192.168.198.132:6379> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:3
cluster_stats_messages_ping_sent:161
cluster_stats_messages_pong_sent:165
cluster_stats_messages_meet_sent:1
cluster_stats_messages_sent:327
cluster_stats_messages_ping_received:161
cluster_stats_messages_pong_received:162
cluster_stats_messages_meet_received:4
cluster_stats_messages_received:327
192.168.198.132:6379> info Replication
# Replication
role:master
connected_slaves:1
slave0:ip=192.168.198.133,port=6380,state=online,offset=504,lag=0
master_replid:f990cc3e2c1e6464daf5a68f559290e9ba5ac316
master_replid2:0000000000000000000000000000000000000000
master_repl_offset:504
second_repl_offset:-1
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:1
repl_backlog_histlen:504
动态添加节点
添加新的主节点
root@openvpn-server:~# redis-cli -a 123123 --cluster add-node 192.168.198.134:6379 192.168.198.131:6379
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
>>> Adding node 192.168.198.134:6379 to cluster 192.168.198.134:6380
>>> Performing Cluster Check (using node 192.168.198.134:6380)
M: ac337b65315bec4f20ede868cb0ef510daee6a4c 192.168.198.134:6380
slots:[0-16383] (16384 slots) master
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 192.168.198.134:6379 to make it join the cluster.
[OK] New node added correctly.
#如果添加集群出现插槽分配不了就需要做修复
# redis-cli -a 123123 --cluster add-node 192.168.198.134:6379 192.168.198.131:6379
>>> Adding node 192.168.198.134:6379 to cluster 192.168.198.134:6380
>>> Performing Cluster Check (using node 192.168.198.134:6379)
M: c0767666bee76e5e0dc67f24031a3e1b574235cc 192.168.198.134:6379
slots: (0 slots) master
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[ERR] Not all 16384 slots are covered by nodes.
redis-cli -a 123123 --cluster fix 192.168.198.134:6379
redis-cli -a 123123 --cluster fix 192.168.198.134:6380
#执行集群添加操作
redis-cli -a 123123 --cluster add-node 192.168.198.134:6379 192.168.198.131:6379
#添加集群检测
root@openvpn-server:~# redis-cli -a 123123 --cluster check 192.168.198.131:6379
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
192.168.198.134:6379 (0495462e...) -> 0 keys | 16384 slots | 1 slaves.
[OK] 0 keys in 1 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.198.134:6379)
M: 0495462ec6b3ba0a8148e1d278f8e1480f07fd80 192.168.198.134:6379
slots:[0-16383] (16384 slots) master
1 additional replica(s)
S: ac337b65315bec4f20ede868cb0ef510daee6a4c 192.168.198.134:6380
slots: (0 slots) slave
replicates 0495462ec6b3ba0a8148e1d278f8e1480f07fd80
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
#给新添加的node分配slot
redis-cli -a 123123 --cluster check 192.168.198.131:6379 #通过check查找nodeid
redis-cli -a 123123 --cluster reshard 192.168.198.134:6379 #按提示输入接收的slot插槽数和nodeid
#验证插槽分配
root@openvpn-server:~# redis-cli -a 123123 --cluster check 192.168.198.131:6379
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
192.168.198.131:6379 (8e67a61f...) -> 0 keys | 4096 slots | 1 slaves.
192.168.198.133:6379 (6cee8692...) -> 0 keys | 4096 slots | 1 slaves.
192.168.198.134:6379 (d22fe1b5...) -> 0 keys | 4096 slots | 0 slaves.
192.168.198.132:6379 (87492444...) -> 0 keys | 4096 slots | 1 slaves.
[OK] 0 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.198.131:6379)
M: 8e67a61f686172b3102004ba6ad5654e52334673 192.168.198.131:6379
slots:[1365-5460] (4096 slots) master
1 additional replica(s)
S: 7f4c6e84372f6da44c2da7596f075343cf08d2ae 192.168.198.131:6380
slots: (0 slots) slave
replicates 6cee8692e8a6af3d5e36022eaf894d04dae6e994
S: 959e1a09f86b444cc53386ec3fa5df18b913fbc7 192.168.198.133:6380
slots: (0 slots) slave
replicates 87492444a2400f19ab26446a6f7eb7acc07aea39
S: 16ead4bf4b15746e4c5ba8666b1b83d41c46a1d9 192.168.198.132:6380
slots: (0 slots) slave
replicates 8e67a61f686172b3102004ba6ad5654e52334673
M: 6cee8692e8a6af3d5e36022eaf894d04dae6e994 192.168.198.133:6379
slots:[12288-16383] (4096 slots) master
1 additional replica(s)
M: d22fe1b5fc43a52d5e1e9b6908a82c4649fddf37 192.168.198.134:6379
slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
M: 87492444a2400f19ab26446a6f7eb7acc07aea39 192.168.198.132:6379
slots:[6827-10922] (4096 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
添加从节点
添加新的从节点(两种方式)
redis-cli --cluster add-node 172.16.1.10:7008 172.16.1.10:7007 --cluster-slave (不指定主节点)
redis-cli --cluster add-node 172.16.1.10:7008 172.16.1.10:7007 --cluster-slave --cluster-master-id 54fd24815e6e95f96201ce387ba6e31cb18f40e9
验证
redis-cli --cluster check 192.168.198.131:6379
重新分片
redis-cli --cluster reshard 192.168.198.131:6379
删除从节点
redis-cli -a 123123 --cluster del-node 192.168.198.132:6380 847d164cac6bde073b097de40e4897b24b8b665b
删除主节点(需要把主节点的hash槽移动到其他主节点上)
redis-cli --cluster reshard 192.168.198.131:6379
redis-cli --cluster del-node 192.168.198.13x:6379 `节点id`
动态删除节点
迁移master槽位到其他节点
#查看槽位信息
root@openvpn-node3:/etc/redis# redis-cli -a 123123 --cluster check 192.168.198.131:6379
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
192.168.198.131:6379 (8e67a61f...) -> 0 keys | 4096 slots | 1 slaves.
192.168.198.133:6379 (6cee8692...) -> 0 keys | 4096 slots | 1 slaves.
192.168.198.134:6379 (d22fe1b5...) -> 0 keys | 4096 slots | 0 slaves.
192.168.198.132:6379 (87492444...) -> 0 keys | 4096 slots | 1 slaves.
[OK] 0 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.198.131:6379)
M: 8e67a61f686172b3102004ba6ad5654e52334673 192.168.198.131:6379
slots:[1365-5460] (4096 slots) master
1 additional replica(s)
S: 7f4c6e84372f6da44c2da7596f075343cf08d2ae 192.168.198.131:6380
slots: (0 slots) slave
replicates 6cee8692e8a6af3d5e36022eaf894d04dae6e994
S: 959e1a09f86b444cc53386ec3fa5df18b913fbc7 192.168.198.133:6380
slots: (0 slots) slave
replicates 87492444a2400f19ab26446a6f7eb7acc07aea39
S: 16ead4bf4b15746e4c5ba8666b1b83d41c46a1d9 192.168.198.132:6380
slots: (0 slots) slave
replicates 8e67a61f686172b3102004ba6ad5654e52334673
M: 6cee8692e8a6af3d5e36022eaf894d04dae6e994 192.168.198.133:6379
slots:[12288-16383] (4096 slots) master
1 additional replica(s)
M: d22fe1b5fc43a52d5e1e9b6908a82c4649fddf37 192.168.198.134:6379
slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
M: 87492444a2400f19ab26446a6f7eb7acc07aea39 192.168.198.132:6379
slots:[6827-10922] (4096 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
#接下来将 192.168.198.132的槽位信息转移到192.168.198.131 节点
redis-cli -a 123123 --cluster reshard 192.168.198.131:6379
#重新分配槽位 多少个:4096
>>> Check slots coverage... [OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 4096 #迁移master上的多少个槽位
#分配给谁:192.168.198.131 nodeid
What is the receiving node ID? 8e67a61f686172b3102004ba6ad5654e52334673 #接收槽位的服务器ID
Please enter all the source node IDs. Type 'all' to use all the nodes as source nodes for the hash slots. Type 'done' once you entered all the source nodes IDs.
#分配谁的:192.168.198.132 nodeid
Source node #1: 87492444a2400f19ab26446a6f7eb7acc07aea39 #从哪个服务器迁移4096个槽位
Source node #2: done #写done,表示没有其他master了
Moving slot 5457 Do you want to proceed with the proposed reshard plan (yes/no)? yes #是否继续
验证槽位迁移完成
root@openvpn-server:~# redis-cli -a 123123 --cluster check 192.168.198.131:6379
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
192.168.198.131:6379 (8e67a61f...) -> 0 keys | 8192 slots | 1 slaves.
192.168.198.133:6379 (6cee8692...) -> 0 keys | 4096 slots | 1 slaves.
192.168.198.134:6379 (d22fe1b5...) -> 0 keys | 4096 slots | 1 slaves.
192.168.198.132:6379 (87492444...) -> 0 keys | 0 slots | 0 slaves.
[OK] 0 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.198.131:6379)
M: 8e67a61f686172b3102004ba6ad5654e52334673 192.168.198.131:6379
slots:[1365-5460],[6827-10922] (8192 slots) master
1 additional replica(s)
S: 7f4c6e84372f6da44c2da7596f075343cf08d2ae 192.168.198.131:6380
slots: (0 slots) slave
replicates 6cee8692e8a6af3d5e36022eaf894d04dae6e994
S: 959e1a09f86b444cc53386ec3fa5df18b913fbc7 192.168.198.133:6380
slots: (0 slots) slave
replicates 8e67a61f686172b3102004ba6ad5654e52334673
S: 16ead4bf4b15746e4c5ba8666b1b83d41c46a1d9 192.168.198.132:6380
slots: (0 slots) slave
replicates d22fe1b5fc43a52d5e1e9b6908a82c4649fddf37
M: 6cee8692e8a6af3d5e36022eaf894d04dae6e994 192.168.198.133:6379
slots:[12288-16383] (4096 slots) master
1 additional replica(s)
M: d22fe1b5fc43a52d5e1e9b6908a82c4649fddf37 192.168.198.134:6379
slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
1 additional replica(s)
M: 87492444a2400f19ab26446a6f7eb7acc07aea39 192.168.198.132:6379
slots: (0 slots) master #这里插槽就没了
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
从集群删除服务器
root@openvpn-server:~# redis-cli -a 123123 --cluster del-node 192.168.198.132:6379 87492444a2400f19ab26446a6f7eb7acc07aea39
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
>>> Removing node 87492444a2400f19ab26446a6f7eb7acc07aea39 from cluster 192.168.198.132:6379
>>> Sending CLUSTER FORGET messages to the cluster...
>>> SHUTDOWN the node.
验证node是否删除
root@openvpn-server:~# redis-cli -a 123123 --cluster check 192.168.198.131:6379
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
192.168.198.131:6379 (8e67a61f...) -> 0 keys | 8192 slots | 1 slaves.
192.168.198.133:6379 (6cee8692...) -> 0 keys | 4096 slots | 1 slaves.
192.168.198.134:6379 (d22fe1b5...) -> 0 keys | 4096 slots | 1 slaves.
[OK] 0 keys in 3 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.198.131:6379)
M: 8e67a61f686172b3102004ba6ad5654e52334673 192.168.198.131:6379
slots:[1365-5460],[6827-10922] (8192 slots) master
1 additional replica(s)
S: 7f4c6e84372f6da44c2da7596f075343cf08d2ae 192.168.198.131:6380
slots: (0 slots) slave
replicates 6cee8692e8a6af3d5e36022eaf894d04dae6e994
S: 959e1a09f86b444cc53386ec3fa5df18b913fbc7 192.168.198.133:6380
slots: (0 slots) slave
replicates 8e67a61f686172b3102004ba6ad5654e52334673
S: 16ead4bf4b15746e4c5ba8666b1b83d41c46a1d9 192.168.198.132:6380
slots: (0 slots) slave
replicates d22fe1b5fc43a52d5e1e9b6908a82c4649fddf37
M: 6cee8692e8a6af3d5e36022eaf894d04dae6e994 192.168.198.133:6379
slots:[12288-16383] (4096 slots) master
1 additional replica(s)
M: d22fe1b5fc43a52d5e1e9b6908a82c4649fddf37 192.168.198.134:6379
slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
模拟master 宕机
redis-cli -c -h 192.168.198.131 -a 123123
kill -9 2121212 #主进程
2. 安装配置memcached 高可用
memcache 本身没有像 redis 所具备的数据持久化功能,比如 RDB 和 AOF 都没有,但是可以通过做集群同步的方式,让各 memcache 服务器的数据进行同步,从而实现数据的一致性,即保证各 memcache
的数据是一样的,即使有任何一台 memcache 发生故障,只要集群中有一台 memcache 可用就不会出现数据丢失,当其他 memcache 重新加入到集群的时候可以自动从有数据的 memcache 当中自动获取数据并提供服务。Memcache 借助了操作系统的 libevent 工具做高效的读写。libevent 是个程序库,它将 Linux 的 epoll、BSD 类操作系统的 kqueue 等事件处理功能封装成统一的接口。即使对服务器的连接数增加,也能发挥高性能。memcached 使用这个 libevent 库,因此能在 Linux、BSD、Solaris 等操作系统上发挥其高性能。Memcache 支持最大的内存存储对象为 1M,超过 1M 的数据可以使用客户端压缩或拆分后放到多个key 中,比较大的数据在进行读取的时候需要消耗的时间比较长,memcache 最适合保存用户的 session实现 session 共享,Memcached 存储数据时, Memcached 会去申请1MB 的内存, 把该块内存称为一个slab, 也称为一个 page。memcached具有多种语言的客户端开发包,包括:Perl/PHP/JAVA/C/Python/Ruby/C#/
2.1 单机部署
2.1.1 yum 安装与启动
通过yum安装相对简单
# yum install memcached -y
# vim /etc/sysconfig/memcached
PORT="11211" #监听端口
USER="memcached" #启动用户
MAXCONN="1024" #最大连接数
CACHESIZE="1024" #最大使用内存
OPTIONS="" #其他选项
2.1.2 python 操作memcache
#!/usr/bin/env python
# coding: utf-8
# Author: Zhang ShiJie
# Demo: write 100 test keys to a memcached server and read each one
# back, printing the value to verify the set/get round trip.
# NOTE: the original paste lost the loop-body indentation, which makes
# this an IndentationError; restored here. print() works on both
# Python 2 and Python 3.
import memcache

# debug=True makes the client print protocol errors to stderr.
m = memcache.Client(['172.18.200.106:11211'], debug=True)
for i in range(100):
    m.set("key%d" % i, "v%d" % i)   # store key{i} -> v{i}
    ret = m.get('key%d' % i)        # read the value back
    print(ret)
2.1.3 编译安装
# yum install libevent libevent-devel –y
# pwd
/usr/local/src
# tar xvf memcached-1.5.12.tar.gz
# ./configure --prefix=/usr/local/memcache
# make && make install
#启动 memcached
# /usr/local/memcache/bin/memcached -u memcached -p 11211 -m 2048 -c 65536 &
2.2 memcached 集群部署
2.2.1 基于magent 的部署架构
该部署方式依赖于 magent 实现高可用,应用端通过负载服务器连接到 magent,然后再由 magent代理用户应用请求到 memcached 处理,底层的 memcached 为双主结构会自动同步数据,本部署方式存在 magent 单点问题因此需要两个 magent 做高可用。
2.2.2 repcached 实现原理
在 master 上可以通过 -X 指定 replication port,在 slave 上通过 -x/-X 找到 master 并 connect 上去,事实上,如果同时指定了 -x/-X, repcached 一定会尝试连接,但如果连接失败,它就会用 -X 参数来自己 listen(成为 master);如果 master坏掉,slave侦测到连接断了,它会自动 listen而成为 master;而如果 slave 坏掉, master 也会侦测到连接断,它就会重新 listen 等待新的 slave 加入。从这方案的技术实现来看,其实它是一个单 master 单 slave 的方案,但它的 master/slave 都是可读写的,而且可以相互同步,所以从功能上看,也可以认为它是双机 master-master 方案。
2.2.3 简化后的部署架构
magent 已经有很长时间没有更新,因此可以不再使用 magent,直接通过负载均衡连接至 memcached,
仍然有两台 memcached 做高可用,memcached 会自动同步数据保持数据一致性,即使一台 memcached故障也不影响业务正常运行,故障的 memcached 修复上线后再自动从另外一台同步数据即可保持数据一致性。
2.2.4 部署 repcached
http://repcached.sourceforge.net/
[root@s6 src]# yum install libevent libevent-devel
[root@s6 src]# wget https://sourceforge.net/projects/repcached/files/repcached/2.2.1-1.2.8/memcached-1.2.8-repcached-2.2.1.tar.gz
[root@s6 src]# tar xvf memcached-1.2.8-repcached-2.2.1.tar.gz
[root@s6 src]# cd memcached-1.2.8-repcached-2.2.1
[root@s6 memcached-1.2.8-repcached-2.2.1]# ./configure --prefix=/usr/local/repcached --enable-replication
[root@s6 memcached-1.2.8-repcached-2.2.1]# make #报错如下
解决办法:
[root@s6 memcached-1.2.8-repcached-2.2.1]# vim memcached.c
56 #ifndef IOV_MAX
57 #if defined(__FreeBSD__) || defined(__APPLE__)
58 # define IOV_MAX 1024
59 #endif
60 #endif
改为如下内容:
55 /* FreeBSD 4.x doesn't have IOV_MAX exposed. */
56 #ifndef IOV_MAX
57 # define IOV_MAX 1024
58 #endif
再次编译安装:
[root@s6 memcached-1.2.8-repcached-2.2.1]# make && make install
2.2.5 验证是否可执行
[root@s5 memcached-1.2.8-repcached-2.2.1]# /usr/local/repcached/bin/memcached -h
memcached 1.2.8
repcached 2.2.1
-p <num> TCP port number to listen on (default: 11211)
-U <num> UDP port number to listen on (default: 11211, 0 is off)
-s <file> unix socket path to listen on (disables network support)
-a <mask> access mask for unix socket, in octal (default 0700)
-l <ip_addr> interface to listen on, default is INDRR_ANY
-d run as a daemon
-r maximize core file limit
-u <username> assume identity of <username> (only when run as root)
-m <num> max memory to use for items in megabytes, default is 64 MB
-M return error on memory exhausted (rather than removing items)
-c <num> max simultaneous connections, default is 1024
-k lock down all paged memory. Note that there is a
limit on how much memory you may lock. Trying to
allocate more than that would fail, so be sure you
set the limit correctly for the user you started
the daemon with (not for -u <username> user;
under sh this is done with 'ulimit -S -l NUM_KB').
-v verbose (print errors/warnings while in event loop)
-vv very verbose (also print client commands/reponses)
-h print this help and exit
-i print memcached and libevent license
-P <file> save PID in <file>, only used with -d option
-f <factor> chunk size growth factor, default 1.25
-n <bytes> minimum space allocated for key+value+flags, default 48
-R Maximum number of requests per event
limits the number of requests process for a given connection
to prevent starvation. default 20
-b Set the backlog queue limit (default 1024)
-x <ip_addr> hostname or IP address of peer repcached
-X <num:num> TCP port number for replication. <listen:connect> (default: 11212)
2.2.6 启动memcache
通过 repcached 安装的 memcached 命令启动 memcache 服务并实现 memcache 主备结构,其中-x 为对
方即主 memcache 的 IP,-X 为本地启动的用数据同步的端口:
2.2.6.1 server 1 相关操作
[root@s5 ~]# /usr/local/repcached/bin/memcached -d -m 2048 -p 11211 -u root -c 2048 -x 172.18.200.106 -X 16000
2.2.6.2 server 2 相关操作
[root@s6 src]# /usr/local/repcached/bin/memcached -d -m 2048 -p 11211 -u root -c 2048 -x 172.18.200.105 -X 16000
2.2.7 连接memcache 验证数据
2.2.7.1 shell 命令
[root@s6 src]# telnet 172.18.200.106 11211
Trying 172.18.200.106...
Connected to 172.18.200.106.
Escape character is '^]'.
set name 0 0 4
jack
STORED
get name
VALUE name 0 4
jack
END
quit
Connection closed by foreign host.
[root@s6 src]# telnet 172.18.200.105 11211
Trying 172.18.200.105...
Connected to 172.18.200.105.
Escape character is '^]'.
get name
VALUE name 0 4
jack
END
quit
Connection closed by foreign host.
[root@s6 src]
2.2.7.2 python 脚本
#!/usr/bin/env python
# coding: utf-8
# Author: Zhang ShiJie
# Demo: write 100 test keys to a memcached server and read each one
# back, printing the value to verify replication/set/get round trip.
# NOTE: the original paste lost the loop-body indentation, which makes
# this an IndentationError; restored here. print() works on both
# Python 2 and Python 3.
import memcache

# debug=True makes the client print protocol errors to stderr.
m = memcache.Client(['172.18.200.106:11211'], debug=True)
for i in range(100):
    m.set("key%d" % i, "v%d" % i)   # store key{i} -> v{i}
    ret = m.get('key%d' % i)        # read the value back
    print(ret)
3. 安装vmware esxi
3.1 下载安装包
官网:vmware.com
下载页面:ESXI_6.x
3.2 制作u启
下载后制作启动U盘。推荐Rufus工具。
直接将系统镜像包ISO写入U盘:
3.3 安装esxi
vm 开机按F6
#------------------如果需要配置静态路由
摁F2进入个性设置,输入账号和密码
选择进入Configure Management Network --> IPv4 Configuration
摁空格键(Space)选择Set static IPv4 address and network configuration:
IPv4 Address [10.10.10.11]
Subnet Mask [255.255.255.0]
Default Gateway [10.10.10.10]
电脑端IP
web 输入IP https://10.10.10.11
输入用户密码
3.4 后台配置
3.4.1 激活esxi
先激活一下:
在主机->管理->许可(下面选一个输入进去,检查许可证,然后分配许可证)
0A65P-00HD0-3Z5M1-M097M-22P7H
HV4WC-01087-1ZJ48-031XP-9A843
3.4.2 配置网卡
首先是配置第一个网口开启混合模式。
3.4.3 添加内部硬盘
在软路由安装时,只会自动格式化你安装ESXI的那个硬盘。
所以,即使我配备了两个硬盘250G的ssd和1T的SEAGATE。但看不到SEAGATE这个硬盘,这时就需要我们将其格式化为VMFS格式。
3.4.4 设置自动启动
对于设备断电问题,就会导致ESXI重启,但安装的虚拟机系统没有自动启动。
ESXI正好提供自动启动的设置。
3.5 其他
3.5.1 升级esxi
下载地址:(注意需要登录vmware)
下载后上传到esxi 的存储里面
根据上传的路径,拼接得到绝对路径:/vmfs/volumes/5d926a65-f446c226-6a8a-00e0670f2312/images/update-from-esxi6.7-6.7_update03.zip
然后进入ESXI的ssh命令终端。
执行如下命令
# 查看可更新项目
esxcli software sources profile list -d /vmfs/volumes/5d926a65-f446c226-6a8a-00e0670f2312/images/update-from-esxi6.7-6.7_update03.zip
# 可以看到多个,就选标准版吧:ESXi-6.7.0-20190802001-standard
# 安装指定更新
esxcli software profile update -d /vmfs/volumes/5d926a65-f446c226-6a8a-00e0670f2312/images/update-from-esxi6.7-6.7_update03.zip -p ESXi-6.7.0-20190802001-standard
# 等待以下就安装好了,然后执行重启命令
reboot
3.5.2 修改默认的esxi web管理端口
#---------------------直接在终端修改配置
# 进入vi编辑模式
vi /etc/vmware/rhttpproxy/config.xml
3.5.3 修改esxi 主机名
ssh shell修改主机名:
esxcli system hostname set --domain kioye.cn
esxcli system hostname set --host esxi
3.5.4 更换ssl 证书
# 进入对应目录
cd /etc/vmware/ssl
# 替换rui.crt和rui.key
# 重启服务
service.sh restart
3.5.5 使用vm 软件连接esxi
直接在VMware 添加esxi 主机节点即可
如果当你发现自己的才华撑不起野心时,那就请你安静下来学习