LVS + Keepalived + BIND + Web: Building a Highly Available Load-Balancing System

This is an original work. Reposting is permitted, provided the original source, author information, and this statement are indicated with a hyperlink; otherwise legal liability will be pursued. http://hatech.blog.51cto.com/8360868/1417899

-------------------------------

I. Introduction

II. Environment

III. Configuration

1. LB-Master and LB-Backup configuration

(1) Install keepalived and ipvsadm on LB-Master and LB-Backup

(2) LB-Master's main keepalived configuration file

(3) LB-Backup's main keepalived configuration file

2. DNS-Master and DNS-Backup configuration

(1) NIC setup on DNS-Master and DNS-Backup

(2) Install BIND and configure ARP behavior on DNS-Master and DNS-Backup

(3) DNS-Master configuration

(4) DNS-Backup configuration

3. web1 and web2 configuration

IV. Testing

V. Failure simulation

  1. Simulating an LB-Master failure

  2. Simulating a DNS-Master failure

  3. Simulating a web1 failure

-------------------------------

 

I. Introduction

    This lab implements a highly available, load-balanced web setup for an enterprise scenario: if either the Master or the Backup director goes down, service remains available. In the topology, LB-Master schedules the web traffic and LB-Backup schedules the DNS traffic, spreading the load across both directors; if either one fails, both the web and DNS virtual services immediately move to the surviving server. DNS runs as a master/slave pair for high availability. In production the web pages would be identical; here they are deliberately different so the round-robin effect is visible. In a real deployment the web backends should sit on shared storage to keep the content consistent.

II. Environment

OS: CentOS 6.4, 32-bit, 6 machines

Topology diagram:

[Figure: network topology]

IP address allocation:

[Figure: IP address allocation]

 

III. Configuration

1. LB-Master and LB-Backup configuration

(1) Install keepalived and ipvsadm on LB-Master and LB-Backup

# yum groupinstall "Additional Development"         // install development tools
# yum groupinstall "Development tools"
# tar -zxvf keepalived-1.2.1.tar.gz -C /usr/local/src/
# cd /usr/local/src/keepalived-1.2.1
# ./configure
Keepalived configuration
------------------------
Keepalived version       : 1.2.1
Compiler                 : gcc
Compiler flags           : -g -O2
Extra Lib                : -lpopt -lssl -lcrypto 
Use IPVS Framework       : No    // problem: IPVS support was not detected
IPVS sync daemon support : No
Use VRRP Framework       : Yes
Use Debug flags          : No
Fix:
# yum install kernel-devel ipvsadm
# ln -s /usr/src/kernels/2.6.32-358.el6.i686/ /usr/src/linux
# ./configure                   // run configure again
# make                          // compile
# make install                  // install
  
# cd /usr/local/etc             // keepalived's default install prefix
# ll
drwxr-xr-x. 3 root root 4096 May 24 00:37 keepalived
drwxr-xr-x. 3 root root 4096 May 24 00:29 rc.d
drwxr-xr-x. 2 root root 4096 May 24 00:29 sysconfig
   
Set keepalived up so it can be started as a system service:
# cp /usr/local/etc/rc.d/init.d/keepalived /etc/rc.d/init.d/ 
# cp /usr/local/etc/sysconfig/keepalived  /etc/sysconfig/
# mkdir /etc/keepalived
# cp /usr/local/etc/keepalived/keepalived.conf /etc/keepalived/
# cp /usr/local/sbin/keepalived /usr/sbin/
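
With the init script and sysconfig file copied into place, keepalived can be managed like any other SysV service. A minimal sketch of registering and starting it (assuming the standard CentOS 6 chkconfig workflow; these steps are not shown in the original post):

# chkconfig --add keepalived     // register the init script
# chkconfig keepalived on        // start automatically at boot
# service keepalived start       // start it now
# ps -C keepalived -o pid,args   // verify the parent and child keepalived processes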

 

(2) LB-Master's main keepalived configuration file

# cat /etc/keepalived/keepalived.conf
# Configuration File for keepalived
#global define
global_defs {
        router_id Haweb_1
        }
vrrp_sync_group VGM {
        group {
        VI_WEB
        }
}
vrrp_sync_group VGN {
        group {
        HA_DNS
        }
}
 
# vrrp_instance define #
vrrp_instance VI_WEB {
        state MASTER
        interface eth0
        lvs_sync_daemon_interface eth0
        virtual_router_id 55
        priority 100
        advert_int 5
        authentication {
            auth_type PASS
            auth_pass 123456
        }
    virtual_ipaddress {
            192.168.2.200/24 dev eth0
        }
}
##########  LVS  ###########
virtual_server 192.168.2.200 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
#   persistence_timeout 20
    protocol TCP
     
    real_server 192.168.2.50 80 {
        weight 100
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 192.168.2.60 80 {
        weight 100
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}
###   HA DNS START   ###
vrrp_instance HA_DNS {
        state BACKUP
        interface eth0
        lvs_sync_daemon_interface eth0
        virtual_router_id 56
        priority 90
        advert_int 5
        authentication {
                auth_type PASS
                auth_pass 123456
        }
        virtual_ipaddress {
                192.168.2.100/24 dev eth0
        }
}
##########  LVS DNS  ###########
virtual_server 192.168.2.100 53 {
        delay_loop 6
        lb_algo rr
        lb_kind DR
#   persistence_timeout 6
        protocol UDP 
 
        real_server 192.168.2.30 53 {
                weight 100
                TCP_CHECK {
                        connect_timeout 3
                        nb_get_retry 3
                        delay_before_retry 3
                        connect_port 53
                }
        }
        real_server 192.168.2.40 53 {
                weight 100
                TCP_CHECK {
                        connect_timeout 3
                        nb_get_retry 3
                        delay_before_retry 3
                        connect_port 53
                }
        }
}
 
##################################

 

(3) LB-Backup's main keepalived configuration file

# cat /etc/keepalived/keepalived.conf
# Configuration File for keepalived
#global define
global_defs {
        router_id Haweb_1
        }
vrrp_sync_group VGM {
        group {
        VI_WEB
        }
}
vrrp_sync_group VGN {
        group {
        HA_DNS
        }
}
 
# vrrp_instance define #
vrrp_instance VI_WEB {
        state BACKUP
        interface eth0
        lvs_sync_daemon_interface eth0
        virtual_router_id 55
        priority 90
        advert_int 5
        authentication {
            auth_type PASS
            auth_pass 123456
        }
    virtual_ipaddress {
            192.168.2.200/24 dev eth0
        }
}
##########  LVS  ###########
virtual_server 192.168.2.200 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
#   persistence_timeout 20
    protocol TCP
     
    real_server 192.168.2.50 80 {
        weight 100
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 192.168.2.60 80 {
        weight 100
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}
###   HA DNS START   ###
vrrp_instance HA_DNS {
        state MASTER
        interface eth0
        lvs_sync_daemon_interface eth0
        virtual_router_id 56
        priority 100
        advert_int 5
        authentication {
                auth_type PASS
                auth_pass 123456
        }
        virtual_ipaddress {
                192.168.2.100/24 dev eth0
        }
}
##########  LVS DNS  ###########
virtual_server 192.168.2.100 53 {
        delay_loop 6
        lb_algo rr
        lb_kind DR
#   persistence_timeout 6
        protocol UDP 
 
        real_server 192.168.2.30 53 {
                weight 100
                TCP_CHECK {
                        connect_timeout 3
                        nb_get_retry 3
                        delay_before_retry 3
                        connect_port 53
                }
        }
        real_server 192.168.2.40 53 {
                weight 100
                TCP_CHECK {
                        connect_timeout 3
                        nb_get_retry 3
                        delay_before_retry 3
                        connect_port 53
                }
        }
}
 
##################################

 

2. DNS-Master and DNS-Backup configuration

(1) NIC setup on DNS-Master and DNS-Backup

On both DNS-Master and DNS-Backup, add an lo:0 interface on top of the existing NIC and bind the DNS VIP to it; a sketch of the interface file follows the screenshot below.

[Screenshot: lo:0 interface configuration on the DNS servers]
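
The exact file is only visible in the screenshot; as a rough sketch, assuming the usual CentOS 6 ifcfg layout and the VIP from the IP plan, it would look something like this:

# cat /etc/sysconfig/network-scripts/ifcfg-lo:0
DEVICE=lo:0
IPADDR=192.168.2.100         # the DNS VIP
NETMASK=255.255.255.255      # /32 so the real server only answers for the VIP itself
ONBOOT=yes
# ifup lo:0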

(2) Install BIND and configure ARP behavior on DNS-Master and DNS-Backup

# yum install bind bind-chroot
# vim /etc/sysctl.conf                  // add the following two lines
net.ipv4.conf.all.arp_announce = 2      // always advertise the interface's own primary address in ARP requests
net.ipv4.conf.all.arp_ignore = 1        // only answer ARP requests for addresses configured on the incoming interface
# sysctl -p

These settings stop the real servers from answering ARP for the VIP bound to lo:0, which is required for LVS-DR to work.

(3) DNS-Master configuration

# service named start                      
Generating /etc/rndc.key:                   [  OK  ]
Starting named:                             [  OK  ]
 
# cd /var/named/chroot/etc/
# vim  named.conf      // edit the main configuration file

[Screenshot: named.conf]
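
The file contents are only in the screenshot above; a minimal sketch of a named.conf that would fit this setup (the listen and query options are assumptions, not necessarily the author's exact settings):

options {
        listen-on port 53 { any; };   # also answers on the DNS VIP held on lo:0
        directory       "/var/named";
        allow-query     { any; };
};
include "/etc/named.rfc1912.zones";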

 

# vim  named.rfc1912.zones      // edit the zone declaration file

[Screenshot: zone declarations in named.rfc1912.zones on DNS-Master]
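
Based on the zone file names created below, the declarations on DNS-Master would be along these lines (a sketch only; abc.com.local is assumed here to be the reverse zone for 192.168.2.0/24, and the transfer ACL is an assumption):

zone "abc.com" IN {
        type master;
        file "abc.com.zone";
        allow-transfer { 192.168.2.40; };    # let DNS-Backup pull the zone
};
zone "2.168.192.in-addr.arpa" IN {
        type master;
        file "abc.com.local";
        allow-transfer { 192.168.2.40; };
};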

 

# scp named.conf named.rfc1912.zones 192.168.2.40:/var/named/chroot/etc/
# cd /var/named/chroot/var/named/
# cp -p named.localhost abc.com.zone
# cp -p named.localhost abc.com.local
# vim abc.com.zone

[Screenshot: abc.com.zone]
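
The forward zone file is shown only as a screenshot; a sketch of what it typically contains for this topology (host names, serial and timers are assumptions, but www must resolve to the web VIP for the test in section IV to work):

$TTL 1D
@       IN SOA  dns1.abc.com. root.abc.com. (
                                        0       ; serial
                                        1D      ; refresh
                                        1H      ; retry
                                        1W      ; expire
                                        3H )    ; minimum
        IN      NS      dns1.abc.com.
        IN      NS      dns2.abc.com.
dns1    IN      A       192.168.2.30
dns2    IN      A       192.168.2.40
www     IN      A       192.168.2.200           ; the web VIP handled by the LVS directors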

# vim abc.com.local

[Screenshot: abc.com.local]
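
Likewise, a sketch of the reverse zone file (an assumption; the original contents are only in the screenshot):

$TTL 1D
@       IN SOA  dns1.abc.com. root.abc.com. (
                                        0 ; serial
                                        1D 1H 1W 3H )
        IN      NS      dns1.abc.com.
        IN      NS      dns2.abc.com.
200     IN      PTR     www.abc.com.            ; 192.168.2.200 -> www.abc.com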

 

(4) DNS-Backup configuration

# cd /var/named/chroot/etc/
# chgrp named named.conf named.rfc1912.zones
# vim named.rfc1912.zones

[Screenshot: slave zone declarations in named.rfc1912.zones on DNS-Backup]
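
On the backup, the same zones are declared as slaves pulling from DNS-Master; a sketch (file paths inferred from the slaves/ listing below, zone names as assumed above):

zone "abc.com" IN {
        type slave;
        masters { 192.168.2.30; };
        file "slaves/abc.com.zone";
};
zone "2.168.192.in-addr.arpa" IN {
        type slave;
        masters { 192.168.2.30; };
        file "slaves/abc.com.local";
};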

# service named start
# cd /var/named/chroot/var/named/slaves
# ll                                     // the zone files have been transferred from the master
-rw-r--r--. 1 named named 325 May 27 19:31 abc.com.local
-rw-r--r--. 1 named named 340 May 27 19:39 abc.com.zone
# vim abc.com.zone

[Screenshot: the transferred abc.com.zone on DNS-Backup]

# vim abc.com.local

[Screenshot: the transferred abc.com.local on DNS-Backup]

 

3. web1 and web2 configuration

On web1 and web2, add an lo:0 interface on top of the existing NIC and bind the web VIP to it (a sketch follows the screenshot).

[Screenshot: lo:0 interface configuration on the web servers]
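
As on the DNS servers, a rough sketch of the interface file (values taken from the IP plan; the exact screenshot contents may differ):

# cat /etc/sysconfig/network-scripts/ifcfg-lo:0
DEVICE=lo:0
IPADDR=192.168.2.200         # the web VIP
NETMASK=255.255.255.255
ONBOOT=yes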

# yum install httpd
# echo "This is web1.">/var/www/html/index.html     //web1配置
# echo "This is web2.">/var/www/html/index.html     //web2配置
# vim /etc/sysctl.conf                  //添加以下两行
net.ipv4.conf.all.arp_announce = 2
net.ipv4.conf.all.arp_ignore = 1
# sysctl -p
# service httpd start

IV. Testing

Test client IP configuration:

[Screenshot: test client IP configuration]

In a browser on the test client, open http://www.abc.com and keep refreshing: the web1 and web2 pages appear alternately. In a real production environment the content on both web servers would be identical.

[Screenshots: the browser alternately shows "This is web1." and "This is web2."]
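
The same behaviour can be checked from the command line; a brief sketch, assuming dig and curl are installed on the client and its resolver points at the DNS VIP:

# dig +short www.abc.com @192.168.2.100     // resolve through the DNS VIP
192.168.2.200
# curl http://www.abc.com/                  // repeat a few times; web1 and web2 alternate
This is web1.
# curl http://www.abc.com/
This is web2.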

LB-Master (handling the web round-robin):

# ipvsadm
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.2.200:http rr
  -> 192.168.2.50:http            Route   100    0          4         
  -> 192.168.2.60:http            Route   100    0          4         
UDP  192.168.2.100:domain rr
  -> 192.168.2.30:domain          Route   100    0          0         
  -> 192.168.2.40:domain          Route   100    0          0 
 
# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
    link/ether 00:0c:29:1c:d4:8d brd ff:ff:ff:ff:ff:ff
    inet 192.168.2.10/24 brd 192.168.2.255 scope global eth0
    inet 192.168.2.200/24 scope global secondary eth0
    inet6 fe80::20c:29ff:fe1c:d48d/64 scope link 
       valid_lft forever preferred_lft forever

 

LB-Backup (handling the DNS round-robin):

# ipvsadm
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.2.200:http rr
  -> 192.168.2.50:http            Route   100    0          0         
  -> 192.168.2.60:http            Route   100    0          0         
UDP  192.168.2.100:domain rr
  -> 192.168.2.30:domain          Route   100    0          6         
  -> 192.168.2.40:domain          Route   100    0          6  
 
 
# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
    link/ether 00:0c:29:22:3d:01 brd ff:ff:ff:ff:ff:ff
    inet 192.168.2.20/24 brd 192.168.2.255 scope global eth0
    inet 192.168.2.100/24 scope global secondary eth0
    inet6 fe80::20c:29ff:fe22:3d01/64 scope link 
       valid_lft forever preferred_lft forever

 

V. Failure simulation

1. Simulating an LB-Master failure

(1) Stop the keepalived service on LB-Master

# service keepalived stop
Stopping keepalived:                                [  OK  ]
# ipvsadm
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
    link/ether 00:0c:29:1c:d4:8d brd ff:ff:ff:ff:ff:ff
    inet 192.168.2.10/24 brd 192.168.2.255 scope global eth0
    inet6 fe80::20c:29ff:fe1c:d48d/64 scope link 
       valid_lft forever preferred_lft forever

(2) Check LB-Backup's state (while continuously refreshing the page on the test client)

# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
    link/ether 00:0c:29:22:3d:01 brd ff:ff:ff:ff:ff:ff
    inet 192.168.2.20/24 brd 192.168.2.255 scope global eth0
    inet 192.168.2.100/24 scope global secondary eth0
    inet 192.168.2.200/24 scope global secondary eth0
    inet6 fe80::20c:29ff:fe22:3d01/64 scope link 
       valid_lft forever preferred_lft forever
# ipvsadm
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.2.200:http rr
  -> 192.168.2.50:http            Route   100    0          9         
  -> 192.168.2.60:http            Route   100    0          10        
UDP  192.168.2.100:domain rr
  -> 192.168.2.30:domain          Route   100    0          6         
  -> 192.168.2.40:domain          Route   100    0          6

2. Simulating a DNS-Master failure

(1) Stop the named service on DNS-Master, and bring the keepalived service on LB-Master back up

# service named stop
Stopping named: .                                       [  OK  ]

(2) Check LB-Master's state (while continuously refreshing the page on the test client)

# ipvsadm
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.2.200:http rr
  -> 192.168.2.50:http            Route   100    0          13        
  -> 192.168.2.60:http            Route   100    0          14        
UDP  192.168.2.100:domain rr
  -> 192.168.2.40:domain          Route   100    0          0

(3) Check LB-Backup's state

# ipvsadm
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.2.200:http rr
  -> 192.168.2.50:http            Route   100    0          0         
  -> 192.168.2.60:http            Route   100    0          0         
UDP  192.168.2.100:domain rr
  -> 192.168.2.40:domain          Route   100    0          6

3. Simulating a web1 failure

To test this, simply stop the httpd service on web1; the step is straightforward, so only a brief sketch of what to check is given below.
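
A sketch of the check (the behaviour mirrors the DNS-Master simulation above: once the TCP_CHECK to port 80 fails, keepalived removes the real server from the LVS table and the client only ever sees the web2 page):

# service httpd stop            // run on web1
# ipvsadm                       // run on the active director: after a few seconds
                                // 192.168.2.50:http disappears from the
                                // 192.168.2.200:http virtual server, just as
                                // 192.168.2.30:domain did in the previous test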

