Keepalived for Nginx Load Balancer High Availability
Environment:

| System | Hostname | IP |
| --- | --- | --- |
| RHEL 8 | master | 192.168.100.1 |
| RHEL 8 | slave | 192.168.100.2 |
| RHEL 8 | client | 192.168.100.3 |

The virtual IP (VIP) for this high-availability setup is 192.168.100.200.
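Optionally, add the three hosts to /etc/hosts on every node so they can resolve one another by name. This is purely a convenience; the hostnames below come from the table above:
#all nodes
[root@master ~]# cat >> /etc/hosts << 'EOF'
192.168.100.1 master
192.168.100.2 slave
192.168.100.3 client
EOF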
Installing Keepalived
Install keepalived on the master
#master
//disable the firewall and SELinux
[root@master ~]# systemctl disable --now firewalld
[root@master ~]# sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
[root@master ~]# setenforce 0
//install keepalived
[root@master ~]# yum -y install keepalived
//list the files installed by the package
[root@master ~]# rpm -ql keepalived
/etc/keepalived                     //configuration directory
/etc/keepalived/keepalived.conf     //main configuration file
/etc/sysconfig/keepalived
/usr/bin/genhash
/usr/lib/.build-id
/usr/lib/.build-id/6c
/usr/lib/systemd/system/keepalived.service     //systemd service unit file
/usr/libexec/keepalived
/usr/sbin/keepalived
······
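As a quick sanity check, confirm which version was installed (the exact version depends on your repository):
//check the installed keepalived version
[root@master ~]# rpm -q keepalived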
Install keepalived on the backup server in the same way.
#slave
//disable the firewall and SELinux
[root@slave ~]# systemctl disable --now firewalld
[root@slave ~]# sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
[root@slave ~]# setenforce 0
//install keepalived
[root@slave ~]# yum -y install keepalived
Install Nginx on both the master and the backup
Install nginx on the master
#master
[root@master ~]# yum -y install nginx
[root@master ~]# cd /usr/share/nginx/html/
[root@master html]# echo 'master' > index.html
[root@master html]# systemctl enable --now nginx
[root@master html]# ss -antl
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 128 0.0.0.0:80 0.0.0.0:*
LISTEN 0 128 0.0.0.0:22 0.0.0.0:*
LISTEN 0 128 [::]:80 [::]:*
LISTEN 0 128 [::]:22 [::]:*
Install nginx on the slave
#slave
[root@slave ~]# yum -y install nginx
[root@slave ~]# cd /usr/share/nginx/html/
[root@slave html]# echo 'slave' > index.html
[root@slave html]# systemctl enable --now nginx
[root@slave html]# ss -antl
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 128 0.0.0.0:80 0.0.0.0:*
LISTEN 0 128 0.0.0.0:22 0.0.0.0:*
LISTEN 0 128 [::]:80 [::]:*
LISTEN 0 128 [::]:22 [::]:*
Try accessing the servers in a browser to make sure the nginx service on the master is reachable.
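The same check can be done from the client with curl; each server should return the page written to its own index.html above:
#client
[root@client ~]# curl 192.168.100.1
master
[root@client ~]# curl 192.168.100.2
slave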
Keepalived configuration
Configure the master Keepalived
#master
//check your NIC name; mine is ens160
[root@master ~]# ip a
······
2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:0c:29:f9:ec:35 brd ff:ff:ff:ff:ff:ff
inet 192.168.100.1/24 brd 192.168.100.255 scope global noprefixroute ens160
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fef9:ec35/64 scope link
valid_lft forever preferred_lft forever
[root@master ~]# cd /etc/keepalived/
[root@master keepalived]# ls
keepalived.conf
[root@master keepalived]# mv keepalived.conf{,.bak}
[root@master keepalived]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id lb01
}
vrrp_instance VI_1 {
    state MASTER
    interface ens160        //change this to your own NIC name
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass baozi
    }
    virtual_ipaddress {
        192.168.100.200
    }
}
virtual_server 192.168.100.200 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    persistence_timeout 50
    protocol TCP
    real_server 192.168.100.1 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.100.2 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
[root@master ~]# systemctl enable --now keepalived
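Before configuring the backup, it is worth confirming that keepalived started cleanly and took the MASTER role; systemctl and the journal are enough for a quick check (you should see a log line similar to "(VI_1) Entering MASTER STATE"):
//confirm the service is active and review the VRRP state transitions in the journal
[root@master ~]# systemctl status keepalived --no-pager
[root@master ~]# journalctl -u keepalived --no-pager | tail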
Configure the backup Keepalived
#slave
//check your NIC name; mine is ens160
[root@slave ~]# ip a
······
2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:0c:29:56:9e:92 brd ff:ff:ff:ff:ff:ff
inet 192.168.100.2/24 brd 192.168.100.255 scope global noprefixroute ens160
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe56:9e92/64 scope link
valid_lft forever preferred_lft forever
[root@slave ~]# cd /etc/keepalived/
[root@slave keepalived]# ls
keepalived.conf
[root@slave keepalived]# mv keepalived.conf{,.bak}
[root@slave keepalived]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id lb02
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens160
    virtual_router_id 51
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass baozi
    }
    virtual_ipaddress {
        192.168.100.200
    }
}
virtual_server 192.168.100.200 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    persistence_timeout 50
    protocol TCP
    real_server 192.168.100.1 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.100.2 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
[root@slave ~]# systemctl enable --now keepalived
Check which node holds the VIP
Check on the MASTER
#master
[root@master ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:0c:29:f9:ec:35 brd ff:ff:ff:ff:ff:ff
inet 192.168.100.1/24 brd 192.168.100.255 scope global noprefixroute ens160
valid_lft forever preferred_lft forever
inet 192.168.100.200/32 scope global ens160     //the VIP is present here
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fef9:ec35/64 scope link
valid_lft forever preferred_lft forever
Check on the SLAVE
#slave
[root@slave ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:0c:29:56:9e:92 brd ff:ff:ff:ff:ff:ff
inet 192.168.100.2/24 brd 192.168.100.255 scope global noprefixroute ens160
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe56:9e92/64 scope link
valid_lft forever preferred_lft forever
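As expected, the VIP is only on the master. If you want to see the VRRP advertisements that keep it there, tcpdump on either node shows them (assuming the tcpdump package is installed); the master should advertise vrid 51 with priority 100 roughly once per second, matching advert_int 1:
#slave
//watch the VRRP advertisements coming from the master (Ctrl+C to stop)
[root@slave ~]# tcpdump -i ens160 -nn vrrp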
Modify the kernel parameter to allow binding to the VIP
This step is optional; it only matters when a service is configured to listen on the VIP itself (an example is shown after the slave's sysctl output below).
Modify the kernel parameter on the master
#master
[root@master ~]# echo 'net.ipv4.ip_nonlocal_bind = 1' >>/etc/sysctl.conf
[root@master ~]# sysctl -p
net.ipv4.ip_nonlocal_bind = 1
[root@master ~]# cat /proc/sys/net/ipv4/ip_nonlocal_bind
1
Modify the kernel parameter on the slave
#slave
[root@slave ~]# echo 'net.ipv4.ip_nonlocal_bind = 1' >>/etc/sysctl.conf
[root@slave ~]# sysctl -p
net.ipv4.ip_nonlocal_bind = 1
[root@slave ~]# cat /proc/sys/net/ipv4/ip_nonlocal_bind
1
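To see why ip_nonlocal_bind matters: if nginx were configured to listen only on the VIP rather than on 0.0.0.0 (as it is in this article), the node that does not currently hold the VIP could not start nginx without this parameter, because the VIP is not a local address there. A hypothetical server block illustrating that case, not used in the rest of this setup:
#example only, e.g. /etc/nginx/conf.d/vip.conf
server {
    listen 192.168.100.200:80;   # bind to the VIP only; needs net.ipv4.ip_nonlocal_bind=1 on the node without the VIP
    root   /usr/share/nginx/html;
}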
Have Keepalived monitor the Nginx load balancer
Keepalived monitors the state of the nginx load balancer through a script.
Write the scripts on the master
#master
[root@master ~]# mkdir /scripts
[root@master ~]# vim /scripts/check_n.sh
#!/bin/bash
# if no nginx process is found, stop keepalived so the VIP fails over to the backup
nginx_status=$(ps -ef|grep -Ev "grep|$0"|grep '\bnginx\b'|wc -l)
if [ $nginx_status -lt 1 ];then
    systemctl stop keepalived
fi
[root@master ~]# vim /scripts/notify.sh
#!/bin/bash
VIP=$2
sendmail (){
    subject="${VIP}'s server keepalived state has changed"
    content="`date +'%F %T'`: `hostname`'s state changed to master"
    echo $content | mail -s "$subject" qinghao_yu@163.com
}
case "$1" in
master)
    # promoted to MASTER: make sure nginx is running, then send a mail notification
    nginx_status=$(ps -ef|grep -Ev "grep|$0"|grep '\bnginx\b'|wc -l)
    if [ $nginx_status -lt 1 ];then
        systemctl start nginx
    fi
    sendmail
    ;;
backup)
    # demoted to BACKUP: stop nginx
    nginx_status=$(ps -ef|grep -Ev "grep|$0"|grep '\bnginx\b'|wc -l)
    if [ $nginx_status -gt 0 ];then
        systemctl stop nginx
    fi
    ;;
*)
    echo "Usage:$0 master|backup VIP"
    ;;
esac
[root@master ~]# chmod +x /scripts/check_n.sh
[root@master ~]# chmod +x /scripts/notify.sh
[root@master ~]# ll /scripts/
total 8
-rwxr-xr-x. 1 root root 142 May 20 17:34 check_n.sh
-rwxr-xr-x. 1 root root 663 May 20 17:35 notify.sh
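Before handing the scripts to keepalived, a quick manual smoke test helps: with nginx running, check_n.sh should do nothing and exit 0, and notify.sh prints its usage when called without a state argument:
#master
[root@master ~]# bash /scripts/check_n.sh ; echo $?
0
[root@master ~]# bash /scripts/notify.sh
Usage:/scripts/notify.sh master|backup VIP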
Write the scripts on the slave
#slave
[root@slave ~]# mkdir /scripts
[root@slave ~]# vim /scripts/check_n.sh
#!/bin/bash
# if no nginx process is found, stop keepalived so the VIP fails over to the other node
nginx_status=$(ps -ef|grep -Ev "grep|$0"|grep '\bnginx\b'|wc -l)
if [ $nginx_status -lt 1 ];then
    systemctl stop keepalived
fi
[root@slave ~]# vim /scripts/notify.sh
#!/bin/bash
VIP=$2
sendmail (){
    subject="${VIP}'s server keepalived state has changed"
    content="`date +'%F %T'`: `hostname`'s state changed to master"
    echo $content | mail -s "$subject" qinghao_yu@163.com
}
case "$1" in
master)
    # promoted to MASTER: make sure nginx is running, then send a mail notification
    nginx_status=$(ps -ef|grep -Ev "grep|$0"|grep '\bnginx\b'|wc -l)
    if [ $nginx_status -lt 1 ];then
        systemctl start nginx
    fi
    sendmail
    ;;
backup)
    # demoted to BACKUP: stop nginx
    nginx_status=$(ps -ef|grep -Ev "grep|$0"|grep '\bnginx\b'|wc -l)
    if [ $nginx_status -gt 0 ];then
        systemctl stop nginx
    fi
    ;;
*)
    echo "Usage:$0 master|backup VIP"
    ;;
esac
[root@slave ~]# chmod +x /scripts/check_n.sh
[root@slave ~]# chmod +x /scripts/notify.sh
[root@slave ~]# ll /scripts/
total 8
-rwxr-xr-x. 1 root root 142 May 20 17:39 check_n.sh
-rwxr-xr-x. 1 root root 663 May 20 17:38 notify.sh
The script name should not be the same as the service it checks; it is better to abbreviate the service name, e.g. check_n rather than check_nginx.
Add the monitoring script to the Keepalived configuration
Configure the master Keepalived
[root@master ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id lb01
}
vrrp_script nginx_check {
    script "/scripts/check_n.sh"
    interval 10
    weight -20
}
vrrp_instance VI_1 {
    state MASTER
    interface ens160
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass baozi
    }
    virtual_ipaddress {
        192.168.100.200
    }
    track_script {
        nginx_check
    }
    notify_master "/scripts/notify.sh master 192.168.100.200"
    notify_backup "/scripts/notify.sh backup 192.168.100.200"
}
virtual_server 192.168.100.200 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    persistence_timeout 50
    protocol TCP
    real_server 192.168.100.1 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.100.2 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
[root@master ~]# systemctl restart keepalived
Configure the backup Keepalived
The backup does not need to check whether nginx is healthy: it starts nginx when it is promoted to MASTER and stops it when it is demoted back to BACKUP.
[root@slave ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id lb02
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens160
    virtual_router_id 51
    priority 90
    nopreempt
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass baozi
    }
    virtual_ipaddress {
        192.168.100.200
    }
    notify_master "/scripts/notify.sh master 192.168.100.200"
    notify_backup "/scripts/notify.sh backup 192.168.100.200"
}
virtual_server 192.168.100.200 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    persistence_timeout 50
    protocol TCP
    real_server 192.168.100.1 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.100.2 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
[root@slave ~]# systemctl restart keepalived
Testing and verification
Simulate the master going down and the slave taking over
#master
//start keepalived and nginx
[root@master ~]# systemctl start keepalived
[root@master ~]# systemctl start nginx
[root@master ~]# ss -antl
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 128 0.0.0.0:80 0.0.0.0:*
LISTEN 0 128 0.0.0.0:22 0.0.0.0:*
LISTEN 0 128 [::]:80 [::]:*
LISTEN 0 128 [::]:22 [::]:*
#slave
//start keepalived
[root@slave ~]# systemctl start keepalived
[root@slave ~]# ss -antl
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 128 0.0.0.0:22 0.0.0.0:*
LISTEN 0 128 [::]:22 [::]:*
#client
//at this point, requests from the client to the VIP reach the master
[root@client ~]# curl 192.168.100.200
master
#master
//simulate the master going down
[root@master ~]# systemctl stop nginx
[root@master ~]# ss -antl
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 128 0.0.0.0:22 0.0.0.0:*
LISTEN 0 128 [::]:22 [::]:*
#client
//now requests to the VIP reach the slave
[root@client ~]# curl 192.168.100.200
slave
#slave
//because the master is down, the slave is promoted automatically and starts nginx
[root@slave ~]# ss -antl
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 128 0.0.0.0:80 0.0.0.0:*
LISTEN 0 128 0.0.0.0:22 0.0.0.0:*
LISTEN 0 128 [::]:80 [::]:*
LISTEN 0 128 [::]:22 [::]:*
Simulate the recovered master taking over again
#master
//start nginx on the master first, then start keepalived
[root@master ~]# systemctl start nginx
[root@master ~]# systemctl start keepalived
[root@master ~]# ss -antl
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 128 0.0.0.0:80 0.0.0.0:*
LISTEN 0 128 0.0.0.0:22 0.0.0.0:*
LISTEN 0 128 [::]:80 [::]:*
LISTEN 0 128 [::]:22 [::]:*
#client
//after a short wait, requests to the VIP reach the master again
[root@client ~]# curl 192.168.100.200
master
#slave
//the slave steps down and stops nginx
[root@slave ~]# ss -antl
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 128 0.0.0.0:22 0.0.0.0:*
LISTEN 0 128 [::]:22 [::]:*
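To watch the failover continuously instead of issuing single requests, a simple loop on the client is enough; the output switches from "master" to "slave" when nginx on the master is stopped, and back again once the master recovers (press Ctrl+C to stop):
#client
[root@client ~]# while true; do curl -s --connect-timeout 1 192.168.100.200; sleep 1; done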