haproxy + keepalived
Two hosts:
192.168.2.163
192.168.2.165
# Install haproxy with yum
yum install haproxy
# cat /etc/haproxy/haproxy.cfg
The configuration actually used:
global
log 127.0.0.1 local2
chroot /var/lib/haproxy
user haproxy
group haproxy
daemon
nbproc 4
maxconn 100000
tune.ssl.default-dh-param 2048
defaults
log global
option httplog
option forwardfor
option abortonclose
option dontlognull
retries 2
maxconn 100000
timeout connect 5s
timeout client 10m
timeout server 10m
listen admin_stats
mode http
bind *:8899
stats enable
stats refresh 30s
stats uri /stats
stats realm XingCloud\ Haproxy
stats auth admin:admin
stats hide-version
listen www
bind 0.0.0.0:8888 # port 80 is already in use, so 8888 is used here instead
mode http
balance roundrobin
server www1 192.168.2.162:8080 check inter 2000 rise 30 fall 15
server www2 192.168.2.164:8080 check inter 2000 rise 30 fall 15
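Before (re)starting haproxy, the configuration file can be syntax-checked; a quick sanity check, assuming the default path used above:
haproxy -c -f /etc/haproxy/haproxy.cfg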
#### The following configurations are for reference only ##########
global
log 127.0.0.1 local0
maxconn 100000
user haproxy
group haproxy
daemon
nbproc 4
tune.ssl.default-dh-param 2048
defaults
log global
mode http
#option httpclose
option redispatch
option forwardfor
option abortonclose
option dontlognull
retries 2
maxconn 100000
#balance source
timeout connect 10000
timeout client 100000
timeout server 100000
listen admin_stats
bind *:8899
mode http
option httplog
log 127.0.0.1 local0 err
maxconn 10
stats refresh 30s
stats uri /stats
stats realm XingCloud\ Haproxy
stats auth admin:admin
stats hide-version
listen redis
bind 0.0.0.0:6379
mode tcp
balance roundrobin
server node1 10.10.72.45:6379 minconn 4 maxconn 10000 check inter 2000 rise 2 fall 5
server node2 10.10.72.46:6379 minconn 4 maxconn 10000 check inter 2000 rise 2 fall 5
listen gxpt-dsqz
bind 0.0.0.0:52001
mode http
balance roundrobin
option httpchk GET /
server node1 10.10.72.29:52001 minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2
server node2 10.10.72.30:52001 minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2
server node3 10.10.72.31:52001 minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2
server node4 10.10.72.32:52001 minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2
server node5 10.10.72.33:52001 minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2
server node6 10.10.72.34:52001 minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2
listen gxpt-dsqz-ssl
bind 0.0.0.0:54001 ssl crt /opt/cert/gxpt.pem verify none
mode http
balance roundrobin
option httpchk GET /
server node1 10.10.72.2:5001 ssl verify none minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2
server node2 10.10.72.3:5001 ssl verify none minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2
# Insert a cookie on the client for session persistence
backend backend_www
option forwardfor
cookie SERVERID insert indirect nocache # insert the session-persistence cookie
option redispatch # if a backend server goes down, switch to another immediately instead of returning 503
option httpchk HEAD / HTTP/1.0
balance roundrobin
server www1 192.168.1.198:80 cookie www1 check inter 2000 rise 30 fall 15
server www2 192.168.1.52:80 cookie www2 check inter 2000 rise 30 fall 15
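With this backend, responses should carry the SERVERID persistence cookie; a rough check (the frontend address is a placeholder, not part of the reference config):
curl -si http://<frontend-ip>/ | grep -i '^set-cookie'   # <frontend-ip> is a placeholder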
# balance source: hash the client source IP so each client is always sent to the same backend server
backend backend_www
option forwardfor
option httpchk HEAD / HTTP/1.0
balance source
server www1 192.168.1.198:80 check inter 2000 rise 30 fall 15
server www2 192.168.1.52:80 check inter 2000 rise 30 fall 15
frontend frontend_58001
bind 0.0.0.0:58001
mode http
option tcplog
acl fpcloud-yypt path_beg -i /fpcloud-yypt
use_backend fpcloud-yypt if fpcloud-yypt
acl fpcloud-web path_beg -i /fpcloud-web
use_backend fpcloud-web if fpcloud-web
backend fpcloud-web
mode http
balance leastconn
server node1 10.72.1.233:58001 minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2
server node2 10.72.1.241:58001 minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2
backend fpcloud-yypt
mode http
balance leastconn
server node1 10.72.1.233:58002 minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2
server node2 10.72.1.241:58002 minconn 100 maxconn 10000 check inter 2000 rise 1 fall 2
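The path_beg ACLs in frontend_58001 route by URL prefix; both backends can be exercised with requests like the following (the haproxy address is a placeholder):
curl http://<haproxy-ip>:58001/fpcloud-web/    # <haproxy-ip> is a placeholder
curl http://<haproxy-ip>:58001/fpcloud-yypt/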
# Start haproxy and enable it at boot
systemctl start haproxy.service
systemctl enable haproxy.service
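One way to confirm haproxy came up with the ports configured above (8888 for traffic, 8899/stats with credentials admin:admin for the stats page):
ss -lntp | grep haproxy
curl -s -o /dev/null -w '%{http_code}\n' -u admin:admin http://192.168.2.163:8899/stats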
2 Configure haproxy logging
# Edit the haproxy configuration file; these directives are already present in the config above, so no further changes are needed here
# vim haproxy.cfg
global
log 127.0.0.1 local2
# local2 is the syslog facility; it maps to the configuration in /etc/rsyslog.conf, and the default log level is info
defaults
log global # must be configured
option httplog # configure this as well (HTTP log format)
# Edit the system logging configuration
# Create a dedicated rsyslog config file for haproxy
# vim /etc/rsyslog.d/haproxy.conf
$ModLoad imudp
$UDPServerRun 514
local2.* /opt/var/logs/haproxy/haproxy.log
local2.warning /opt/var/logs/haproxy/haproxy_warn.log
# Without the following setting, logs are written to /var/log/messages in addition to /opt/var/logs/haproxy/haproxy.log
# vim /etc/rsyslog.conf
The following setting is present by default and makes rsyslog read the config files under /etc/rsyslog.d/:
$IncludeConfig /etc/rsyslog.d/*.conf
# Prevent these logs from also being written to /var/log/messages (local2.none added below)
*.info;mail.none;authpriv.none;cron.none;local2.none /var/log/messages
# mkdir /opt/var/logs/haproxy/ -p
# Configure the rsyslog service options to enable remote logging
# vim /etc/sysconfig/rsyslog
SYSLOGD_OPTIONS="-c 2 -r -m 0"
# -c 2: run in compatibility mode (the default is -c 5)
# -r: accept remote logs
# -m 0: interval in minutes between timestamp mark messages; 0 disables them
# Restart the haproxy and rsyslog services
# centos7
# systemctl restart rsyslog
# systemctl restart haproxy
# systemctl enable rsyslog
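To confirm the local2 facility now reaches the dedicated file, send a test message through syslog and look for it in the log (logger is part of util-linux):
logger -p local2.info "haproxy rsyslog test"
tail -n 1 /opt/var/logs/haproxy/haproxy.log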
3 Configure haproxy log rotation
# vim /etc/logrotate.d/haproxy
/opt/var/logs/haproxy/haproxy*.log {
daily
rotate 7
create
missingok
notifempty
dateext
compress
sharedscripts
postrotate
# /bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true
# /bin/kill -HUP `cat /var/run/rsyslogd.pid 2> /dev/null` 2> /dev/null || true
systemctl restart rsyslog
endscript
}
The system's default logrotate configuration, for reference:
/opt/var/logs/haproxy/*.log {
daily
rotate 10
missingok
notifempty
compress
sharedscripts
postrotate
/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true
/bin/kill -HUP `cat /var/run/rsyslogd.pid 2> /dev/null` 2> /dev/null || true
endscript
}
# Force a rotation to test
# logrotate -vf /etc/logrotate.d/haproxy
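After forcing a rotation, the date-stamped (and compressed) files should appear next to the live log:
ls -l /opt/var/logs/haproxy/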
Install keepalived
yum -y install epel-release
yum -y install keepalived
# On host 163, which acts as the master
# vim /etc/keepalived/keepalived.conf
global_defs {
    router_id haproxy_ha1
}
vrrp_script chk_maintaince_down {
    script "[[ -f /etc/keepalived/down ]] && exit 1 || exit 0"
    interval 1
    weight 2
}
vrrp_script chk_haproxy {
    script "/etc/keepalived/scripts/haproxy_check.sh"
    interval 2
    timeout 2
    fall 3
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 29
    priority 100
    authentication {
        auth_type PASS
        auth_pass 1e3459f77aba4ded
    }
    track_interface {
        ens33
    }
    virtual_ipaddress {
        192.168.2.250 dev ens33 label ens33:1
    }
    track_script {
        chk_haproxy
    }
    notify_master "/etc/keepalived/scripts/haproxy_master.sh"
}
On host 165, which acts as the backup
# vim /etc/keepalived/keepalived.conf
global_defs {
    router_id haproxy_ha1
}
vrrp_script chk_maintaince_down {
    script "[[ -f /etc/keepalived/down ]] && exit 1 || exit 0"
    interval 1
    weight 2
}
vrrp_script chk_haproxy {
    script "/etc/keepalived/scripts/haproxy_check.sh"
    interval 2
    timeout 2
    fall 3
}
vrrp_instance VI_1 {
    state BACKUP # differs from the master
    interface ens33
    virtual_router_id 29
    priority 90 # lower than the master's priority
    authentication {
        auth_type PASS
        auth_pass 1e3459f77aba4ded
    }
    track_interface {
        ens33
    }
    virtual_ipaddress {
        192.168.2.250 dev ens33 label ens33:1
    }
    track_script {
        chk_haproxy
    }
    notify_master "/etc/keepalived/scripts/haproxy_master.sh"
}
Steps required on both hosts:
mkdir -p /etc/keepalived/scripts
mkdir -p /opt/var/logs/keepalived/
# vim /etc/keepalived/scripts/haproxy_check.sh
#!/bin/bash
# Check whether haproxy is running; if not, try to restart it once.
LOGFILE="/opt/var/logs/keepalived/keepalived-haproxy-state.log"
if [ `ps -C haproxy --no-header | wc -l` -eq 0 ]; then
    date >> $LOGFILE
    systemctl restart haproxy
    sleep 1
    # Still not running after the restart: report failure so keepalived
    # marks the check as failed and the VIP can fail over.
    if [ `ps -C haproxy --no-header | wc -l` -eq 0 ]; then
        echo "fail: check_haproxy status" >> $LOGFILE
        exit 1
    else
        echo "success: restart_haproxy status" >> $LOGFILE
        exit 0
    fi
else
    exit 0
fi
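The check script can be run by hand first; keepalived treats a non-zero exit status from a vrrp_script as a failed check, so this should print 0 while haproxy is healthy:
bash /etc/keepalived/scripts/haproxy_check.sh; echo $?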
# vim /etc/keepalived/scripts/haproxy_master.sh
#!/bin/bash
LOGFILE="/opt/var/logs/keepalived/keepalived-haproxy-state.log"
echo "Being Master ..." >> $LOGFILE
# Make both scripts executable
chmod a+x /etc/keepalived/scripts/haproxy_check.sh /etc/keepalived/scripts/haproxy_master.sh
Start keepalived on both hosts:
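For example, assuming the stock systemd units installed by the yum packages:
systemctl start keepalived
systemctl enable keepalived
# then check which host currently holds the VIP
ip addr show ens33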
NIC information on host 163:
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:50:56:3a:cc:20 brd ff:ff:ff:ff:ff:ff
inet 192.168.2.163/24 brd 192.168.2.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet 192.168.2.250/32 scope global ens33:1
valid_lft forever preferred_lft forever
inet6 fe80::8041:19f:b29:7354/64 scope link noprefixroute
valid_lft forever preferred_lft forever
NIC information on host 165:
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:50:56:35:92:64 brd ff:ff:ff:ff:ff:ff
inet 192.168.2.165/24 brd 192.168.2.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet6 fe80::7320:404e:a7f2:6fbf/64 scope link noprefixroute
valid_lft forever preferred_lft forever
inet6 fe80::6435:91f7:6c5:fa28/64 scope link tentative noprefixroute dadfailed
valid_lft forever preferred_lft forever
inet6 fe80::8ebe:5815:b0b3:d833/64 scope link tentative noprefixroute dadfailed
valid_lft forever preferred_lft forever
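The VIP 192.168.2.250 is present only on host 163, as expected for the current master. VRRP advertisements from the master can also be observed on either host (112 is the VRRP protocol number):
tcpdump -i ens33 'ip proto 112' -c 5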
haproxy failover test
With the current check script, stopping the haproxy service on the host that holds the keepalived VIP just causes the script to restart haproxy immediately. Only when that host is shut down, so haproxy can no longer be restarted,
does the keepalived VIP fail over to the other haproxy host.
Once the original host is running haproxy again, the keepalived VIP moves back to it.
Issues:
1. While the keepalived VIP is failing over, access to the service is briefly slow.
2. haproxy is configured for round-robin balancing; the alternation is visible in Firefox but not obvious in Chrome (likely because of connection reuse and caching).
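For issue 2, browser connection reuse hides the alternation; a curl loop against the VIP makes round-robin (and failover behaviour during the test) easier to observe, assuming the two backends return distinguishable content:
for i in $(seq 1 6); do curl -s http://192.168.2.250:8888/ | head -c 80; echo; done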