ubuntu20.04部署keepalived
一 环境准备
1.1 设置时间同步
root@node-01:~# apt -y install chrony
root@node-01:~# systemctl enable chrony
二 在线安装
2.1 查看keepalived软件版本
root@node-01:~# apt-cache madison keepalived
keepalived | 1:2.0.19-2 | http://mirrors.aliyun.com/ubuntu focal/main amd64 Packages
2.2 安装keepalived
root@node-01:~# apt -y install keepalived
2.3 准备keepalived.conf文件
root@node-01:~# cp /usr/share/doc/keepalived/samples/keepalived.conf.sample /etc/keepalived/keepalived.conf
2.4 修改keepalived.conf文件
修改命令如下:
root@node-01:~# sed -i 's@interface eth0@interface ens33@g' /etc/keepalived/keepalived.conf
2.5 启动keepalived服务
启动并查看服务状态:
root@node-01:~# systemctl start keepalived
root@node-01:~# systemctl status keepalived
● keepalived.service - Keepalive Daemon (LVS and VRRP)
     Loaded: loaded (/lib/systemd/system/keepalived.service; enabled; vendor preset: enabled)
     Active: active (running) since Tue 2021-11-09 12:05:12 CST; 3s ago
   Main PID: 2182 (keepalived)
      Tasks: 3 (limit: 2245)
     Memory: 2.3M
     CGroup: /system.slice/keepalived.service
             ├─2182 /usr/sbin/keepalived --dont-fork
             ├─2194 /usr/sbin/keepalived --dont-fork
             └─2195 /usr/sbin/keepalived --dont-fork
Nov 09 12:05:12 keepalived-01 Keepalived[2182]: Starting VRRP child process, pid=2195
Nov 09 12:05:12 keepalived-01 Keepalived_healthcheckers[2194]: Initializing ipvs
Nov 09 12:05:12 keepalived-01 Keepalived_healthcheckers[2194]: Gained quorum 1+0=1 <= 1 for VS [10.10.10.2]:tcp:1358
Nov 09 12:05:12 keepalived-01 Keepalived_healthcheckers[2194]: Activating healthchecker for service [192.168.200.2]:tcp:1358 for VS [10.10.10.2]:tcp:1358
Nov 09 12:05:12 keepalived-01 Keepalived_healthcheckers[2194]: Activating BFD healthchecker
Nov 09 12:05:12 keepalived-01 Keepalived_vrrp[2195]: Registering Kernel netlink reflector
Nov 09 12:05:12 keepalived-01 Keepalived_vrrp[2195]: Registering Kernel netlink command channel
Nov 09 12:05:12 keepalived-01 Keepalived_vrrp[2195]: Opening file '/etc/keepalived/keepalived.conf'.
Nov 09 12:05:12 keepalived-01 Keepalived_vrrp[2195]: Registering gratuitous ARP shared channel
Nov 09 12:05:12 keepalived-01 Keepalived_vrrp[2195]: (VI_1) Entering BACKUP STATE (init)
2.6 验证VIP
查看网卡上的VIP:
root@node-01:~# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:50:56:2e:5a:9c brd ff:ff:ff:ff:ff:ff
inet 192.168.174.120/24 brd 192.168.174.255 scope global ens33
valid_lft forever preferred_lft forever
inet 192.168.200.11/32 scope global ens33
valid_lft forever preferred_lft forever
inet 192.168.200.12/32 scope global ens33
valid_lft forever preferred_lft forever
inet 192.168.200.13/32 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::250:56ff:fe2e:5a9c/64 scope link
valid_lft forever preferred_lft forever
三 编译安装keepalived
3.1 安装依赖
root@node-01:~# apt -y install gcc openssl libssl-dev make libnl-3-dev
3.2 下载keepalived安装包
root@keepalived-01:/opt# wget https://keepalived.org/software/keepalived-2.2.4.tar.gz
3.3 安装keepalived
root@node-01:/opt# tar xf keepalived-2.2.4.tar.gz
root@node-01:/opt# cd keepalived-2.2.4/
root@node-01:/opt/keepalived-2.2.4# ./configure --prefix=/usr/local/keepalived
root@node-01:/opt/keepalived-2.2.4# make && make install
3.4 准备keepalived.conf文件
准备配置文件:
root@node-01:~# mkdir -pv /etc/keepalived
mkdir: created directory '/etc/keepalived'
root@node-01:~# cp /usr/local/keepalived/etc/keepalived/keepalived.conf /etc/keepalived/
root@node-01:~# sed -i 's@interface eth0@interface ens33@g' /etc/keepalived/keepalived.conf
3.5 准备keepalived.service文件
root@node-01:~# cat /lib/systemd/system/keepalived.service
[Unit]
Description=Keepalive Daemon (LVS and VRRP)
After=network-online.target
Wants=network-online.target
[Service]
Type=forking
PIDFile=/run/keepalived.pid
KillMode=process
EnvironmentFile=-/usr/local/keepalived/etc/sysconfig/keepalived
ExecStart=/usr/local/keepalived/sbin/keepalived $KEEPALIVED_OPTIONS
ExecReload=/bin/kill -HUP $MAINPID
[Install]
WantedBy=multi-user.target
3.6 开机启动keepalived
root@node-01:~# systemctl enable keepalived
Created symlink /etc/systemd/system/multi-user.target.wants/keepalived.service → /lib/systemd/system/keepalived.service.
3.7 启动keepalived
启动并查看服务状态:
root@node-01:~# systemctl start keepalived
root@node-01:~# systemctl status keepalived
● keepalived.service - Keepalive Daemon (LVS and VRRP)
     Loaded: loaded (/lib/systemd/system/keepalived.service; enabled; vendor preset: enabled)
     Active: active (running) since Tue 2021-11-09 13:50:48 CST; 2s ago
    Process: 66737 ExecStart=/usr/local/keepalived/sbin/keepalived $KEEPALIVED_OPTIONS (code=exited, status=0/SUCCESS)
   Main PID: 66753 (keepalived)
      Tasks: 3 (limit: 2245)
     Memory: 1.7M
     CGroup: /system.slice/keepalived.service
             ├─66753 /usr/local/keepalived/sbin/keepalived -D
             ├─66754 /usr/local/keepalived/sbin/keepalived -D
             └─66755 /usr/local/keepalived/sbin/keepalived -D
Nov 09 13:50:48 node-01 Keepalived_healthcheckers[66754]: Gained quorum 1+0=1 <= 2 for VS [10.10.10.2]:tcp:1358
Nov 09 13:50:48 node-01 Keepalived_healthcheckers[66754]: Gained quorum 1+0=1 <= 2 for VS [10.10.10.3]:tcp:1358
Nov 09 13:50:48 node-01 Keepalived_healthcheckers[66754]: Activating healthchecker for service [192.168.201.100]:tcp:443 for VS [192.168.200.100]:tcp:443
Nov 09 13:50:48 node-01 Keepalived_healthcheckers[66754]: Activating healthchecker for service [192.168.200.2]:tcp:1358 for VS [10.10.10.2]:tcp:1358
Nov 09 13:50:48 node-01 Keepalived_healthcheckers[66754]: Activating healthchecker for service [192.168.200.3]:tcp:1358 for VS [10.10.10.2]:tcp:1358
Nov 09 13:50:48 node-01 Keepalived_healthcheckers[66754]: Activating healthchecker for service [192.168.200.4]:tcp:1358 for VS [10.10.10.3]:tcp:1358
Nov 09 13:50:48 node-01 Keepalived_healthcheckers[66754]: Activating healthchecker for service [192.168.200.5]:tcp:1358 for VS [10.10.10.3]:tcp:1358
Nov 09 13:50:48 node-01 Keepalived_vrrp[66755]: (VI_1) Entering BACKUP STATE (init)
Nov 09 13:50:48 node-01 Keepalived_vrrp[66755]: VRRP sockpool: [ifindex( 2), family(IPv4), proto(112), fd(13,14)]
Nov 09 13:50:48 node-01 systemd[1]: Started Keepalive Daemon (LVS and VRRP).
3.8 验证VIP
root@node-01:~# hostname -I
192.168.174.120 192.168.200.16 192.168.200.17 192.168.200.18
3.9 测试VIP连通性
root@node-01:~# ping -c 2 192.168.200.18
PING 192.168.200.18 (192.168.200.18) 56(84) bytes of data.
64 bytes from 192.168.200.18: icmp_seq=1 ttl=64 time=0.016 ms
64 bytes from 192.168.200.18: icmp_seq=2 ttl=64 time=0.045 ms
--- 192.168.200.18 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1014ms
rtt min/avg/max/mdev = 0.016/0.030/0.045/0.014 ms
四 keepalived.conf文件解析
4.1 实现独立配置文件
root@node-01:~# tail -n1 /etc/keepalived/keepalived.conf
include /etc/keepalived/conf.d/*.conf
4.2 keepalived.conf文件详解
配置文件内容及注释如下:
root@node-01:~# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email { #keepalived发生故障切换时邮件发送的目标邮箱
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc #发送邮件的地址
smtp_server 192.168.200.1 #邮件服务器地址
smtp_connect_timeout 30 #邮件服务器连接timeout
router_id LVS_DEVEL #每个keepalived主机唯一标识,建议使用当前主机名称
vrrp_skip_check_adv_addr #对所有通告报文都检查,会比较消耗性能,启动此配置后,如果收到的通告报文和上一个报文是同一个路由器,则跳过检查,默认值为全检查
vrrp_strict #严格遵守VRRP协议,禁止以下情况:1.无VIP地址;2.配置了单播;3.在VRRP版本2中有IPV6地址,开启此项会自动开启iptables防火墙规则,建议关闭此项配置
vrrp_garp_interval 0 #gratuitous ARP message报文发送延迟,0表示不延迟
vrrp_gna_interval 0 #unsolicited NA message 消息发送延迟
}

vrrp_instance VI_1 {
state MASTER # 当前节点在虚拟路由上的初始状态,状态为MASTER或者BACKUP
interface ens33 #修改此行,默认为eth0 绑定为当前虚拟路由器使用的物理接口
virtual_router_id 51 #每个虚拟路由器唯一标识,范围0-255.每个虚拟路由器必须唯一,同属一个虚拟路由器的多个keepalived节点必须相同
priority 100 #当前物理节点在此虚拟路由器的优先级,范围1-254,每个keepalived节点此值不同
advert_int 1 #vrrp通告的时间间隔,默认1s
authentication { #认证机制
auth_type PASS #预共享秘钥,仅前8位有效,同一个虚拟路由器的多个keepalived节点必须一样
auth_pass 1111
}
virtual_ipaddress { #虚拟IP
192.168.200.16 #指定VIP,不指定网卡,默认为eth0,默认掩码32
192.168.200.17/24 dev eth1 #指定VIP网卡
192.168.200.18/24 dev ens33 label ens33:1 # 指定VIP网卡label
}
}

virtual_server 192.168.200.100 443 { #虚拟服务器,VIP和PORT
delay_loop 6 #检查后端服务器的时间间隔
lb_algo rr # 定义调度方法 rr|wrr|lc|wlc|lblc|sh|dh
lb_kind NAT #集群的类型, NAT|DR|TUN
persistence_timeout 50 #持久连接时长
protocol TCP #指定服务协议,TCP|UDP|SCTP

    real_server 192.168.201.100 443 { #RS的IP和PORT
        weight 1 #RS的权重
        SSL_GET { #应用层检测 HTTP_GET|SSL_GET
            url {
                path / #定义要监控的URL
                digest ff20ad2481f97b1754ef3e12ecd3a9cc
            }
            url {
                path /mrtg/
                digest 9b3a0c85a887a256d6939da88aabd8cd
            }
            connect_timeout 3 #客户端请求的超时时长
            retry 3 #重试次数
            delay_before_retry 3 #重试之前的延迟时长
        }
    }
}
virtual_server 10.10.10.2 1358 {
delay_loop 6
lb_algo rr
lb_kind NAT
persistence_timeout 50
protocol TCP

    sorry_server 192.168.200.200 1358

    real_server 192.168.200.2 1358 {
        weight 1
        HTTP_GET {
            url {
                path /testurl/test.jsp
                digest 640205b7b0fc66c1ea91c463fac6334d
            }
            url {
                path /testurl2/test.jsp
                digest 640205b7b0fc66c1ea91c463fac6334d
            }
            url {
                path /testurl3/test.jsp
                digest 640205b7b0fc66c1ea91c463fac6334d
            }
            connect_timeout 3
            retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.200.3 1358 {
        weight 1
        HTTP_GET {
            url {
                path /testurl/test.jsp
                digest 640205b7b0fc66c1ea91c463fac6334c
            }
            url {
                path /testurl2/test.jsp
                digest 640205b7b0fc66c1ea91c463fac6334c
            }
            connect_timeout 3
            retry 3
            delay_before_retry 3
        }
    }
}
virtual_server 10.10.10.3 1358 {
delay_loop 3
lb_algo rr
lb_kind NAT
persistence_timeout 50
protocol TCP

    real_server 192.168.200.4 1358 {
        weight 1
        HTTP_GET {
            url {
                path /testurl/test.jsp
                digest 640205b7b0fc66c1ea91c463fac6334d
            }
            url {
                path /testurl2/test.jsp
                digest 640205b7b0fc66c1ea91c463fac6334d
            }
            url {
                path /testurl3/test.jsp
                digest 640205b7b0fc66c1ea91c463fac6334d
            }
            connect_timeout 3
            retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.200.5 1358 {
        weight 1
        HTTP_GET {
            url {
                path /testurl/test.jsp
                digest 640205b7b0fc66c1ea91c463fac6334d
            }
            url {
                path /testurl2/test.jsp
                digest 640205b7b0fc66c1ea91c463fac6334d
            }
            url {
                path /testurl3/test.jsp
                digest 640205b7b0fc66c1ea91c463fac6334d
            }
            connect_timeout 3
            retry 3
            delay_before_retry 3
        }
    }
}