nfs 研究双机热备记录
实现说明:两台服务器分别为nfs1,nfs2
nfs1 192.168.104.144
nfs2 192.168.104.145
vip 192.168.104.200
使用nfs+rsync+inotify+keepalived等工具
关闭防火墙和selinux,保证主机名能解析,设置好ssh互信
两台服务器:
# yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
# yum -y install nfs-utils keepalived rsync inotify-tools
设置nfs:
# systemctl enable --now nfs
# mkdir /home/data1
# chown -R 36:36 /home/data1 #此处赋予拥有用户和组根据个人需求
# vi /etc/exports
/home/data1 *(rw,sync,no_subtree_check,all_squash,anon_uid=36,anon_gid=36)
# systemctl restart nfs
设置rsync和inotify
注意:rsyncd服务不能手动启动,否则会两台服务器冲突
# vi /etc/rsyncd.conf
# rsyncd daemon settings. The daemon is only started on the BACKUP node
# (by keepalived_notify.sh); the MASTER pushes changes into it via inotify.sh.
log file = /var/log/rsyncd.log
pid file = /var/run/rsyncd.pid
lock file = /var/run/rsyncd.lock
# Server-side secrets file, one "user:password" per line, must be mode 600.
secrets file = /etc/rsync.password
# Module the client addresses as user@host::etc_from_client.
[etc_from_client]
path = /home/data1
comment = sync etc from client
uid = root
gid = root
port = 873
ignore errors
use chroot = no
read only = no
list = no
max connections = 200
timeout = 600
# Only this user (defined in the secrets file above) may use the module.
auth users = vdsm
# vi /etc/rsync.password
vdsm:123456
# vi /etc/rsync.auth
123456
# chmod 600 /etc/rsync.* #注意权限必须是600否则后面验证密码会失败
创建脚本存放路径
# mkdir /scripts/
设置自动同步数据
# vi /scripts/inotify.sh
#!/bin/bash
# Watch the NFS export directory and push every change to the peer node's
# rsyncd module so the standby server stays in sync.
host=192.168.104.145   # peer address; on nfs2 set this to 192.168.104.144
src=/home/data1/       # trailing slash: sync the contents, not the data1 dir itself
des=etc_from_client    # rsyncd module name on the peer
auth=/etc/rsync.auth   # client-side password file (must be mode 600)
user=vdsm              # rsyncd auth user
inotifywait=/usr/bin/inotifywait

# -m: monitor forever, -r: recursive, -q: quiet.
# NOTE: a space was added between %f and %e so the logged filename and
# event list are separable (original '%w%f%e' glued them together).
"$inotifywait" -mrq --timefmt '%Y%m%d %H:%M' --format '%T %w%f %e' \
    -e modify,delete,create,attrib "$src" \
| while IFS= read -r files; do
    # Full-tree sync on every event; --delete mirrors removals to the peer.
    rsync -avzP --delete --timeout=100 --password-file="$auth" "$src" "$user@$host::$des"
    echo "${files} was rsynced" >> /tmp/rsync.log 2>&1
done
# chmod -R 755 /scripts/
#设置开机自动执行脚本
# chmod +x /etc/rc.d/rc.local
# vi /etc/rc.d/rc.local
nohup sh /scripts/inotify.sh &
keepalived 的配置
主nfs1:
# vi /etc/keepalived/keepalived.conf
! Configuration File for keepalived
# Global identity for this node (nfs1, the intended initial MASTER).
global_defs {
notification_email {
mail@halen.com
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id nfs1
vrrp_skip_check_adv_addr
}
# Health check: run monitor_nfs_status.sh every 5s; after 3 consecutive
# failures, lower priority by 15 so the peer can win the VRRP election.
vrrp_script monitor_nfs_status {
script "/scripts/monitor_nfs_status.sh"
interval 5
fall 3
rise 1
weight -15
}
# Both nodes start as BACKUP with nopreempt: whichever node becomes MASTER
# first keeps the VIP until it actually fails (no flapping on recovery).
vrrp_instance VI_1 {
state BACKUP
interface ens33
virtual_router_id 55
priority 100
advert_int 1
nopreempt
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress { 192.168.104.200/24 }
track_script {
monitor_nfs_status
}
track_interface {
ens33
}
# State-transition hooks: toggle nfs/rsyncd roles (see keepalived_notify.sh).
notify_master "/scripts/keepalived_notify.sh master"
notify_backup "/scripts/keepalived_notify.sh backup"
notify_fault "/scripts/keepalived_notify.sh fault"
notify_stop "/scripts/keepalived_notify.sh stop"
}
备nfs2:
# vi /etc/keepalived/keepalived.conf
! Configuration File for keepalived
# Global identity for this node (nfs2, the standby).
global_defs {
notification_email {
mail@halen.com
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id nfs2
vrrp_skip_check_adv_addr
}
# Same health check as nfs1: failure drops priority by 15.
vrrp_script monitor_nfs_status {
script "/scripts/monitor_nfs_status.sh"
interval 5
fall 3
rise 1
weight -15
}
# Priority 90 (< nfs1's 100) so nfs1 wins the initial election when both
# are healthy; this node takes over only when nfs1's check fails.
vrrp_instance VI_1 {
state BACKUP
interface ens33
virtual_router_id 55
priority 90
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.104.200/24
}
track_script {
monitor_nfs_status
}
track_interface {
ens33
}
# State-transition hooks: toggle nfs/rsyncd roles (see keepalived_notify.sh).
notify_master "/scripts/keepalived_notify.sh master"
notify_backup "/scripts/keepalived_notify.sh backup"
notify_fault "/scripts/keepalived_notify.sh fault"
notify_stop "/scripts/keepalived_notify.sh stop"
}
两台服务器:
设置keepalive检测脚本
# vi /scripts/keepalived_notify.sh
#!/bin/bash
# keepalived state-transition hook.
# Usage: keepalived_notify.sh {master|backup|fault|stop}
# Role convention: the MASTER (VIP owner) serves NFS and must NOT run
# rsyncd; the BACKUP runs rsyncd so the master's inotify.sh can push to it.

master() {
    # Ensure NFS is up; stop rsyncd so only the peer accepts pushes.
    systemctl status nfs
    if [ $? -ne 0 ];then
        systemctl restart nfs
    fi
    systemctl status rsyncd
    if [ $? -eq 0 ];then
        systemctl stop rsyncd
    fi
}

backup() {
    # Ensure both NFS and rsyncd are running (rsyncd receives the sync).
    systemctl status nfs
    if [ $? -ne 0 ];then
        systemctl restart nfs
    fi
    systemctl status rsyncd
    if [ $? -ne 0 ];then
        systemctl restart rsyncd
    fi
}

fault() {
    # Same role as master: keep NFS up, keep rsyncd down.
    systemctl status nfs
    if [ $? -ne 0 ];then
        systemctl restart nfs
    fi
    # FIX: the original tested $? here without running this status command,
    # so it was checking the result of the nfs commands above.
    systemctl status rsyncd
    if [ $? -eq 0 ];then
        systemctl stop rsyncd
    fi
}

case "$1" in
master)
    master
    ;;
backup)
    backup
    ;;
fault)
    fault
    # Restart keepalived so the node re-enters the VRRP election cleanly.
    systemctl restart keepalived
    ;;
stop)
    backup
    systemctl restart keepalived
    ;;
*)
    echo $"Usage: $0 {master|backup|fault|stop}"
esac
# vi /scripts/monitor_nfs_status.sh
#!/bin/bash
# Health check invoked by keepalived's vrrp_script every 5s.
# Exit 0  -> healthy (nfs and rpcbind both running).
# Exit 2  -> rpcbind down (keepalived lowers priority by `weight`).
# FIX: original was missing the '#' on the shebang, and the nfsd process
# check used invalid test syntax with an inverted comparison.
systemctl status nfs &>/dev/null
if [ $? -eq 0 ];then
    systemctl status rpcbind &>/dev/null
    if [ $? -eq 0 ];then
        exit 0
    fi
    exit 2
else
    # NFS is down: try one restart.
    systemctl restart nfs &>/dev/null
    # If no nfsd process exists after the restart, NFS is dead on this
    # node: kill keepalived so the VIP fails over to the peer.
    if [ "$(ps -C nfsd --no-header | wc -l)" -eq 0 ];then
        pkill keepalived && exit 1
    fi
fi
# chmod -R 755 /scripts/
由于想nfs1作为主,且keepalived设置了当存在master不抢占
建议nfs1 先执行 systemctl enable --now keepalived
之后nfs2 再执行 systemctl enable --now keepalived
#由于本人都是测试完再写的,很可能哪里写错或者漏写,可以查看tail -f /var/log/rsyncd.log 和message日志,有问题的话可以留言告诉我