recreate_slaves_1.1: a script to automatically rebuild MySQL slaves

#!/bin/bash
# Author : Hunter Huang
# Date : 2020-12-22
# Version : 1.0
# Desc : Automatically rebuilds slaves based on the instance information configured in a file. A mydumper backup is taken on the backup_node instance (which can be the master or a slave; backing up on a slave is recommended), each slave in target_list is restored from that backup in turn, and the restored slaves are then pointed at the node configured as master.
###################################################
# Date : 2021-01-06
# Changes in version 1.1:
# The myloader restore now passes --enable-binlog, which supports cluster migration scenarios such as replicating an old cluster into a new one and making the new cluster's master a slave of the old cluster. In that case:
# master: the master node of the old cluster; the new cluster's master will point at it
# backup_node: the backup node of the old cluster (its master or one of its slaves), where the backup is taken
# target_list:
# the master node of the new cluster; because the restore runs with --enable-binlog, the new cluster's own slaves replicate the restored data as well
###################################################
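# The expected layout of instance_list.param, inferred from the grep/awk parsing below
# (a hypothetical sketch; hosts and ports are placeholders, fields are colon-separated):
# backup_node:192.168.1.11:3306
# master:192.168.1.10:3306
# target_list:
# 192.168.1.21:3306
# 192.168.1.22:3306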

echo "自动重建从库任务执行中..."

# Take the backup on backup_node; read the backup node information from instance_list.param
backup_host=`cat instance_list.param | grep -i "backup_node" | awk -F ':' '{print $2}' | sed 's/ //g'`
backup_port=`cat instance_list.param | grep -i "backup_node" | awk -F ':' '{print $3}' | sed 's/ //g'`
backup_dir="backup_${backup_host}_${backup_port}"
logfile=logs/${backup_host}_${backup_port}.log

rm -rf $backup_dir
mkdir $backup_dir

rm -rf logs
mkdir logs

echo `date +%Y%m%d%H%M%S`" job begin..." > $logfile

# Start the mydumper backup
echo `date +%Y%m%d%H%M%S`" begin backing up the source db..." >>$logfile
#mydumper -h $backup_host -P $backup_port -u mydumper -p xxx --skip-tz-utc -o $backup_dir --regex '^(?!(mysql|test|information_schema|performance_schema|sys))' -G -R -E -c -t 4
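# Note: -u/-p are omitted from the active command below, so credentials are assumed to
# come from a client configuration such as ~/.my.cnf (an assumption, not part of this script)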
mydumper -h $backup_host -P $backup_port --skip-tz-utc -o $backup_dir --regex '^(?!(mysql|test|information_schema|performance_schema|sys))' -G -R -E -c -t 4

if [ $? -ne 0 ]; then
    echo "mydumper backup failed" >> $logfile
    echo "Job Failed, please check the job log"
    exit 1
else
    # Locate and record the binlog position that corresponds to this backup
    # If the backup was taken on a slave, use the matching position on its master
    if grep -qi "slave status" ${backup_dir}/metadata ; then
        binlog_file=`cat ${backup_dir}/metadata | grep -A 20 "SHOW SLAVE STATUS" | grep "Log" | awk -F ':' '{print $2}' | sed 's/ //g'`
        binlog_position=`cat ${backup_dir}/metadata | grep -A 20 "SHOW SLAVE STATUS" | grep "Pos" | awk -F ':' '{print $2}' | sed 's/ //g'`
        echo "Backup taken on a slave; the corresponding master binlog position is: "$binlog_file" "$binlog_position >>$logfile
    else
        # If the backup was taken on the master, use the master's own position
        binlog_file=`cat ${backup_dir}/metadata | grep -A 20 "SHOW MASTER STATUS" | grep "Log" | awk -F ':' '{print $2}' | sed 's/ //g'`
        binlog_position=`cat ${backup_dir}/metadata | grep -A 20 "SHOW MASTER STATUS" | grep "Pos" | awk -F ':' '{print $2}' | sed 's/ //g'`
        echo "Backup taken on the master; the corresponding master binlog position is: "$binlog_file" "$binlog_position >>$logfile
    fi
    echo `date +%Y%m%d%H%M%S`": mydumper backup completed successfully" >> $logfile
fi
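# For reference, the mydumper metadata file typically looks roughly like this
# (placeholder values; the exact layout depends on the mydumper version):
# Started dump at: 2021-01-06 10:00:00
# SHOW MASTER STATUS:
#         Log: mysql-bin.000123
#         Pos: 45678901
#         GTID:
#
# SHOW SLAVE STATUS:
#         Host: 10.0.0.10
#         Log: mysql-bin.000456
#         Pos: 1234567
#         GTID:
#
# Finished dump at: 2021-01-06 10:05:00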
# Export user grants from the backup instance with pt-show-grants
user_privs_file=logs/user_privs_${backup_host}_${backup_port}.sql
pt-show-grants -h $backup_host -P $backup_port --ignore root@localhost > $user_privs_file
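# The grants file is expected to contain replayable SQL, roughly of this form
# (illustrative account and schema names; actual output depends on the pt-show-grants and MySQL versions):
# GRANT SELECT, INSERT, UPDATE, DELETE ON `appdb`.* TO 'app'@'%';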

# Read the master host and port that change master to will point at from instance_list.param
master_ip=`cat instance_list.param | grep -i "master" | awk -F ':' '{print $2}' | sed 's/ //g'`
master_port=`cat instance_list.param | grep -i "master" | awk -F ':' '{print $3}' | sed 's/ //g'`

# Process the slaves to be rebuilt from target_list one by one: restore each and run change master
echo `date +%Y%m%d%H%M%S`" begin recreating target slaves..." >>$logfile
cat instance_list.param | grep -A 20 target | grep -v "target" | awk -F ':' '{print $1,$2}' |while read target_host target_port
do
echo "----------"`date +%Y%m%d%H%M%S`"开始重建节点${target_host}:"${target_port}"----------"
echo "----------"`date +%Y%m%d%H%M%S`"开始重建节点${target_host}:"${target_port}"----------" >>$logfile
#echo `date +%Y%m%d%H%M%S`": drop databases on instance ${target_host}:${target_port}..."

#要恢复的节点上先删除业务数据库
drop_database_file=./logs/drop_database_${target_host}_${target_port}.sql
mysql -h ${target_host} -P ${target_port} information_schema -NBe "select concat('drop database ',SCHEMA_NAME,';') from SCHEMATA where SCHEMA_NAME not in ('information_schema','mysql','performance_schema','sys','test')" > $drop_database_file
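    # The generated file should hold one statement per business schema,
    # e.g. (illustrative name only): drop database appdb;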

    if [ $? -ne 0 ]; then
        echo "Failed to generate the drop database file" >> $logfile
        echo "Job Failed, please check the job log"
        exit 1
    else
        echo "Drop database file generated successfully" >> $logfile
    fi

    # Clean out the business databases before restoring the slave
    #mysql -h ${target_host} -P ${target_port} -umydumper -pvGW7Ip1SzOfnsRrwGtMj information_schema < $drop_database_file
    mysql -h ${target_host} -P ${target_port} information_schema < $drop_database_file
    if [ $? -ne 0 ]; then
        echo "Cleaning the slave before the restore failed" >> $logfile
        echo "Job Failed, please check the job log"
        exit 1
    else
        echo "Slave cleaned successfully" >> $logfile
    fi

    # Restore the databases with myloader
    #myloader -h ${target_host} -P ${target_port} -u mydumper -p vGW7Ip1SzOfnsRrwGtMj -t 4 -d $backup_dir
    myloader -h ${target_host} -P ${target_port} -t 4 -d $backup_dir --enable-binlog
    if [ $? -ne 0 ]; then
        echo "myloader restore of the slave failed" >> $logfile
        echo "Job Failed, please check the job log"
        exit 1
    else
        echo "myloader restore of the slave succeeded" >> $logfile
    fi

    # Restore users and their grants
    mysql -h ${target_host} -P ${target_port} information_schema < $user_privs_file

    # Run change master to so the rebuilt slave points at the configured master
    mysql -h ${target_host} -P ${target_port} <<EOF
stop slave;
reset slave all;
change master to master_host="${master_ip}",master_port=${master_port},master_user='slave',master_password='zhucongoppoyzf#jdsa9',master_log_file="${binlog_file}",master_log_pos=$binlog_position;
start slave;
EOF
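    # Position-based replication is set up here, using the MASTER_LOG_FILE/MASTER_LOG_POS
    # values recorded from the backup metadata earlier in the script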

    # Verify that the slave was rebuilt successfully
    io_slave_status=$(mysql -h ${target_host} -P ${target_port} -Be "show slave status\G" | grep -i "Slave_IO_Running:" | awk '{print $2}')
    sql_slave_status=$(mysql -h ${target_host} -P ${target_port} -Be "show slave status\G" | grep -i "Slave_SQL_Running:" | awk '{print $2}')
    if [ "$io_slave_status" == "Yes" ] && [ "$sql_slave_status" == "Yes" ]; then
        echo "Slave rebuild finished; status check passed"
        echo "Slave rebuild finished; status check passed" >> $logfile
    else
        echo "Slave rebuild finished; status check reported a problem"
        echo "Slave rebuild finished; status check reported a problem" >> $logfile
        exit 1
    fi
done

