自动生成ctl文件,并将文件入库!shell脚本 (实用)
- 脚本解释:一共需要传4个参数,oracle表用户、oracle表名、需要入库的文件、文件分割符
- 注意:需要在脚本所在路径下创建6个文件夹:script、bad、ctl、log、temp、unl
#!/bin/sh
#AUTHOR:趴嘞怪
#CREATEDATE:2020/08/21
#illustration:直接输入表名和文件名,自动生成 ctl控制文件,将数据文件入库
# Print the expected invocation.
# The script needs 4 arguments: owner, table, data file, field separator.
# The separator must be quoted, otherwise the shell treats '|' as a pipe.
usage()
{
  echo "Example: ./auto_exec_ctl.sh comm a_table /usr/temp/a_temp_gupx_20200818.txt '|'"
}
# Require exactly 4 positional arguments:
#   $1 owner of the target table   $2 table name
#   $3 data file to load           $4 field separator used in the file
if [ $# -eq 4 ]; then
  table_user=$1
  table_name=$2
  into_file=$3
  file_separator=$4
  # abort early if the data file is missing (exit codes must be 0-255;
  # the original 'exit -1' is out of range and becomes 255 by accident)
  if [ ! -f "$into_file" ]; then
    echo "文件不存在,请检查" >&2
    exit 1
  fi
  echo "变量输入,已经赋值"
else
  echo "输入参数个数有误" >&2
  usage
  exit 1
fi
## Oracle connection settings; password comes from the environment so it
## is not hard-coded (NOTE(review): sqlplus/sqlldr argv still leaks it via ps)
username="****"
passwd=${PASS_ODS}
tns="****"
#######################################
# Spool the table's column list (table_name, column_name, data_type)
# into ./temp/<table>.tmp via a generated script/get_unl.sql, then check
# that the data file has as many columns as the table.
# Globals: table_user, table_name, into_file, file_separator,
#          username, passwd, tns
#######################################
get_tabel_column_info(){
  # BUGFIX: the spool writes into ./temp, but the old cleanup removed
  # /usr/temp/*.tmp — stale dumps survived and were reprocessed later
  rm -f ./temp/*.tmp
  #############################################################
  ### generate script/get_unl.sql which spools the column info
  #############################################################
  {
    echo "set heading off"
    echo "set head off"
    echo "set pagesize 0"
    echo "set linesize 3000"
    echo "set feedback off"
    echo "set termout off"
    echo "set trims on"
    echo "spool ./temp/$table_name.tmp"
    echo "select table_name,column_name,data_type from all_tab_cols where owner=upper('$table_user') and table_name=upper('$table_name') order by column_id;"
    echo "spool off"
    echo "exit"
  } > script/get_unl.sql
  echo "已经生成get_unl sql数据脚本 :"
  sqlplus -S "$username/$passwd@$tns" @script/get_unl.sql
  # column count of the first data line vs. row count of the column dump
  file_line=$(awk -F "$file_separator" 'NR==1{print NF; exit}' "$into_file")
  table_line=$(wc -l < "./temp/$table_name.tmp")
  if [ "$file_line" -eq "$table_line" ]; then
    echo "输入文件列和load表的列相对应"
  else
    echo "输入文件列和load表的列不对应" >&2
    exit 1
  fi
}
#######################################
# For every column dump in temp/, generate the unload SQL (script/<t>.sql)
# and the SQL*Loader control file (ctl/<t>.ctl).
# Globals: table_user, file_separator (read); table_name (written — the
#          original reassigned it here and load_data_table reuses it).
# Fixes vs. original:
#   * first column of DATE type no longer gets a leading comma in the ctl
#     (was a sqlldr syntax error: "(,COL date ...")
#   * the field separator is quoted: "fields terminated by '|'" — sqlldr
#     rejects unquoted special characters
#   * shell values enter awk via -v instead of quote-splicing; \047 is a
#     literal single quote inside awk strings
#######################################
exec_ctl_file(){
  for tmp_file in temp/*.tmp; do
    [ -f "$tmp_file" ] || continue
    table_name=$(basename "$tmp_file" .tmp)
    new_file_select="script/$table_name.sql"
    new_file_ctl="ctl/$table_name.ctl"
    rm -f "$new_file_select" "$new_file_ctl"
    ## unload SQL: spools the table as separator-joined lines into unl/
    awk -v owner="$table_user" -v tname="$table_name" 'BEGIN{
      print "set heading off"
      print "set head off"
      print "set pagesize 0"
      print "set linesize 3000"
      print "set feedback off"
      print "set termout off"
      print "set trims on"
      print "spool unl/" tname ".unl"
      print "select "
    }
    {
      expr = ($3 ~ /DATE/) ? "to_char(" $2 ",\047yyyymmddHH24miss\047)" : $2
      if (NR == 1) print expr
      else print "||\047|\047||" expr
    }
    END{
      print "||\047|\047"
      print "from " owner "." $1 ";"
      print "spool off"
      print "exit"
    }' "$tmp_file" > "$new_file_select" || { echo "生成ctl文件失败" >&2; exit 1; }
    ## sqlldr control file for the same table
    awk -v owner="$table_user" -v tname="$table_name" -v sep="$file_separator" 'BEGIN{
      print "load data"
      print "badfile \047bad/" tname ".bad\047"
      print "truncate into table " owner "." tname
      print "fields terminated by \047" sep "\047"
      print "trailing nullcols"
      print "("
    }
    {
      col = ($3 ~ /DATE/) ? $2 " date \047yyyymmddHH24miss\047" : $2
      if (NR == 1) print col
      else print "," col
    }
    END{
      print ")"
    }' "$tmp_file" > "$new_file_ctl" || { echo "生成ctl文件失败" >&2; exit 1; }
  done
  echo "ctl控制文件生成完毕"
}
#######################################
# Run sqlldr with the generated control file, then summarize each log.
# Globals: username, passwd, tns, table_name, into_file (read).
#######################################
load_data_table(){
  ## reset the log directory for this run
  if [ -d log ]; then
    rm -rf log/*
  else
    mkdir log
    echo "directory log is created"
  fi
  ## reset the bad-record directory for this run
  if [ -d bad ]; then
    rm -rf bad/*
  else
    mkdir bad
  fi
  echo "-------begin load data---------"
  # NOTE(review): credentials on argv are visible in `ps`; consider a
  # credential wallet or parfile if this runs on a shared host
  sqlldr "$username/$passwd@$tns" control="ctl/$table_name.ctl" data="$into_file" log="log/$table_name.log" bad="bad/$table_name.bad"
  # print the load summary lines from every log produced by this run
  # (glob instead of parsing `ls` output)
  for log_file in log/*; do
    [ -f "$log_file" ] || continue
    echo "the sqlldr log file << $(basename "$log_file") >> contain : "
    grep -E 'not load|Total|success' "$log_file" | grep -v 'thread'
    echo ""
  done
  echo "------end load----------------"
}
#######
# Entry point: inspect the table's columns, generate the sql/ctl files,
# then sqlldr the data file into the target table.
run(){
  get_tabel_column_info
  exec_ctl_file
  load_data_table
}

run