Below is an example of using Azkaban to schedule big-data scripts.
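The five jobs below are chained through Azkaban dependencies: clean depends on upload, hivesql on clean, ip on hivesql, and mysql on ip. To deploy the flow, the .job files and their shell scripts are packaged into a single zip archive and uploaded to an Azkaban project. A minimal packaging sketch (the archive name is just an example):

# Package the job definitions and scripts into one archive for Azkaban upload
zip clicklog_flow.zip \
    upload.job uploadFile2Hdfs.sh \
    clean.job clean.sh \
    hivesql.job hivesql.sh \
    ip.job ip.sh \
    mysql.job mysql.sh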

1. The upload job: a scheduled task uploads the log files to HDFS

# upload.job
type=command
command=bash uploadFile2Hdfs.sh

#!/bin/bash
# uploadFile2Hdfs.sh

#set java env
export JAVA_HOME=/soft/jdk/
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH

#set hadoop env
export HADOOP_HOME=/soft/hadoop/
export PATH=${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:$PATH


# Problems with version 1:
# The files are uploaded to the Hadoop cluster, but the original files are still there. How to handle that?
# The log files are all named xxxx.log1, so on the next upload HDFS reports that the file already exists. How to handle that?

# How to solve the problems of version 1:
#     1. First move the files to be uploaded into a staging ("to upload") directory.
#     2. While moving them, rename the files according to a fixed pattern, e.g.
#        /export/software/hadoop.log1  ->  /export/data/click_log/xxxxx_click_log_{date}


# Directory where the log files are stored
log_src_dir=/home/centos/logs/log/

# Staging directory for files waiting to be uploaded
log_toupload_dir=/home/centos/logs/toupload/

day_01=`date -d'-1 day' +%Y-%m-%d`
syear=`date --date=$day_01 +%Y`
smonth=`date --date=$day_01 +%m`
sday=`date --date=$day_01 +%d`

#echo $day_01
#echo $syear
#echo $smonth
#echo $sday

# Root path on HDFS where the log files are uploaded
hdfs_root_dir=/data/clickLog/$syear/$smonth/$sday

hadoop fs -mkdir -p $hdfs_root_dir

# Print environment info
echo "envs: hadoop_home: $HADOOP_HOME"


# Scan the log directory and check whether any files need to be uploaded
echo "log_src_dir:"$log_src_dir
ls $log_src_dir | while read fileName
do
    if [[ "$fileName" == access.log ]]; then
    # if [ "access.log" = "$fileName" ];then
        date=`date +%Y_%m_%d_%H_%M_%S`
        # Move the file to the staging directory and rename it
        # Print info
        echo "moving $log_src_dir$fileName to $log_toupload_dir"xxxxx_click_log_$fileName"$date"
        mv $log_src_dir$fileName $log_toupload_dir"xxxxx_click_log_$fileName"$date
        # Append the staged file's path to a list file named willDoing
        echo $log_toupload_dir"xxxxx_click_log_$fileName"$date >> $log_toupload_dir"willDoing."$date
    fi
    
done
# Find the list files (willDoing)
ls $log_toupload_dir | grep will |grep -v "_COPY_" | grep -v "_DONE_" | while read line
do
    # Print info
    echo "toupload is in file:"$line
    # Rename the list file willDoing to willDoing_COPY_
    mv $log_toupload_dir$line $log_toupload_dir$line"_COPY_"
    # Read willDoing_COPY_ line by line; each line is the path of one file to upload
    # (the pipe runs the inner loop in a subshell, so a separate variable name keeps things clear)
    cat $log_toupload_dir$line"_COPY_" | while read uploadFile
    do
        # Print info
        echo "putting...$uploadFile to hdfs path.....$hdfs_root_dir"
        hadoop fs -put $uploadFile $hdfs_root_dir
    done
    mv $log_toupload_dir$line"_COPY_"  $log_toupload_dir$line"_DONE_"
done
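
After the upload script runs, the files should sit under the date-partitioned path on HDFS. A quick check, reusing the same date logic as the script (a sketch, not part of the job):

# List yesterday's click-log directory on HDFS
day_01=`date -d'-1 day' +%Y-%m-%d`
hadoop fs -ls /data/clickLog/`date --date=$day_01 +%Y`/`date --date=$day_01 +%m`/`date --date=$day_01 +%d`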

2. The clean job: clean the data and store the result on HDFS

# clean.job
type=command
dependencies=upload
command=bash clean.sh

#!/bin/bash
# clean.sh
export JAVA_HOME=/usr/local/soft/java
export JRE_HOME=${JAVA_HOME}/jre 
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib 
export PATH=${JAVA_HOME}/bin:$PATH
#set hadoop env
export HADOOP_HOME=/usr/local/software/hadoop
export PATH=${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:$PATH

#log_local_dir=/home/hadoop/flume/

#log_hdfs_dir=/test/2017/7/

day_01=`date -d'-1 day' +%Y-%m-%d`
syear=`date --date=$day_01 +%Y`
smonth=`date --date=$day_01 +%m`
sday=`date --date=$day_01 +%d`

#echo $day_01
#echo $syear
#echo $smonth
#echo $sday

log_hdfs_dir=/data/clickLog/$syear/$smonth/$sday
#echo $log_hdfs_dir
click_log_clean=com.xiaofeiyang.AccessLogDriver

clean_dir=/cleaup/$syear/$smonth/$sday

echo "hadoop jar /home/centos/hivedemo/hiveaad.jar $click_log_clean $log_hdfs_dir $clean_dir"
hadoop fs -rm -r -f $clean_dir
hadoop jar /home/hadoop/hadoop.jar $click_log_clean $log_hdfs_dir $clean_dir
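
The clean job writes its MapReduce output to $clean_dir, which the next step loads into Hive. A quick way to inspect the result before the load (a sketch; the part-* file names are an assumption that depends on the MapReduce job's output format):

# Inspect yesterday's cleaned output
day_01=`date -d'-1 day' +%Y-%m-%d`
clean_dir=/cleaup/`date --date=$day_01 +%Y`/`date --date=$day_01 +%m`/`date --date=$day_01 +%d`
hadoop fs -ls $clean_dir
hadoop fs -cat $clean_dir/part-* | head -n 5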

3. The hivesql job: load the cleaned data into Hive

# hivesql.job
type=command
dependencies=clean
command=bash hivesql.sh

#!/bin/bash
# hivesql.sh
export JAVA_HOME=/soft/jdk
export JRE_HOME=${JAVA_HOME}/jre 
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib 
export PATH=${JAVA_HOME}/bin:$PATH
#set hadoop env
export HADOOP_HOME=/soft/hadoop
export PATH=${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:$PATH

export HIVE_HOME=/soft/hive
export PATH=${HIVE_HOME}/bin:$PATH

log_local_dir=/home/centos/flume/

#log_hdfs_dir=/test/2017/7/

day_01=`date -d'-1 day' +%Y-%m-%d`
syear=`date --date=$day_01 +%Y`
smonth=`date --date=$day_01 +%m`
sday=`date --date=$day_01 +%d`

#echo $day_01
#echo $syear
#echo $smonth
#echo $sday

log_hdfs_dir=/test/$syear/$smonth/$sday
#echo $log_hdfs_dir
click_log_clean=com.it18zhang.project.mr.AccessLogDriver

clean_dir=/cleaup/$syear/$smonth/$sday
HQL_origin="load data inpath '$clean_dir' into table mydb2.accesslog"
#HQL_origin="create external table db2.access(ip string,day string,url string,upflow string) row format delimited fields terminated by ',' location '$clean_dir'"
#echo $HQL_origin

hive -e  "$HQL_origin"
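
The load data statement assumes that mydb2.accesslog already exists. Judging from the commented-out create external table line above, a one-time DDL along these lines would be needed (a sketch; the schema is inferred from that comment and is otherwise an assumption):

# Create the target table once before the first load (schema inferred from the
# commented-out "create external table" statement above)
hive -e "create table if not exists mydb2.accesslog(ip string, day string, url string, upflow string) row format delimited fields terminated by ','"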

4. The ip job: generate statistics from the cleaned data

# ip.job
type=command
dependencies=hivesql
command=bash ip.sh

#!/bin/bash
# ip.sh
export JAVA_HOME=/soft/jdk
export JRE_HOME=${JAVA_HOME}/jre 
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib 
export PATH=${JAVA_HOME}/bin:$PATH
#set hadoop env
export HADOOP_HOME=/soft/hadoop
export PATH=${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:$PATH

export HIVE_HOME=/soft/hive
export PATH=${HIVE_HOME}/bin:$PATH

log_local_dir=/home/centos/flume/

#log_hdfs_dir=/test/2017/7/

day_01=`date -d'-1 day' +%Y-%m-%d`
syear=`date --date=$day_01 +%Y`
smonth=`date --date=$day_01 +%m`
sday=`date --date=$day_01 +%d`

#echo $day_01
#echo $syear
#echo $smonth
#echo $sday

log_hdfs_dir=/test/$syear/$smonth/$sday
#echo $log_hdfs_dir
click_log_clean=com.it18zhang.project.mr.AccessLogDriver

clean_dir=/cleaup/$syear/$smonth/$sday

HQL_origin="insert into  mydb2.upflow  select ip,sum(upflow) as sum from mydb2.accesslog group by ip order by sum desc "
#echo $HQL_origin

hive -e  "$HQL_origin"
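
Similarly, the insert statement needs mydb2.upflow to exist, and step 5 exports it from /user/hive/warehouse/mydb2.db/upflow with --input-fields-terminated-by ',', so the table should be stored comma-delimited. A hedged sketch (the column types are an assumption):

# Create the result table once; comma-delimited so the sqoop export in step 5 can parse it
hive -e "create table if not exists mydb2.upflow(ip string, sum bigint) row format delimited fields terminated by ','"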

5. The mysql job: export the statistics to MySQL

# mysql.job
type=command
dependencies=ip
command=bash mysql.sh

#!/bin/bash
# mysql.sh
export JAVA_HOME=/soft/jdk
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH
#set hadoop env
export HADOOP_HOME=/soft/hadoop
export PATH=${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:$PATH

export HIVE_HOME=/soft/hive
export PATH=${HIVE_HOME}/bin:$PATH
export SQOOP_HOME=/soft/sqoop
export PATH=${SQOOP_HOME}/bin:$PATH
# Export the Hive result table to MySQL
sqoop export \
  --connect jdbc:mysql://s201:3306/userdb \
  --username sqoop --password sqoop \
  --table upflow \
  --export-dir /user/hive/warehouse/mydb2.db/upflow \
  --input-fields-terminated-by ','
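
sqoop export requires the target table to already exist in MySQL. A sketch of creating it and spot-checking the exported rows, with the connection details taken from the command above and the column types assumed:

# Create the MySQL target table (column types are an assumption) and check the export
mysql -h s201 -u sqoop -psqoop userdb -e 'CREATE TABLE IF NOT EXISTS upflow(ip VARCHAR(64), `sum` BIGINT);'
mysql -h s201 -u sqoop -psqoop userdb -e 'SELECT * FROM upflow ORDER BY `sum` DESC LIMIT 10;'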

 
