-- Run Hive on the Spark execution engine and let independent stages execute in parallel
set hive.execution.engine=spark;
set hive.exec.parallel=true;
set hive.exec.parallel.thread.number=8;

-- Compress intermediate (between-stage) data with Snappy
set hive.exec.compress.intermediate=true;
set hive.intermediate.compression.codec=org.apache.hadoop.io.compress.SnappyCodec;
set hive.intermediate.compression.type=BLOCK;

-- Compress the final job output with Gzip
set hive.exec.compress.output=true;
set mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec;
set mapred.output.compression.type=BLOCK;
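With hive.exec.parallel enabled, stages of a query that do not depend on each other can be launched concurrently, up to hive.exec.parallel.thread.number at a time. A minimal sketch of the classic case (the table and column names are made up for illustration, not from the post):

-- Hypothetical query: the two aggregations below are independent stages,
-- so with hive.exec.parallel=true they can run at the same time.
INSERT OVERWRITE TABLE demo_daily_stats
SELECT dt, COUNT(*) AS cnt FROM demo_orders  GROUP BY dt
UNION ALL
SELECT dt, COUNT(*) AS cnt FROM demo_refunds GROUP BY dt;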

set mapreduce.job.queuename=uat2;  -- set the YARN queue that Hive jobs are submitted to
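Note that mapreduce.job.queuename is the MapReduce-engine property; with hive.execution.engine=spark as set above, the YARN queue is typically chosen through Spark's own setting, and on Tez through tez.queue.name. A hedged sketch of the likely equivalents (an assumption, verify against your Hive version's docs):

-- set spark.yarn.queue=uat2;   -- Hive on Spark
-- set tez.queue.name=uat2;     -- Hive on Tez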

-- Reducer tuning: cap the estimated reducer count, optionally pin it outright,
-- and control how much input each reducer should handle.
set hive.exec.reducers.max=2400;
set mapreduce.job.reduces=2004;                -- a positive value pins the reducer count and bypasses Hive's estimate
set hive.exec.reducers.bytes.per.reducer=24;   -- input bytes per reducer used by the estimate
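When mapreduce.job.reduces is left at its default of -1, Hive estimates the reducer count from the input size, roughly min(hive.exec.reducers.max, ceil(input bytes / hive.exec.reducers.bytes.per.reducer)). A hedged sketch with made-up numbers (not from the post):

-- Hypothetical 10 GB input with the more common default of 256 MB per reducer:
--   reducers = min(2400, ceil(10240 MB / 256 MB)) = min(2400, 40) = 40
-- With bytes.per.reducer=24 as above, the estimate hits the 2400 cap for any realistic input,
-- so the setting only matters here if mapreduce.job.reduces is put back to -1.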

-- Container and JVM sizing: the -Xmx heap must fit inside the container memory requested from YARN.
set mapred.child.java.opts=-Xmx3024m;
set mapreduce.reduce.memory.mb=4096;
set mapreduce.map.memory.mb=4096;
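mapred.child.java.opts applies to both map and reduce JVMs; on Hadoop 2+ the heap can also be set per task type with mapreduce.map.java.opts and mapreduce.reduce.java.opts, which take precedence when present. A hedged sketch of that variant (the -Xmx values are just a common ~80% of the 4096 MB container, not from the post):

-- set mapreduce.map.java.opts=-Xmx3276m;
-- set mapreduce.reduce.java.opts=-Xmx3276m;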

-- Raise the parallel stage thread count to 16 (overrides the value of 8 set earlier in this script)
set hive.exec.parallel.thread.number=16;

-- The same property can be passed on the command line instead of in the script:
--   hive --hiveconf hive.exec.parallel.thread.number=16
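Putting the command-line form together, a hedged sketch of a full invocation (the script file name is made up for illustration):

-- hive --hiveconf hive.exec.parallel.thread.number=16 \
--      --hiveconf mapreduce.job.queuename=uat2 \
--      -f demo_etl_job.sql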