Hive: using the Spark execution engine with hive -e

#!/bin/bash
# Adjust the Spark parameters below to match your cluster resources
hive -e "
set mapreduce.job.queuename=batch;
set hive.execution.engine=spark;
set spark.executor.memory=4g;
set spark.executor.cores=3;
set spark.executor.instances=16;
set spark.serializer=org.apache.spark.serializer.KryoSerializer;

-- the SQL statements to execute go here

"

 
