Running Hive with the Spark execution engine via hive -e
#!/bin/bash
# Adjust the Spark parameters below to match your actual environment
hive -e "
set mapreduce.job.queuename=batch;
set hive.execution.engine=spark;
set spark.executor.memory=4g;
set spark.executor.cores=3;
set spark.executor.instances=16;
set spark.serializer=org.apache.spark.serializer.KryoSerializer;
-- place the SQL to execute here
"
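As a minimal sketch of an equivalent approach, the same settings can be supplied as --hiveconf flags on the command line, keeping the SQL in a separate file instead of an inline -e string; the file name query.sql here is a placeholder, not something from the original script.

#!/bin/bash
# Same Spark-on-Hive settings passed as --hiveconf flags; query.sql is a hypothetical
# file holding the SQL that the original script ran inline with -e.
hive \
  --hiveconf mapreduce.job.queuename=batch \
  --hiveconf hive.execution.engine=spark \
  --hiveconf spark.executor.memory=4g \
  --hiveconf spark.executor.cores=3 \
  --hiveconf spark.executor.instances=16 \
  --hiveconf spark.serializer=org.apache.spark.serializer.KryoSerializer \
  -f query.sql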