Writing to Kudu from Spark - Using the DataFrame API

 

When writing through the DataFrame API, only one save mode, "append", is currently supported; the "overwrite" mode has not been implemented yet.
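When write semantics other than append are needed, the KuduContext row-level operations (insert, upsert, update, delete) can be used instead of the DataFrame writer. A minimal, hedged sketch, reusing the kuduContext, customersAppendDF and kuduTableName defined in the full example below:

// insertRows fails on duplicate primary keys; upsertRows inserts or updates in place
kuduContext.insertRows(customersAppendDF, kuduTableName)
kuduContext.upsertRows(customersAppendDF, kuduTableName)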

import org.apache.kudu.spark.kudu._
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SparkSession

// Row type backing the sample data; required for toDF() (inferred from the example data below)
case class Customer(name: String, age: Int, city: String)

/**
  * Created by angel;
  */
object DataFrame_write {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("AcctfileProcess")
      // set the master URL and Spark parameters
      .setMaster("local")
      .set("spark.worker.timeout", "500")
      .set("spark.cores.max", "10")
      .set("spark.rpc.askTimeout", "600s")
      .set("spark.network.timeout", "600s")
      .set("spark.task.maxFailures", "1")
      .set("spark.speculationfalse", "false")
      .set("spark.driver.allowMultipleContexts", "true")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sparkContext = SparkContext.getOrCreate(sparkConf)
    val sqlContext = SparkSession.builder().config(sparkConf).getOrCreate().sqlContext
    //TODO 1: define the Kudu table name and master addresses
    val kuduTableName = "spark_kudu_tbl"
    val kuduMasters = "hadoop01:7051,hadoop02:7051,hadoop03:7051"
    // create a KuduContext for the Kudu cluster
    val kuduContext = new KuduContext(kuduMasters, sqlContext.sparkContext)
    //TODO 2: prepare the sample data
    val customersAppend = Array(
      Customer("bob", 30, "boston"),
      Customer("charlie", 23, "san francisco"))
    import sqlContext.implicits._
    //TODO 3: configure the Kudu options
    val kuduOptions: Map[String, String] = Map(
      "kudu.table"  -> kuduTableName,
      "kudu.master" -> kuduMasters)
    //TODO 4: convert the data into a DataFrame
    val customersAppendDF = sparkContext.parallelize(customersAppend).toDF()

    //TODO 5: perform the write (only append mode is supported at the moment)
    customersAppendDF.write.options(kuduOptions).mode("append").kudu

    //TODO 6: read the data back
    sqlContext.read.options(kuduOptions).kudu.show()
  }
}
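Note that mode("append") requires spark_kudu_tbl to already exist in Kudu; the KuduContext created above can also create it before the write. A minimal sketch under the assumption that the table is keyed and hash-partitioned on name (the schema and partitioning here are illustrative, not part of the original post):

import scala.collection.JavaConverters._
import org.apache.kudu.client.CreateTableOptions
import org.apache.spark.sql.types._

// create the target table if it is missing, keyed on "name"
if (!kuduContext.tableExists(kuduTableName)) {
  val schema = StructType(List(
    StructField("name", StringType, nullable = false),
    StructField("age",  IntegerType, nullable = true),
    StructField("city", StringType, nullable = true)))
  kuduContext.createTable(kuduTableName, schema, Seq("name"),
    new CreateTableOptions()
      .addHashPartitions(List("name").asJava, 3)
      .setNumReplicas(3))
}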

 
