Kudu Native RDD

Alongside its DataFrame support, the Spark integration for Kudu also provides a native Kudu RDD, which reads a Kudu table directly into an RDD[Row].
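
To compile the example, the kudu-spark artifact must be on the classpath. A minimal build.sbt sketch, assuming Spark 2.x on Scala 2.11; the versions below are assumptions, so match them to your cluster:

// build.sbt (versions are assumptions; adjust to your environment)
libraryDependencies ++= Seq(
  "org.apache.spark" %% "spark-sql"   % "2.1.0" % "provided",
  "org.apache.kudu"  %% "kudu-spark2" % "1.6.0"
)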

import org.apache.kudu.spark.kudu.KuduContext
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{Row, SparkSession}

/**
  * Created by angel;
  */
object KuduNativeRDD {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("KuduNativeRDD")
      // set the master URL and common Spark parameters
      .setMaster("local")
      .set("spark.worker.timeout", "500")
      .set("spark.cores.max", "10")
      .set("spark.rpc.askTimeout", "600s")
      .set("spark.network.timeout", "600s")
      .set("spark.task.maxFailures", "1")
      .set("spark.speculationfalse", "false")
      .set("spark.driver.allowMultipleContexts", "true")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sparkContext = SparkContext.getOrCreate(sparkConf)
    val sqlContext = SparkSession.builder().config(sparkConf).getOrCreate().sqlContext
    // create a KuduContext against the Kudu master addresses
    val kuduMasters = "hadoop01:7051,hadoop02:7051,hadoop03:7051"
    val kuduContext = new KuduContext(kuduMasters, sqlContext.sparkContext)
    //TODO 1: name the Kudu table to read
    val kuduTableName = "spark_kudu_tbl"
    //TODO 2: project only the columns we need
    val kuduTableProjColumns = Seq("name", "age")

    //TODO 3: read the table into an RDD[Row]
    val custRDD = kuduContext.kuduRDD(sparkContext, kuduTableName, kuduTableProjColumns)
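    // custRDD is an RDD[Row]; each Row carries only the projected columns,
    // in the order listed above ("name" first, then "age")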

    //TODO 4: convert each Row into a (name, age) tuple
    val custTuple = custRDD.map {
      case Row(name: String, age: Int) => (name, age)
    }
    //TODO 5: print the results
    custTuple.collect().foreach(println(_))
  }
}
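
The example assumes spark_kudu_tbl already exists with at least a string column name and an int column age. A minimal sketch of creating such a table through the same KuduContext follows; the key column, hash partitioning, and replica count are assumptions for a test cluster, not taken from the original code:

import scala.collection.JavaConverters._
import org.apache.kudu.client.CreateTableOptions
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

// hypothetical schema; "name" serves as the primary key
val schema = StructType(List(
  StructField("name", StringType, nullable = false),
  StructField("age", IntegerType, nullable = true)
))

if (!kuduContext.tableExists(kuduTableName)) {
  kuduContext.createTable(
    kuduTableName,
    schema,
    Seq("name"),                                  // primary key column(s)
    new CreateTableOptions()
      .addHashPartitions(List("name").asJava, 3)  // 3 hash buckets (assumption)
      .setNumReplicas(1)                          // single replica for a test setup
  )
}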

 
