Writing to HBase 1.x/2.x from Spark 2.x

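The example below writes two rows into the HBase table student through TableOutputFormat and saveAsNewAPIHadoopDataset. As a build sketch (the version numbers here are assumptions; pick the ones matching your cluster), the job needs spark-core plus the HBase client artifacts. Note that TableOutputFormat ships in hbase-mapreduce on HBase 2.x but in hbase-server on 1.x:

libraryDependencies ++= Seq(
  "org.apache.spark" %% "spark-core"      % "2.4.8", // assumed Spark 2.x version
  "org.apache.hbase" %  "hbase-client"    % "2.2.7", // assumed HBase version
  "org.apache.hbase" %  "hbase-mapreduce" % "2.2.7"  // use hbase-server instead on HBase 1.x
)

The full program:
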
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.{SparkConf, SparkContext}


/**
  * Write two sample rows to the HBase table "student" from Spark.
  */
object SparkWriteHbase {

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setAppName("SparkWriteHBase").setMaster("local")
    val sc = new SparkContext(conf)
    val tableName = "student"


    // Tell TableOutputFormat which table to write to.
    sc.hadoopConfiguration.set(TableOutputFormat.OUTPUT_TABLE, tableName)
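
    // NOTE (assumption): the HBase connection settings come from hbase-site.xml
    // on the classpath. If it is missing, point the client at ZooKeeper explicitly:
    //   sc.hadoopConfiguration.set("hbase.zookeeper.quorum", "localhost")
    //   sc.hadoopConfiguration.set("hbase.zookeeper.property.clientPort", "2181")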

    // new Job(conf) is deprecated; use the factory method instead.
    val job = Job.getInstance(sc.hadoopConfiguration)

    job.setOutputKeyClass(classOf[ImmutableBytesWritable])
    // The values written are Puts (mutations); Result is the read-side type.
    job.setOutputValueClass(classOf[Put])
    job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])



    // Two sample rows in the form id,name,gender,age.
    val inDataRDD = sc.makeRDD(Array("3,Rongcheng,M,26", "4,Guanhua,M,27"))

    val rdd = inDataRDD.map(_.split(",")).map(arr => {
      // Row key is the student id; all columns go into the "info" family.
      val put = new Put(Bytes.toBytes(arr(0)))
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("name"), Bytes.toBytes(arr(1)))
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("gender"), Bytes.toBytes(arr(2)))
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("age"), Bytes.toBytes(arr(3)))
      (new ImmutableBytesWritable(), put)
    })

    // Write the (rowkey, Put) pairs out through TableOutputFormat.
    rdd.saveAsNewAPIHadoopDataset(job.getConfiguration)

    sc.stop()


  }

}
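
Before running, the target table has to exist with the info column family (in the HBase shell: create 'student','info'). As a minimal sketch, it can also be created from Scala with the HBase 2.x Admin builder API; the ZooKeeper address is an assumption, and on HBase 1.x HTableDescriptor/HColumnDescriptor replace the builder classes:

import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
import org.apache.hadoop.hbase.client.{ColumnFamilyDescriptorBuilder, ConnectionFactory, TableDescriptorBuilder}

object CreateStudentTable {

  def main(args: Array[String]): Unit = {
    // Picks up hbase-site.xml from the classpath; the quorum below is an assumption.
    val conf = HBaseConfiguration.create()
    conf.set("hbase.zookeeper.quorum", "localhost")

    val connection = ConnectionFactory.createConnection(conf)
    val admin = connection.getAdmin
    val table = TableName.valueOf("student")

    // Create the table with a single "info" column family if it does not exist yet.
    if (!admin.tableExists(table)) {
      val descriptor = TableDescriptorBuilder.newBuilder(table)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .build()
      admin.createTable(descriptor)
    }

    admin.close()
    connection.close()
  }
}

After the Spark job finishes, scan 'student' in the HBase shell should show rows 3 and 4 with the name, gender and age columns in the info family.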
