How to use Spark Dataset join in Java

import java.util.Arrays;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import scala.collection.JavaConverters;

Dataset<Row> df1, df2, df3;  // post_one_cat, categories and cat below are other Dataset<Row>s defined elsewhere

// Method 1 (works): joining on the shared column name "post_id" is an inner join by default,
// and the result keeps a single post_id column
df3 = df1.join(df2, "post_id")
         .selectExpr("hostname,request_date,post_id,title,author,name as category".split(","));
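For readers who want to run this first form end to end, here is a minimal self-contained sketch. The class name, schemas, and sample rows are made up for illustration; only the join call mirrors the snippet above.

import java.util.Arrays;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;

public class UsingJoinDemo {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder()
                .master("local[*]").appName("join-demo").getOrCreate();

        StructType posts = new StructType()
                .add("post_id", DataTypes.LongType)
                .add("title", DataTypes.StringType);
        StructType authors = new StructType()
                .add("post_id", DataTypes.LongType)
                .add("author", DataTypes.StringType);

        Dataset<Row> left = spark.createDataFrame(Arrays.asList(
                RowFactory.create(1L, "hello"),
                RowFactory.create(2L, "world")), posts);
        Dataset<Row> right = spark.createDataFrame(Arrays.asList(
                RowFactory.create(1L, "alice")), authors);

        // join(..., "post_id") is a USING join: the output contains only one
        // post_id column, so later selects are never ambiguous
        left.join(right, "post_id").show();  // inner join: only post_id = 1 survives

        spark.stop();
    }
}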
 5 
// Method 2 (works): when the join key has the same name on both sides, rename one side
// first; otherwise Spark reports a duplicate column error (dropping the duplicate
// afterwards had no effect here, for reasons that were unclear)
Dataset<Row> acc = df1.withColumnRenamed("post_id", "post_id_acc");
Dataset<Row> post_categories = acc
        .join(post_one_cat, acc.col("post_id_acc").equalTo(post_one_cat.col("post_id")), "left_outer")
        .join(categories, post_one_cat.col("cate_id").equalTo(categories.col("id")), "left_outer")
        .selectExpr("hostname,request_date,post_id_acc as post_id,title,author,name as category".split(","));
// Same joins, renaming columns instead of using selectExpr:
// post_categories = acc
//         .join(post_one_cat, acc.col("post_id_acc").equalTo(post_one_cat.col("post_id")), "left_outer")
//         .join(categories, post_one_cat.col("cate_id").equalTo(categories.col("id")), "left_outer")
//         .withColumnRenamed("name", "category")
//         .withColumnRenamed("post_id_cat", "post_id");
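A note on the drop failure mentioned above: after a join that keeps both copies of the key, drop("post_id") by name cannot single out one side's copy, which is likely why it seemed to have no effect. Spark 2.0+ also offers a drop(Column) overload that takes a column reference from a specific side; the sketch below is an untested assumption along those lines, not something from the original post.

// Assumed alternative (Spark 2.0+): join without renaming, then drop the
// right-hand duplicate by Column reference instead of by name
Dataset<Row> deduped = df1
        .join(post_one_cat, df1.col("post_id").equalTo(post_one_cat.col("post_id")), "left_outer")
        .drop(post_one_cat.col("post_id"));  // removes only post_one_cat's copy of post_id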
// Method 3 (works): pass the join columns as a scala.collection.Seq; like method 1,
// this USING-style join keeps a single copy of each join column in the result
df3 = df1.join(df2, JavaConverters.asScalaIteratorConverter(Arrays.asList("post_id").iterator()).asScala().toSeq(), "left_outer")
         .join(cat, JavaConverters.asScalaIteratorConverter(Arrays.asList("cate_id").iterator()).asScala().toSeq(), "left_outer")
         .selectExpr("hostname,request_date,post_id,title,author,name as category".split(","));
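The JavaConverters incantation above is verbose enough that a small helper is worth it. toSeq below is a hypothetical name, not a Spark API; it only wraps the same conversion used above.

// Hypothetical helper: build the scala.collection.Seq<String> expected by the
// join(Dataset, Seq, String) overload from plain Java varargs
static scala.collection.Seq<String> toSeq(String... cols) {
    return JavaConverters.asScalaIteratorConverter(Arrays.asList(cols).iterator())
            .asScala().toSeq();
}

// With it, method 3 reads as:
// df3 = df1.join(df2, toSeq("post_id"), "left_outer")
//          .join(cat, toSeq("cate_id"), "left_outer")
//          .selectExpr("hostname,request_date,post_id,title,author,name as category".split(","));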

 

posted @ 2018-09-03 16:03  一直爬行的蜗牛牛