

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.VoidFunction;
import scala.Tuple2;

import java.util.Arrays;
import java.util.List;

/**
 * join(otherDataset, [numTasks]) operator:
 * Combines two pair RDDs by key: for each key, it computes the Cartesian
 * product of the values associated with that key in the two RDDs.
 *
 * In short: group by key, then take the Cartesian product of the matching values.
 */
public class JoinOperator {

    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local").setAppName("join");
        JavaSparkContext sc = new JavaSparkContext(conf);

        // First pair RDD: student id -> value
        List<Tuple2<String, String>> stus = Arrays.asList(
                new Tuple2<>("w1", "1"),
                new Tuple2<>("w2", "2"),
                new Tuple2<>("w3", "3"),
                new Tuple2<>("w2", "22"),
                new Tuple2<>("w1", "11")
        );
        // Second pair RDD: student id -> score
        List<Tuple2<String, String>> scores = Arrays.asList(
                new Tuple2<>("w1", "a1"),
                new Tuple2<>("w2", "a2"),
                new Tuple2<>("w2", "a22"),
                new Tuple2<>("w1", "a11"),
                new Tuple2<>("w3", "a3")
        );

        JavaPairRDD<String, String> stusRdd = sc.parallelizePairs(stus);
        JavaPairRDD<String, String> scoresRdd = sc.parallelizePairs(scores);

        // join matches the two RDDs by key; each result element is
        // (key, (value from stusRdd, value from scoresRdd)).
        JavaPairRDD<String, Tuple2<String, String>> result = stusRdd.join(scoresRdd);

        result.foreach(new VoidFunction<Tuple2<String, Tuple2<String, String>>>() {
            @Override
            public void call(Tuple2<String, Tuple2<String, String>> tuple) throws Exception {
                System.err.println(tuple._1 + ":" + tuple._2);
            }
        });

        sc.close();
    }
}
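For reference, here is the output the example above should produce. Both RDDs contain two values for the keys w1 and w2, so join emits the 2×2 Cartesian product of values for each of those keys, and a single pair for w3. The output order is not deterministic and may differ between runs:

w1:(1,a1)
w1:(1,a11)
w1:(11,a1)
w1:(11,a11)
w2:(2,a2)
w2:(2,a22)
w2:(22,a2)
w2:(22,a22)
w3:(3,a3)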
