PySpark RDD data persistence

The snippet below persists a key-value RDD to a Hadoop SequenceFile on disk and reads it back. SequenceFiles store key-value pairs, which is why the map step builds (int, str) tuples first.

from pyspark import SparkConf, SparkContext

# Run locally with 4 worker threads; switch to a cluster master as needed.
conf = SparkConf().setAppName("miniProject").setMaster("local[4]")
# conf = SparkConf().setAppName("lg").setMaster("spark://192.168.10.182:7077")
sc = SparkContext(conf=conf)

# Build an RDD of (int, str) pairs: (1, 'a'), (2, 'aa'), (3, 'aaa').
rdd = sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))

# Write the pairs as a SequenceFile. This creates a directory of part
# files at the given path; the path must not already exist.
rdd.saveAsSequenceFile("path/to/file")

# Read the pairs back and verify the round trip.
print(sorted(sc.sequenceFile("path/to/file").collect()))
sc.stop()

Expected output:

[(1, 'a'), (2, 'aa'), (3, 'aaa')]
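
Writing to a file is one form of persistence; Spark's RDD persistence API (persist/cache) instead keeps a computed RDD in memory or on local disk so later actions do not recompute it. Below is a minimal sketch of that API, reusing the local[4] setup from above; the app name persistDemo is just an illustrative placeholder.

from pyspark import SparkConf, SparkContext, StorageLevel

conf = SparkConf().setAppName("persistDemo").setMaster("local[4]")
sc = SparkContext(conf=conf)

rdd = sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))

# Keep the computed partitions in memory, spilling to disk if they
# don't fit, so repeated actions reuse them instead of recomputing
# the lineage. rdd.cache() is shorthand for MEMORY_ONLY.
rdd.persist(StorageLevel.MEMORY_AND_DISK)

print(rdd.count())    # first action materializes and caches the RDD
print(rdd.collect())  # served from the persisted copy

rdd.unpersist()       # release the storage when done
sc.stop()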