PySpark aggregate
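
RDD.aggregate(zeroValue, seqOp, combOp) folds each partition's elements into an accumulator with seqOp, starting from zeroValue, then merges the per-partition accumulators with combOp. Unlike reduce and fold, the accumulator type may differ from the element type, which is what makes the (sum, count) pair below possible; for the mean specifically, RDD.mean() is the built-in shortcut.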

from pyspark import SparkContext

if __name__ == "__main__":

    sc = SparkContext('local', 'aggregate')
    nums = sc.parallelize([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])

    sum_cnt = nums.aggregate(
        (0, 0),  # zero value: (running sum, running count)
        lambda acc, value: (acc[0] + value, acc[1] + 1),  # seqOp: fold one element into a partition's accumulator
        lambda acc1, acc2: (acc1[0] + acc2[0], acc1[1] + acc2[1])  # combOp: merge accumulators from two partitions
    )

    print("mean: ", round(sum_cnt[0] / sum_cnt[1], 4))
mean:  5.5
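
The same pattern extends to any accumulator shape. Here is a minimal sketch that computes min and max in one pass, appended to the script above (the min_max name is illustrative); note that the zero value should be a neutral element, because Spark folds it in once per partition and again when merging the partial results:

    min_max = nums.aggregate(
        (float('inf'), float('-inf')),  # zero value: neutral for (min, max), so safe however often it is folded in
        lambda acc, value: (min(acc[0], value), max(acc[1], value)),  # seqOp
        lambda acc1, acc2: (min(acc1[0], acc2[0]), max(acc1[1], acc2[1]))  # combOp
    )
    print(min_max)
(1, 10)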