foldByKey is a simplified version of aggregateByKey.
It is used when the intra-partition and inter-partition combine rules are the same.
Signature: foldByKey(initial value)(shared combine rule)
package com.atguigu

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Trans {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("Spark01_Partition")
    // Build the Spark context
    val sc = new SparkContext(conf)

    // A pair RDD spread across 2 partitions
    val rdd: RDD[(String, Int)] = sc.makeRDD(List(("a",1),("b",2),("b",3),("a",3),("b",4),("a",5)), 2)

    // Fold the values of each key, starting from 0; the same function (_ + _)
    // is applied both within a partition and when merging across partitions
    //val rdd2: RDD[(String, Int)] = rdd.foldByKey(0)((x, y) => x + y)
    val rdd2: RDD[(String, Int)] = rdd.foldByKey(0)(_ + _)

    rdd2.collect().foreach(println)

    sc.stop()
  }
}
Output:
(b,9)
(a,9)
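For comparison, the same result can be written with aggregateByKey by passing the identical function for both the intra-partition step (seqOp) and the inter-partition step (combOp); foldByKey is shorthand for exactly this case. A minimal sketch, reusing rdd from the example above (rdd3 is an illustrative name, not from the original post):

    // aggregateByKey(zeroValue)(seqOp, combOp): when seqOp and combOp
    // are the same function, this is equivalent to foldByKey(zeroValue)(func)
    val rdd3: RDD[(String, Int)] = rdd.aggregateByKey(0)(_ + _, _ + _)
    rdd3.collect().foreach(println)   // prints (b,9) and (a,9), same as foldByKey

Note that the initial value is applied once per key per partition, not once per key overall: with a non-zero value such as foldByKey(10)(_ + _), each key's total grows by 10 for every partition that contains that key.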