作用:缩减分区数,用于大数据集过滤后,提高小数据集的执行效率

coalesce如果不传第二个参数,shuffle默认为false,不产生shuffle

package com.atguigu
import org.apache.spark.rdd.RDD
import org.apache.spark.{HashPartitioner, Partitioner, SparkConf, SparkContext}
object Trans {
  /** Demo: coalesce(2) shrinks an RDD from 4 partitions to 2 by merging
    * existing partitions (no shuffle when the shuffle flag is left false).
    * Prints (partitionIndex, element) pairs before and after coalescing. */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("Spark01_Partition")
    // Build the Spark context
    val sc = new SparkContext(conf)

    // 16 elements distributed over 4 partitions
    val source: RDD[Int] = sc.makeRDD(1 to 16, 4)

    // Tag every element with the index of the partition it lives in,
    // so the partition layout becomes visible in the printed output.
    val taggedBefore: RDD[(Int, Int)] = source.mapPartitionsWithIndex { (partId, elems) =>
      elems.map(v => (partId, v))
    }
    taggedBefore.collect().foreach(println)

    println("********")

    // Shrink to 2 partitions; adjacent partitions are merged, no shuffle occurs.
    val shrunk: RDD[Int] = source.coalesce(2)
    val taggedAfter: RDD[(Int, Int)] = shrunk.mapPartitionsWithIndex { (partId, elems) =>
      elems.map(v => (partId, v))
    }
    taggedAfter.collect().foreach(println)

    sc.stop()
  }
}
(0,1)
(0,2)
(0,3)
(0,4)
(1,5)
(1,6)
(1,7)
(1,8)
(2,9)
(2,10)
(2,11)
(2,12)
(3,13)
(3,14)
(3,15)
(3,16)
********
(0,1)
(0,2)
(0,3)
(0,4)
(0,5)
(0,6)
(0,7)
(0,8)
(1,9)
(1,10)
(1,11)
(1,12)
(1,13)
(1,14)
(1,15)
(1,16)
coalesce传入的参数如果大于当前分区数4,则不做处理,分区数没有变化。
网友评论