Spark RDD Operations Practice 2

Author: 蓝色的雪啦 | Published 2017-04-26 17:41

    scala> def func(index: Int, iter: Iterator[(Int)]): Iterator[String] = {
         | iter.toList.map(x => "[partId: " + index + ", val: " + x + "]").iterator
         | }
    func: (index: Int, iter: Iterator[Int])Iterator[String]
    
    scala> z.mapPartitionsWithIndex(func).collect
    res29: Array[String] = Array([partId: 0, val: 1], [partId: 0, val: 2], [partId: 0, val: 3], [partId: 1, val: 4], [partId: 1, val: 5], [partId: 1, val: 6])
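
A note before going on: z carries over from Part 1 of these exercises; judging by the output, it presumably holds the numbers 1 through 6 in two partitions (something like sc.parallelize(List(1, 2, 3, 4, 5, 6), 2)). The func/mapPartitionsWithIndex pattern above is the standard trick for seeing which elements live in which partition, and it reappears throughout this session.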
    
    scala> z.aggregate(0)(math.max(_, _), _ + _)
    res30: Int = 9
    
    scala> z.aggregate(5)(math.max(_, _), _ + _)
    res31: Int = 16
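
Why 9 and 16: aggregate folds each partition with the first function (seqOp) starting from the zero value, then merges the per-partition results with the second function (combOp), seeded with the zero value once more. With partitions [1,2,3] and [4,5,6], zero 0 gives maxes 3 and 6, combined as 0 + 3 + 6 = 9; zero 5 gives maxes 5 and 6, combined as 5 + 5 + 6 = 16. A minimal local sketch of this evaluation order (plain Scala, not the Spark API):

    val parts = List(List(1, 2, 3), List(4, 5, 6))
    val zero  = 5
    // seqOp runs inside each partition, seeded with the zero value
    val perPartition = parts.map(_.foldLeft(zero)(math.max))  // List(5, 6)
    // combOp merges the partition results, seeded with the zero value again
    val result = perPartition.foldLeft(zero)(_ + _)           // 5 + 5 + 6 = 16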
    
    scala> val z = sc.parallelize(List("a", "b", "c", "d", "e", "f"), 2)
    z: org.apache.spark.rdd.RDD[String] = ParallelCollectionRDD[50] at parallelize at <console>:24
    
    scala> def func(index: Int, iter: Iterator[(String)]): Iterator[String] = {
         | iter.toList.map(x => "[partId:" + index + ",val: " + x + "]").iterator
         | }
    func: (index: Int, iter: Iterator[String])Iterator[String]
    
    scala> z.mapPartitionsWithIndex(func).collect
    res35: Array[String] = Array([partId:0,val: a], [partId:0,val: b], [partId:0,val: c], [partId:1,val: d], [partId:1,val: e], [partId:1,val: f])
    
    scala> z.aggregate("")(_+_, _+_)
    res36: String = abcdef
    
    scala> z.aggregate("x")(_+_, _+_)
    res37: String = xxabcxdef
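
The "xxabcxdef" result shows the same seeding rule with strings: each partition folds to "x" plus its elements ("xabc" and "xdef"), and the driver-side combine is seeded with "x" yet again, giving "x" + "xabc" + "xdef". Note also that combOp receives partition results in whatever order they arrive, so "xxdefxabc" would have been an equally valid answer.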
    
    scala> val z = sc.parallelize(List("12","23", "234", "268", "98"), 2)
    z: org.apache.spark.rdd.RDD[String] = ParallelCollectionRDD[52] at parallelize at <console>:24
    
    scala> z.aggregate("")((x,y) => math.max(x.length, y.length).toString, (x,y) => x+y)
    res38: String = 22                                                              
    
    scala> z.mapPartitionsWithIndex(func).collect
    res39: Array[String] = Array([partId:0,val: 12], [partId:0,val: 23], [partId:1,val: 234], [partId:1,val: 268], [partId:1,val: 98])
    
    scala> z.aggregate("")((x,y) => math.max(x.length, y.length).toString, (x,y) => x+y)
    res41: String = 22
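
In this trick, each seqOp step takes the larger of the two string lengths and converts it back to a string, so from the second element on the running value is a single character and only its length (1) competes with the next element. Partition 0 ("12", "23") folds "" → "2" → "2", and partition 1 ("234", "268", "98") folds "" → "3" → "3" → "2" (the length of "98" beats the running length 1); concatenating gives "22".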
    
    scala> val z = sc.parallelize(List("12","23","345","4567"), 2)
    z: org.apache.spark.rdd.RDD[String] = ParallelCollectionRDD[55] at parallelize at <console>:24
    
    scala> z.aggregate("")((x,y) => math.max(x.length, y.length).toString, (x,y) => x+y)
    res44: String = 42
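
Same mechanics here: partition 0 ("12", "23") again folds to "2", while partition 1 ("345", "4567") folds "" → "3" → "4" (max of the running length 1 and 4). The combine order is not guaranteed, so both "42" and "24" are possible results.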
    
    scala> z.aggregate("")((x,y) => math.min(x.length, y.length).toString, (x,y) => x+ y)
    res51: String = 11
    
    scala> val z = sc.parallelize(List("12", "23", "234", ""), 2)
    z: org.apache.spark.rdd.RDD[String] = ParallelCollectionRDD[56] at parallelize at <console>:24
    
    scala> z.aggregate("")((x,y) => math.min(x.length, y.length).toString, (x,y) => x+y)
    res53: String = 01                                                              
    
    scala> z.aggregate("")((x,y) => math.min(x.length, y.length).toString, (x,y) => x+y)
    res55: String = 10
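
The min variant always starts from min("".length = 0, firstElement.length) = 0, so each partition's running value immediately becomes "0", and afterwards its length 1 competes with the remaining elements. That is why res51 is "11": both partitions of ("12", "23", "345", "4567") fold "" → "0" → "1". In the next list, the trailing empty string keeps partition 1 at "0" (("234", "") folds "" → "0" → "0"), while partition 0 folds to "1". The identical call then returns "01" once and "10" the next time because combOp concatenates the partition results in arrival order, which is not deterministic.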
    
    scala> val z = sc.parallelize(List("23", "12", "", "345"), 2)
    z: org.apache.spark.rdd.RDD[String] = ParallelCollectionRDD[57] at parallelize at <console>:24
    
    scala> z.aggregate("")((x,y) => math.min(x.length, y.length).toString, (x,y) => x+y)
    res57: String = 11
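
Rearranged so that both partitions fold to the same string (("23", "12") and ("", "345") each fold "" → "0" → "1"), the ambiguity disappears: "11" is the only possible result no matter which partition reports first.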
    
    scala> val rdd = sc.parallelize(List(("cat", 2),("cat", 5),("mouse", 4), ("cat", 12),("dog", 12),("mouse", 2)), 2)
    rdd: org.apache.spark.rdd.RDD[(String, Int)] = ParallelCollectionRDD[58] at parallelize at <console>:24
    
    scala> def func(index: Int, iter: Iterator[(String, Int)]): Iterator[String] ={
         | iter.toList.map(x => "[partId:" + index + ",val:" + x + "]").iterator
         | }
    func: (index: Int, iter: Iterator[(String, Int)])Iterator[String]
    
    scala> rdd.mapPartitionsWithIndex(func).collect
    res59: Array[String] = Array([partId:0,val:(cat,2)], [partId:0,val:(cat,5)], [partId:0,val:(mouse,4)], [partId:1,val:(cat,12)], [partId:1,val:(dog,12)], [partId:1,val:(mouse,2)])
    
    scala> rdd.aggregateByKey(0)(math.max(_,_), _+_).collect
    res60: Array[(String, Int)] = Array((dog,12), (cat,17), (mouse,6))
    
    
    scala> rdd.aggregateByKey(100)(math.max(_,_), _+_).collect
    res63: Array[(String, Int)] = Array((dog,100), (cat,200), (mouse,200))
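
Unlike aggregate, aggregateByKey applies the zero value once per key per partition (in seqOp only; it is not reused when partition results are combined). With zero 0: cat has max(2, 5) = 5 in partition 0 and 12 in partition 1, summing to 17; mouse gets 4 + 2 = 6; dog appears in only one partition, so 12. With zero 100, every per-partition maximum is at least 100: cat and mouse each become 100 + 100 = 200, and dog, present in a single partition, stays at 100.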
    
    scala> val x = sc.parallelize(List(1,2,3,4,5))
    x: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[65] at parallelize at <console>:24
    
    scala> val y = sc.parallelize(List(6,7,8,9,10))
    y: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[66] at parallelize at <console>:24
    
    scala> x.cartesian(y).collect
    res65: Array[(Int, Int)] = Array((1,6), (1,7), (1,8), (1,9), (1,10), (2,6), (2,7), (2,8), (2,9), (2,10), (3,6), (3,7), (3,8), (3,9), (3,10), (4,6), (4,7), (4,8), (4,9), (4,10), (5,6), (5,7), (5,8), (5,9), (5,10))
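
cartesian pairs every element of the first RDD with every element of the second, 5 × 5 = 25 tuples here. On real data the m × n blow-up makes this one of the most expensive transformations in the API, so it is best reserved for small RDDs.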
    
    
    scala> sc.setCheckpointDir("hdfs://mini1:9000/input")
    
    scala> val a = sc.parallelize(1 to 4)
    a: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[69] at parallelize at <console>:24
    
    scala> a.checkpoint
    
    scala> a.count
    res77: Long = 4                                                                 
    
    scala> res77
    res78: Long = 4
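
checkpoint only marks the RDD: the data is actually written to the directory registered with setCheckpointDir when the first action runs (count here). From then on the RDD's lineage is truncated and recovery reads the checkpointed files from HDFS instead of recomputing. The res78 line is just the REPL echoing the earlier res77 value by name.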
    
    scala> val y = sc.parallelize(1 to 10, 10)
    y: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[71] at parallelize at <console>:24
    
    scala> y.collect
    res79: Array[Int] = Array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
    
    scala> y.partitions.length
    res80: Int = 10
    
    scala> val z = y.coalesce(2, false)
    z: org.apache.spark.rdd.RDD[Int] = CoalescedRDD[72] at coalesce at <console>:26
    
scala> z.partition<Tab>
partitioner   partitions
    
    scala> z.partitions.length
    res81: Int = 2
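
coalesce(2, false) shrinks ten partitions to two without a shuffle by merging existing partitions; with shuffle = false it can only decrease the partition count. A short sketch of the related calls, reusing the y defined above:

    val z1 = y.coalesce(2)      // shuffle defaults to false: cheap merge, can only decrease
    val z2 = y.repartition(20)  // same as coalesce(20, shuffle = true): can also increase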
    
    scala> val a = sc.parallelize(List(1,2,1,3), 1)
    a: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[73] at parallelize at <console>:24
    
    scala> val b = a.map((_, "b"))
    b: org.apache.spark.rdd.RDD[(Int, String)] = MapPartitionsRDD[74] at map at <console>:26
    
    scala> val c = a.map((_, "c"))
    c: org.apache.spark.rdd.RDD[(Int, String)] = MapPartitionsRDD[75] at map at <console>:26
    
    scala> b.collect
    res82: Array[(Int, String)] = Array((1,b), (2,b), (1,b), (3,b))
    
    scala> b.cogroup(c).collect
    res83: Array[(Int, (Iterable[String], Iterable[String]))] = Array((1,(CompactBuffer(b, b),CompactBuffer(c, c))), (3,(CompactBuffer(b),CompactBuffer(c))), (2,(CompactBuffer(b),CompactBuffer(c))))
    
    scala> c.collect
    res84: Array[(Int, String)] = Array((1,c), (2,c), (1,c), (3,c))
    
    scala> val d = a.map((_, "d"))
    d: org.apache.spark.rdd.RDD[(Int, String)] = MapPartitionsRDD[78] at map at <console>:26
    
    scala> b.cogroup(c,d).collect
    res85: Array[(Int, (Iterable[String], Iterable[String], Iterable[String]))] = Array((1,(CompactBuffer(b, b),CompactBuffer(c, c),CompactBuffer(d, d))), (3,(CompactBuffer(b),CompactBuffer(c),CompactBuffer(d))), (2,(CompactBuffer(b),CompactBuffer(c),CompactBuffer(d))))
    
    scala> val x = sc.parallelize(List((1, "apple"), (2, "banana"), (3, "orange"), (4, "kiwi")), 2)
    x: org.apache.spark.rdd.RDD[(Int, String)] = ParallelCollectionRDD[81] at parallelize at <console>:24
    
    scala> val y = sc.parallelize(List((5,"computer"),(1, "laptop"),(1, "desktop"),(4, "ipad")), 2)
    y: org.apache.spark.rdd.RDD[(Int, String)] = ParallelCollectionRDD[82] at parallelize at <console>:24
    
    scala> x.cogroup(y).collect
    res86: Array[(Int, (Iterable[String], Iterable[String]))] = Array((4,(CompactBuffer(kiwi),CompactBuffer(ipad))), (2,(CompactBuffer(banana),CompactBuffer())), (1,(CompactBuffer(apple),CompactBuffer(desktop, laptop))), (3,(CompactBuffer(orange),CompactBuffer())), (5,(CompactBuffer(),CompactBuffer(computer))))
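
cogroup collects the values for each key from both RDDs into a pair of Iterables, keeping every key that occurs on either side; keys missing from one RDD simply get an empty CompactBuffer, as banana, orange, and computer show above. Overloads accept up to three other RDDs, which is what the earlier b.cogroup(c, d) call used.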
    
    scala> val a = sc.parallelize(List(1,2,1,3), 1)
    a: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[85] at parallelize at <console>:24
    
    scala> val b = a.zip(a)
    b: org.apache.spark.rdd.RDD[(Int, Int)] = ZippedPartitionsRDD2[86] at zip at <console>:26
    
    scala> b.collect
    res87: Array[(Int, Int)] = Array((1,1), (2,2), (1,1), (3,3))
    
    scala> b.collectAsMap
    res88: scala.collection.Map[Int,Int] = Map(2 -> 2, 1 -> 1, 3 -> 3)
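
Note how collectAsMap collapses duplicate keys: b contains (1,1) twice, but a Map can hold each key only once, so the result has three entries instead of four; only one of the duplicates survives.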
    
    scala> val a = sc.parallelize(List("dog", "cat","gnu","salmon","rabbit","turkey","wolf","bear","bee"), 3)
    a: org.apache.spark.rdd.RDD[String] = ParallelCollectionRDD[87] at parallelize at <console>:24
    
    scala> val b = sc.parallelize(List(1,1,2,2,2,1,2,2,2), 3)
    b: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[88] at parallelize at <console>:24
    
    scala> val c = b.zip(a)
    c: org.apache.spark.rdd.RDD[(Int, String)] = ZippedPartitionsRDD2[89] at zip at <console>:28
    
    scala> c.collect
    res89: Array[(Int, String)] = Array((1,dog), (1,cat), (2,gnu), (2,salmon), (2,rabbit), (1,turkey), (2,wolf), (2,bear), (2,bee))
    
    scala> val d = c.combineByKey(List(_), (x: List[String], y:String) => y :: x, (x:List[String], y: List[String]) => x ::: y)
    d: org.apache.spark.rdd.RDD[(Int, List[String])] = ShuffledRDD[90] at combineByKey at <console>:34
    
    scala> d.collect
    res90: Array[(Int, List[String])] = Array((1,List(turkey, cat, dog)), (2,List(bee, bear, wolf, gnu, rabbit, salmon)))
    
    scala> val d = c.combineByKey(List(_), (x: List[String], y: String) => x :+ y, (x:List[String], y: List[String]) => x ::: y)
    d: org.apache.spark.rdd.RDD[(Int, List[String])] = ShuffledRDD[91] at combineByKey at <console>:34
    
    scala> d.collect
    res91: Array[(Int, List[String])] = Array((1,List(dog, cat, turkey)), (2,List(wolf, bear, bee, gnu, salmon, rabbit)))
    
    scala> val d = c.combineByKey(List(_), (x: List[String], y: String) => x :+ y, (x:List[String], y: List[String]) => x ::: y).collect
    d: Array[(Int, List[String])] = Array((1,List(turkey, dog, cat)), (2,List(wolf, bear, bee, gnu, salmon, rabbit)))
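
combineByKey takes three functions: createCombiner (List(_)) turns the first value met for a key within a partition into a combiner, mergeValue folds that partition's further values into it, and mergeCombiners merges the per-partition combiners. The element order reveals which variant ran: y :: x prepends, reversing the within-partition order (turkey, cat, dog), while x :+ y appends and preserves it (dog, cat, turkey). The key-1 lists also differ between the last two runs because mergeCombiners, like combOp above, receives partition results in arrival order. The two list operators in plain Scala:

    "turkey" :: List("dog", "cat")  // List(turkey, dog, cat): prepend, reverses build order
    List("dog", "cat") :+ "turkey"  // List(dog, cat, turkey): append, preserves build order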
    
    
    scala> c
    res92: org.apache.spark.rdd.RDD[(Int, String)] = ZippedPartitionsRDD2[89] at zip at <console>:28
    
    scala> c.collect
    res93: Array[(Int, String)] = Array((1,dog), (1,cat), (2,gnu), (2,salmon), (2,rabbit), (1,turkey), (2,wolf), (2,bear), (2,bee))
    
    scala> c.partitions.length
    res94: Int = 3
    
scala> def func(index: Int, iter: Iterator[(Int, String)]) : Iterator[String] = {
     | iter.toList.map(x => "id " + index + ",val" + x + ".").iterator}
func: (index: Int, iter: Iterator[(Int, String)])Iterator[String]

scala> c.mapPartitionsWithIndex(func).collect
res97: Array[String] = Array(id 0,val(1,dog)., id 0,val(1,cat)., id 0,val(2,gnu)., id 1,val(2,salmon)., id 1,val(2,rabbit)., id 1,val(1,turkey)., id 2,val(2,wolf)., id 2,val(2,bear)., id 2,val(2,bee).)
    
    scala> c.partitions.length
    res98: Int = 3
    
    scala> c
    res99: org.apache.spark.rdd.RDD[(Int, String)] = ZippedPartitionsRDD2[89] at zip at <console>:28
    
    scala> c.collect
    res100: Array[(Int, String)] = Array((1,dog), (1,cat), (2,gnu), (2,salmon), (2,rabbit), (1,turkey), (2,wolf), (2,bear), (2,bee))
    
scala> def func(index: Int, iter: Iterator[(Int, String)]) : Iterator[String] = {
     | iter.toList.map(x => "id " + index + ",val" + x._2 + ".").iterator}
func: (index: Int, iter: Iterator[(Int, String)])Iterator[String]
    
    scala> val a = sc.parallelize(1 to 10000 , 20)
    a: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[96] at parallelize at <console>:24
    
    scala> val b = a++a++a++a++a
    b: org.apache.spark.rdd.RDD[Int] = UnionRDD[100] at $plus$plus at <console>:26
    
    scala> b.collect
    res101: Array[Int] = Array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176...

scala> val b = a++a
    b: org.apache.spark.rdd.RDD[Int] = UnionRDD[101] at $plus$plus at <console>:26
    
    scala> b.collect
    res102: Array[Int] = Array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176...
    
    scala> b.countApproxDistinct(0.05)
    res104: Long = 9760                                                             
    
    scala> b.countApproxDistinct(0.01)
    res105: Long = 9947
    
    scala> b.countApproxDistinct(0.001)
    res106: Long = 10000                                                            
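
countApproxDistinct estimates the number of distinct elements with a HyperLogLog-style sketch; its argument is the desired relative accuracy (relativeSD), so smaller values cost more memory but give tighter estimates. b holds each of 1 to 10000 twice, so the true answer is 10000: at 0.05 the estimate is off by about 2.4 percent, and at 0.001 it happens to be exact here. For an exact count at the price of a shuffle, one would use b.distinct.count.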
    
