
Data Algorithms: Hadoop/Spark Big Data Processing --- Chapter 2

Author: _Kantin | Published 2018-01-04 21:47

In the previous chapter we implemented a secondary sort, but the values being sorted were Strings or Integers. In this chapter the value becomes a tuple, as in the sample records given for this chapter: LIMM,2013-12-05,97.65 and KKIA,2014-15-05,107.65.


In Hadoop, map output is partitioned and then sorted by key within each partition. The partitioner decides which reducer each record is sent to, while the in-partition sort lays the groundwork for the ordering the reducer sees.


Accordingly, this chapter first sets up a partitioner that decides which partition each key goes to, then sorts the composite keys inside each partition (the reducer depends on this ordering), and finally groups the records by their natural key. The overall effect is to turn the raw (key, value) input into records of the form (key, {a1, a2, a3, ...}), where the values arrive at the reducer already sorted, as illustrated below.
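
As a small illustration (the second LIMM row below is made up for the example), the mapper emits one (CompositeKey, NaturalValue) pair per input line, and the framework hands them to the reducer grouped by stock symbol, with the values already sorted by date:

    Input lines                     What each reduce() call sees
    LIMM,2013-12-07,99.10           LIMM -> {(2013-12-05,97.65), (2013-12-07,99.10)}
    LIMM,2013-12-05,97.65
    KKIA,2014-15-05,107.65          KKIA -> {(2014-15-05,107.65)}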


Class                          Description
CompositeKey                   Defines the composite key {stockSymbol, timestamp}
NaturalValue                   Defines the natural value {timestamp, price}
NaturalKeyPartitioner          Partitions records by the natural key
NaturalKeyGroupingComparator   Defines how keys are grouped by the natural key
CompositeKeyComparator         Defines how composite keys are sorted within a partition
SecondarySortDriver            Main driver class (job entry point)
SecondarySortMapper            The map function
SecondarySortReducer           The reduce function

1.SecondarySortDriver.java

        Configuration conf = new Configuration();

        // parse generic options first, so any -D settings land in conf before the Job copies it
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
           System.err.println("Usage: SecondarySortDriver <input> <output>");
           System.exit(1);
        }

        // add jars to distributed cache
        HadoopUtil.addJarsToDistributedCache(conf, "/lib/");

        // create the job only after conf is fully populated
        Job job = new Job(conf, "Secondary Sort");
       
        job.setJarByClass(SecondarySortDriver.class);
        
       // set mapper and reducer
        job.setMapperClass(SecondarySortMapper.class);
        job.setReducerClass(SecondarySortReducer.class);
        
        // map output key/value classes: the composite key and the natural value
        job.setMapOutputKeyClass(CompositeKey.class);
        job.setMapOutputValueClass(NaturalValue.class);
              
        // define reducer's output key-value
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        // set the partitioner on the natural key
        job.setPartitionerClass(NaturalKeyPartitioner.class);
        // group keys by the natural key, so one reduce() call sees all values for a stock symbol
        job.setGroupingComparatorClass(NaturalKeyGroupingComparator.class);
        // sort composite keys within each partition: by stock symbol, then by timestamp
        job.setSortComparatorClass(CompositeKeyComparator.class);
        
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));

        job.waitForCompletion(true);
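
The snippet above is only the body of the driver; in the full class it would sit inside a main() method (or a Tool.run() method) of SecondarySortDriver. A minimal sketch of such a wrapper, assuming a plain static main (the book's original may be organized differently):

    // Hypothetical wrapper; the job setup shown above goes where indicated.
    public class SecondarySortDriver {
        public static void main(String[] args) throws Exception {
            // ... build the Configuration, parse the arguments, configure the Job as above ...
            // a common pattern is to exit with the job status:
            // System.exit(job.waitForCompletion(true) ? 0 : 1);
        }
    }

The packaged jar can then be submitted with the standard hadoop jar command, for example hadoop jar secondarysort.jar SecondarySortDriver <input> <output> (the jar name here is made up).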

2.CompositeKey.java

// The natural key is stockSymbol; the composite key is {stockSymbol, timestamp}
private String stockSymbol;
private long timestamp;
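
Only the fields are excerpted above. For Hadoop to serialize and sort the composite key, the class has to implement WritableComparable; the set() method and getters below are the ones used by the other snippets in this chapter, while the serialization and default ordering are a minimal sketch rather than the book's exact code:

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    import org.apache.hadoop.io.WritableComparable;

    // Sketch of the full composite key; details may differ from the original.
    public class CompositeKey implements WritableComparable<CompositeKey> {

        private String stockSymbol;   // natural key
        private long timestamp;       // secondary sort field

        public CompositeKey() {}      // no-arg constructor required for deserialization

        // used by the mapper: reducerKey.set(tokens[0], timestamp)
        public void set(String stockSymbol, long timestamp) {
            this.stockSymbol = stockSymbol;
            this.timestamp = timestamp;
        }

        public String getStockSymbol() { return stockSymbol; }
        public long getTimestamp()     { return timestamp; }

        @Override
        public void write(DataOutput out) throws IOException {
            out.writeUTF(stockSymbol);
            out.writeLong(timestamp);
        }

        @Override
        public void readFields(DataInput in) throws IOException {
            stockSymbol = in.readUTF();
            timestamp = in.readLong();
        }

        @Override
        public int compareTo(CompositeKey other) {
            // symbol first, then timestamp; CompositeKeyComparator enforces the same order at sort time
            int cmp = stockSymbol.compareTo(other.stockSymbol);
            return (cmp != 0) ? cmp : Long.compare(timestamp, other.timestamp);
        }
    }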

3.NaturalValue.java

    // the natural value: {timestamp, price}
    private long timestamp;
    private double price;
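
Again only the fields are shown. The value type needs to implement Writable so it can travel from the mapper to the reducer; a minimal sketch, with set() and the getters matching the calls made in the mapper and reducer (the original may differ, e.g. it might also implement Comparable):

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    import org.apache.hadoop.io.Writable;

    // Sketch of the value type carried from map to reduce.
    public class NaturalValue implements Writable {

        private long timestamp;
        private double price;

        public NaturalValue() {}

        // used by the mapper: reducerValue.set(timestamp, price)
        public void set(long timestamp, double price) {
            this.timestamp = timestamp;
            this.price = price;
        }

        public long getTimestamp() { return timestamp; }
        public double getPrice()   { return price; }

        @Override
        public void write(DataOutput out) throws IOException {
            out.writeLong(timestamp);
            out.writeDouble(price);
        }

        @Override
        public void readFields(DataInput in) throws IOException {
            timestamp = in.readLong();
            price = in.readDouble();
        }
    }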

4.NaturalKeyPartitioner.java

// Assign the partition from the hash of the stock symbol, so all records with the same symbol go to the same reducer
@Override
    public int getPartition(CompositeKey key, 
                            NaturalValue value,
                            int numberOfPartitions) {
        return Math.abs((int) (hash(key.getStockSymbol()) % numberOfPartitions));
    }
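
The hash() helper is not part of the excerpt. A minimal sketch of the complete partitioner, assuming a plain hashCode()-based helper (the book's hash function may be different); the important point is that only the stock symbol, never the timestamp, decides the partition:

    import org.apache.hadoop.mapreduce.Partitioner;

    // Sketch: partition on the natural key only, so every record for a given
    // stock symbol lands in the same partition (and therefore the same reducer).
    public class NaturalKeyPartitioner extends Partitioner<CompositeKey, NaturalValue> {

        @Override
        public int getPartition(CompositeKey key, NaturalValue value, int numberOfPartitions) {
            return Math.abs(hash(key.getStockSymbol()) % numberOfPartitions);
        }

        // hypothetical helper; the original hash() is not shown in the excerpt
        private static int hash(String s) {
            return (s == null) ? 0 : s.hashCode();
        }
    }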

5.NaturalKeyGroupingComparator.java

// Group keys by the natural key (stock symbol); the reducer relies on this so that one reduce() call receives all values for a symbol
@Override
    public int compare(WritableComparable wc1, WritableComparable wc2) {
        CompositeKey ck1 = (CompositeKey) wc1;
        CompositeKey ck2 = (CompositeKey) wc2;
        return ck1.getStockSymbol().compareTo(ck2.getStockSymbol());
    }
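
The compare() method alone is not enough; the grouping comparator has to extend WritableComparator and register CompositeKey through a protected constructor so the framework can instantiate the keys. A sketch of the surrounding class (the declaration is assumed, since the excerpt omits it):

    import org.apache.hadoop.io.WritableComparable;
    import org.apache.hadoop.io.WritableComparator;

    public class NaturalKeyGroupingComparator extends WritableComparator {

        protected NaturalKeyGroupingComparator() {
            super(CompositeKey.class, true);   // true: let the comparator create CompositeKey instances
        }

        @Override
        public int compare(WritableComparable wc1, WritableComparable wc2) {
            // two composite keys belong to the same reduce group if their stock symbols match
            CompositeKey ck1 = (CompositeKey) wc1;
            CompositeKey ck2 = (CompositeKey) wc2;
            return ck1.getStockSymbol().compareTo(ck2.getStockSymbol());
        }
    }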

6.CompositeKeyComparator.java

// Define how composite keys are ordered: by stock symbol first, then by timestamp
  @Override
    public int compare(WritableComparable wc1, WritableComparable wc2) {
        CompositeKey ck1 = (CompositeKey) wc1;
        CompositeKey ck2 = (CompositeKey) wc2;

        int comparison = ck1.getStockSymbol().compareTo(ck2.getStockSymbol());
        // compare timestamps only when the stock symbols are equal
        if (comparison == 0) {
            // stock symbols are equal here
            if (ck1.getTimestamp() == ck2.getTimestamp()) {
                return 0;
            }
            else if (ck1.getTimestamp() < ck2.getTimestamp()) {
                return -1;
            }
            else {
                return 1;
            }
        }
        else {
            return comparison;
        }
    }
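
This class also extends WritableComparator and registers CompositeKey in its constructor, just like the grouping comparator; the nested timestamp comparison above can be written more compactly with Long.compare. An equivalent sketch (same behaviour, not necessarily the book's exact code):

    import org.apache.hadoop.io.WritableComparable;
    import org.apache.hadoop.io.WritableComparator;

    public class CompositeKeyComparator extends WritableComparator {

        protected CompositeKeyComparator() {
            super(CompositeKey.class, true);
        }

        @Override
        public int compare(WritableComparable wc1, WritableComparable wc2) {
            CompositeKey ck1 = (CompositeKey) wc1;
            CompositeKey ck2 = (CompositeKey) wc2;
            // order by stock symbol, then by timestamp (ascending)
            int cmp = ck1.getStockSymbol().compareTo(ck2.getStockSymbol());
            return (cmp != 0) ? cmp : Long.compare(ck1.getTimestamp(), ck2.getTimestamp());
        }
    }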

7.SecondarySortMapper.java

    // reusable output objects: the composite key and the natural value
   private final CompositeKey reducerKey = new CompositeKey();
   private final NaturalValue reducerValue = new NaturalValue();
       
    
    @Override
    public void map(LongWritable key, 
                    Text value,
                    Context context) 
       throws IOException, InterruptedException {
               
       String[] tokens = StringUtils.split(value.toString().trim(), ",");
       if (tokens.length == 3) {
          // tokens[0] = stockSymbol
          // tokens[1] = timestamp (as date)
          // tokens[2] = price as double
          Date date = DateUtil.getDate(tokens[1]);
          if (date == null) {
             return;
          }
          long timestamp = date.getTime();
          // populate the composite key and the natural value
          reducerKey.set(tokens[0], timestamp); 
          reducerValue.set(timestamp, Double.parseDouble(tokens[2]));
          // emit key-value pair
          context.write(reducerKey, reducerValue);
       }
    }
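
DateUtil is a small helper used by both the mapper and the reducer but not shown in the excerpt. A hypothetical sketch, assuming the dates come in yyyy-MM-dd form as in the sample records (the original implementation may differ):

    import java.text.ParseException;
    import java.text.SimpleDateFormat;
    import java.util.Date;

    // Hypothetical DateUtil; only the two methods used in this chapter are sketched.
    public class DateUtil {

        private static final String DATE_FORMAT = "yyyy-MM-dd";

        // parse a string such as "2013-12-05" into a Date, or return null if it is malformed
        public static Date getDate(String dateAsString) {
            try {
                return new SimpleDateFormat(DATE_FORMAT).parse(dateAsString);
            } catch (ParseException e) {
                return null;
            }
        }

        // format a timestamp in milliseconds back into "yyyy-MM-dd"
        public static String getDateAsString(long timestamp) {
            return new SimpleDateFormat(DATE_FORMAT).format(new Date(timestamp));
        }
    }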

8.SecondarySortReducer.java

// The reduce() method; for each stock symbol the values arrive already sorted by timestamp
public void reduce(CompositeKey key, 
                       Iterable<NaturalValue> values,
                       Context context)
       throws IOException, InterruptedException {

        // concatenate the (date,price) pairs with a StringBuilder; they are already sorted by timestamp
        StringBuilder builder = new StringBuilder();
        for (NaturalValue data : values) {
             builder.append("(");
             String dateAsString = DateUtil.getDateAsString(data.getTimestamp());
             double price = data.getPrice();
             builder.append(dateAsString);
             builder.append(",");
             builder.append(price);          
             builder.append(")");
        }
        // emit the natural key and the concatenated values; they go to the output path set in SecondarySortDriver
        context.write(new Text(key.getStockSymbol()), new Text(builder.toString()));
    } // reduce
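
With the sample records from the top of the chapter (plus the made-up second LIMM row used earlier), the output written by TextOutputFormat would look roughly like this: one line per stock symbol, a tab between the symbol and the concatenated values, and the values already in date order:

    LIMM    (2013-12-05,97.65)(2013-12-07,99.1)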
