
Analyzing How MapReduce Works from the Source Code, Part 2: The Map Phase

Author: scott_alpha | Published 2020-02-13 16:27

    一. Overview

    This series of articles revisits Hadoop internals.
    The map side of a job runs in five phases: the Read phase, the Map phase, the Collect phase, the Spill phase, and the Combine phase.
    The runNewMapper method of the MapTask class, which drives all five phases, is shown below.

    private <INKEY,INVALUE,OUTKEY,OUTVALUE>
      void runNewMapper(final JobConf job,
                        final TaskSplitIndex splitIndex,
                        final TaskUmbilicalProtocol umbilical,
                        TaskReporter reporter
                        ) throws IOException, ClassNotFoundException,
                                 InterruptedException {
       ...
        try {
          // 1. Read phase
          input.initialize(split, mapperContext);
          // 2. Map phase
          mapper.run(mapperContext);
          mapPhase.complete();
          setPhase(TaskStatus.Phase.SORT);
          statusUpdate(umbilical);
          input.close();
          input = null;
          // 3. Collect phase -> 4. Spill phase -> 5. Combine phase
          output.close(mapperContext);
          output = null;
        } finally {
          closeQuietly(input);
          closeQuietly(output, mapperContext);
        }
      }
    

    二. Read Phase

    In the Read phase, the MapTask uses the user-supplied RecordReader to parse key/value pairs, one at a time, from the input InputSplit.

    /**
     * The context that is given to the Mapper.
     */
    public class MapContextImpl<KEYIN,VALUEIN,KEYOUT,VALUEOUT> 
        extends TaskInputOutputContextImpl<KEYIN,VALUEIN,KEYOUT,VALUEOUT> 
        implements MapContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {
      private RecordReader<KEYIN,VALUEIN> reader;
      private InputSplit split;
    
      public MapContextImpl(Configuration conf, TaskAttemptID taskid,
                            RecordReader<KEYIN,VALUEIN> reader,
                            RecordWriter<KEYOUT,VALUEOUT> writer,
                            OutputCommitter committer,
                            StatusReporter reporter,
                            InputSplit split) {
        super(conf, taskid, writer, committer, reporter);
        this.reader = reader;
        this.split = split;
      }
    
      /**
       * Get the input split for this map.
       */
      public InputSplit getInputSplit() {
        return split;
      }
    
      @Override
      public KEYIN getCurrentKey() throws IOException, InterruptedException {
        return reader.getCurrentKey();
      }
    
      @Override
      public VALUEIN getCurrentValue() throws IOException, InterruptedException {
        return reader.getCurrentValue();
      }
      // Mapper.run() calls nextKeyValue() to fetch each input record
      @Override
      public boolean nextKeyValue() throws IOException, InterruptedException {
        return reader.nextKeyValue();
      }
    
    }
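
    To make the RecordReader contract concrete, here is a minimal, hypothetical reader (not part of Hadoop; the class name and in-memory data are purely illustrative) that serves key/value pairs from a fixed list. MapContextImpl simply delegates nextKeyValue()/getCurrentKey()/getCurrentValue() to whatever RecordReader the configured InputFormat created.

    import java.io.IOException;
    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.InputSplit;
    import org.apache.hadoop.mapreduce.RecordReader;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;

    // Hypothetical reader: emits (index, line) pairs from an in-memory list.
    // A real reader would pull its records from the InputSplit instead.
    public class InMemoryRecordReader extends RecordReader<LongWritable, Text> {

      private final List<String> lines = Arrays.asList("hello world", "hello hadoop");
      private int pos = -1;
      private final LongWritable key = new LongWritable();
      private final Text value = new Text();

      @Override
      public void initialize(InputSplit split, TaskAttemptContext context) {
        // nothing to open for the in-memory case
      }

      // Called by Mapper.run() (via MapContextImpl) for every record
      @Override
      public boolean nextKeyValue() {
        if (pos + 1 >= lines.size()) {
          return false;   // no more records: the run() loop exits
        }
        pos++;
        key.set(pos);
        value.set(lines.get(pos));
        return true;
      }

      @Override
      public LongWritable getCurrentKey() { return key; }

      @Override
      public Text getCurrentValue() { return value; }

      @Override
      public float getProgress() {
        return lines.isEmpty() ? 1.0f : (pos + 1) / (float) lines.size();
      }

      @Override
      public void close() throws IOException {
        // nothing to release
      }
    }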
    

    三. Map Phase

    In the Map phase, each parsed key/value pair is handed to the user-written map() function, which processes it and emits a series of new key/value pairs.

    public class Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {
    
      /**
       * The <code>Context</code> passed on to the {@link Mapper} implementations.
       */
      public abstract class Context
        implements MapContext<KEYIN,VALUEIN,KEYOUT,VALUEOUT> {
      }
      
      /**
       * Called once at the beginning of the task.
       */
      protected void setup(Context context
                           ) throws IOException, InterruptedException {
        // NOTHING
      }
    
      /**
       * Called once for each key/value pair in the input split. Most applications
       * should override this, but the default is the identity function.
       */
      // Applications usually override this method with their own map logic
      protected void map(KEYIN key, VALUEIN value, 
                         Context context) throws IOException, InterruptedException {
        context.write((KEYOUT) key, (VALUEOUT) value);
      }
    
      /**
       * Called once at the end of the task.
       */
      protected void cleanup(Context context
                             ) throws IOException, InterruptedException {
        // NOTHING
      }
      
      /**
       * Expert users can override this method for more complete control over the
       * execution of the Mapper.
       * @param context
       * @throws IOException
       */
      public void run(Context context) throws IOException, InterruptedException {
        setup(context);
        try {
          while (context.nextKeyValue()) {
            map(context.getCurrentKey(), context.getCurrentValue(), context);
          }
        } finally {
          cleanup(context);
        }
      }
    }
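
    As a concrete illustration of overriding map(), a word-count style mapper might look like the following sketch (the class name is chosen for this example):

    import java.io.IOException;
    import java.util.StringTokenizer;

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;

    // Example mapper: splits each input line into words and emits (word, 1).
    public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

      private static final IntWritable ONE = new IntWritable(1);
      private final Text word = new Text();

      @Override
      protected void map(LongWritable key, Text value, Context context)
          throws IOException, InterruptedException {
        StringTokenizer itr = new StringTokenizer(value.toString());
        while (itr.hasMoreTokens()) {
          word.set(itr.nextToken());
          // context.write() hands the pair to the output collector (Collect phase below)
          context.write(word, ONE);
        }
      }
    }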
    

    四. Collect Phase

    Inside the user-written map() function, once a record has been processed, the result is normally emitted via OutputCollector.collect() (context.write() in the new API). Internally, that call assigns the generated key/value pair to a partition (by invoking the Partitioner) and writes it into a circular in-memory buffer.
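
    The default partitioner is HashPartitioner; a custom one only has to map a key/value pair to a partition index in [0, numPartitions). A minimal sketch (the class name is illustrative, not from Hadoop):

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Partitioner;

    // Example partitioner: route each key to a reducer by its first character.
    public class FirstCharPartitioner extends Partitioner<Text, IntWritable> {
      @Override
      public int getPartition(Text key, IntWritable value, int numPartitions) {
        if (key.getLength() == 0) {
          return 0;
        }
        // mask off the sign bit so the result is non-negative, then wrap into range
        return (key.charAt(0) & Integer.MAX_VALUE) % numPartitions;
      }
    }

    When the map task finishes, output.close(mapperContext) (step 3 in runNewMapper above) ends up in NewOutputCollector.close(), which flushes the buffer: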

    public void close(TaskAttemptContext context
                          ) throws IOException,InterruptedException {
          try {
            collector.flush(); 
          } catch (ClassNotFoundException cnf) {
            throw new IOException("can't find class ", cnf);
          }
          collector.close();
        }
    

    Next, look at the flush() method:

     public void flush() throws IOException, ClassNotFoundException,
               InterruptedException {
          LOG.info("Starting flush of map output");
          spillLock.lock();
          try {
            while (spillInProgress) {
              reporter.progress();
              spillDone.await();
            }
            checkSpillException();
    
            final int kvbend = 4 * kvend;
            if ((kvbend + METASIZE) % kvbuffer.length !=
                equator - (equator % METASIZE)) {
              // spill finished
              resetSpill();
            }
            if (kvindex != kvend) {
              kvend = (kvindex + NMETA) % kvmeta.capacity();
              bufend = bufmark;
              LOG.info("Spilling map output");
              LOG.info("bufstart = " + bufstart + "; bufend = " + bufmark +
                       "; bufvoid = " + bufvoid);
              LOG.info("kvstart = " + kvstart + "(" + (kvstart * 4) +
                       "); kvend = " + kvend + "(" + (kvend * 4) +
                       "); length = " + (distanceTo(kvend, kvstart,
                             kvmeta.capacity()) + 1) + "/" + maxRec);
          // sort and spill
              sortAndSpill();
            }
          } catch (InterruptedException e) {
            throw new IOException("Interrupted while waiting for the writer", e);
          } finally {
            spillLock.unlock();
          }
          assert !spillLock.isHeldByCurrentThread();
          // shut down spill thread and wait for it to exit. Since the preceding
          // ensures that it is finished with its work (and sortAndSpill did not
          // throw), we elect to use an interrupt instead of setting a flag.
          // Spilling simultaneously from this thread while the spill thread
          // finishes its work might be both a useful way to extend this and also
          // sufficient motivation for the latter approach.
          try {
            spillThread.interrupt();
            spillThread.join();
          } catch (InterruptedException e) {
            throw new IOException("Spill failed", e);
          }
          // release sort buffer before the merge
          kvbuffer = null;
          // merge the spill files into a single output file
          mergeParts();
          Path outputPath = mapOutputFile.getOutputFile();
          fileOutputByteCounter.increment(rfs.getFileStatus(outputPath).getLen());
        }
    

    五. Spill Phase

    In the Spill phase, once the circular buffer fills up, MapReduce writes the buffered data to local disk, producing a temporary file. Note that before the data is written to disk, it is first sorted locally and, where necessary, combined and compressed.
    Spill phase details:
    Step 1: Sort the data in the buffer with quick sort. Records are ordered first by partition number and then by key, so that after sorting the data is grouped by partition and, within each partition, ordered by key (see the compare() sketch after this list).
    Step 2: Write the data of each partition, in increasing partition order, to the temporary file output/spillN.out (N is the current spill count) under the task's working directory. If the user has configured a Combiner, the data of each partition is aggregated once before being written out.
    Step 3: Record the metadata of every partition in the in-memory index structure SpillRecord; for each partition this includes its offset within the temporary file, its uncompressed size, and its compressed size. If the in-memory index exceeds 1 MB, it is written out to the file output/spillN.out.index.
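
    The ordering used in Step 1 comes from MapOutputBuffer, which implements IndexedSortable over its metadata records. Its compare() method looks roughly like the following sketch (paraphrased from the Hadoop source; fields such as kvmeta, kvbuffer and the KEYSTART/VALSTART/PARTITION offsets also appear in sortAndSpill() below):

    // Paraphrased sketch: metadata entries are ordered by partition number first,
    // then by the raw serialized key bytes within the same partition.
    public int compare(final int mi, final int mj) {
      final int kvi = offsetFor(mi % maxRec);
      final int kvj = offsetFor(mj % maxRec);
      final int kvip = kvmeta.get(kvi + PARTITION);
      final int kvjp = kvmeta.get(kvj + PARTITION);
      // sort by partition
      if (kvip != kvjp) {
        return kvip - kvjp;
      }
      // sort by key, comparing the serialized bytes in kvbuffer
      return comparator.compare(kvbuffer,
          kvmeta.get(kvi + KEYSTART),
          kvmeta.get(kvi + VALSTART) - kvmeta.get(kvi + KEYSTART),
          kvbuffer,
          kvmeta.get(kvj + KEYSTART),
          kvmeta.get(kvj + VALSTART) - kvmeta.get(kvj + KEYSTART));
    }

    The full sortAndSpill() method: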

    private void sortAndSpill() throws IOException, ClassNotFoundException,
                                           InterruptedException {
          //approximate the length of the output file to be the length of the
          //buffer + header lengths for the partitions
          final long size = distanceTo(bufstart, bufend, bufvoid) +
                      partitions * APPROX_HEADER_LENGTH;
          FSDataOutputStream out = null;
          try {
            // create spill file
            final SpillRecord spillRec = new SpillRecord(partitions);
            final Path filename =
                mapOutputFile.getSpillFileForWrite(numSpills, size);
            out = rfs.create(filename);
    
            final int mstart = kvend / NMETA;
            final int mend = 1 + // kvend is a valid record
              (kvstart >= kvend
              ? kvstart
              : kvmeta.capacity() + kvstart) / NMETA;
            // sort with quick sort
            sorter.sort(MapOutputBuffer.this, mstart, mend, reporter);
            int spindex = mstart;
            final IndexRecord rec = new IndexRecord();
            final InMemValBytes value = new InMemValBytes();
            for (int i = 0; i < partitions; ++i) {
              IFile.Writer<K, V> writer = null;
              try {
                long segmentStart = out.getPos();
                FSDataOutputStream partitionOut = CryptoUtils.wrapIfNecessary(job, out);
                writer = new Writer<K, V>(job, partitionOut, keyClass, valClass, codec,
                                          spilledRecordsCounter);
                if (combinerRunner == null) {
                  // spill directly
                  DataInputBuffer key = new DataInputBuffer();
                  while (spindex < mend &&
                      kvmeta.get(offsetFor(spindex % maxRec) + PARTITION) == i) {
                    final int kvoff = offsetFor(spindex % maxRec);
                    int keystart = kvmeta.get(kvoff + KEYSTART);
                    int valstart = kvmeta.get(kvoff + VALSTART);
                    key.reset(kvbuffer, keystart, valstart - keystart);
                    getVBytesForOffset(kvoff, value);
                    writer.append(key, value);
                    ++spindex;
                  }
                } else {
                  int spstart = spindex;
                  while (spindex < mend &&
                      kvmeta.get(offsetFor(spindex % maxRec)
                                + PARTITION) == i) {
                    ++spindex;
                  }
                  // Note: we would like to avoid the combiner if we've fewer
                  // than some threshold of records for a partition
                  if (spstart != spindex) {
                    combineCollector.setWriter(writer);
                    RawKeyValueIterator kvIter =
                      new MRResultIterator(spstart, spindex);
                    combinerRunner.combine(kvIter, combineCollector);
                  }
                }
    
                // close the writer
                writer.close();
    
                // record offsets
                rec.startOffset = segmentStart;
                rec.rawLength = writer.getRawLength() + CryptoUtils.cryptoPadding(job);
                rec.partLength = writer.getCompressedLength() + CryptoUtils.cryptoPadding(job);
                spillRec.putIndex(rec, i);
    
                writer = null;
              } finally {
                if (null != writer) writer.close();
              }
            }
    
            if (totalIndexCacheMemory >= indexCacheMemoryLimit) {
              // create spill index file
              Path indexFilename =
                  mapOutputFile.getSpillIndexFileForWrite(numSpills, partitions
                      * MAP_OUTPUT_INDEX_RECORD_LENGTH);
              spillRec.writeToFile(indexFilename, job);
            } else {
              indexCacheList.add(spillRec);
              totalIndexCacheMemory +=
                spillRec.size() * MAP_OUTPUT_INDEX_RECORD_LENGTH;
            }
            LOG.info("Finished spill " + numSpills);
            ++numSpills;
          } finally {
            if (out != null) out.close();
          }
        }
    

    Now step into the QuickSort implementation:

    public final class QuickSort implements IndexedSorter {
    
      private static final IndexedSorter alt = new HeapSort();
    
      public QuickSort() { }
    
      private static void fix(IndexedSortable s, int p, int r) {
        if (s.compare(p, r) > 0) {
          s.swap(p, r);
        }
      }
    
      /**
       * Deepest recursion before giving up and doing a heapsort.
       * Returns 2 * ceil(log(n)).
       */
      protected static int getMaxDepth(int x) {
        if (x <= 0)
          throw new IllegalArgumentException("Undefined for " + x);
        return (32 - Integer.numberOfLeadingZeros(x - 1)) << 2;
      }
    
      /**
       * Sort the given range of items using quick sort.
       * {@inheritDoc} If the recursion depth falls below {@link #getMaxDepth},
       * then switch to {@link HeapSort}.
       */
      @Override
      public void sort(IndexedSortable s, int p, int r) {
        sort(s, p, r, null);
      }
    
      @Override
      public void sort(final IndexedSortable s, int p, int r,
          final Progressable rep) {
        sortInternal(s, p, r, rep, getMaxDepth(r - p));
      }
    
      private static void sortInternal(final IndexedSortable s, int p, int r,
          final Progressable rep, int depth) {
        if (null != rep) {
          rep.progress();
        }
        while (true) {
        if (r-p < 13) {
          for (int i = p; i < r; ++i) {
            for (int j = i; j > p && s.compare(j-1, j) > 0; --j) {
              s.swap(j, j-1);
            }
          }
          return;
        }
        if (--depth < 0) {
          // give up
          alt.sort(s, p, r, rep);
          return;
        }
    
        // select, move pivot into first position
        fix(s, (p+r) >>> 1, p);
        fix(s, (p+r) >>> 1, r - 1);
        fix(s, p, r-1);
    
        // Divide
        int i = p;
        int j = r;
        int ll = p;
        int rr = r;
        int cr;
        while(true) {
          while (++i < j) {
            if ((cr = s.compare(i, p)) > 0) break;
            if (0 == cr && ++ll != i) {
              s.swap(ll, i);
            }
          }
          while (--j > i) {
            if ((cr = s.compare(p, j)) > 0) break;
            if (0 == cr && --rr != j) {
              s.swap(rr, j);
            }
          }
          if (i < j) s.swap(i, j);
          else break;
        }
        j = i;
        // swap pivot- and all eq values- into position
        while (ll >= p) {
          s.swap(ll--, --i);
        }
        while (rr < r) {
          s.swap(rr++, j++);
        }
    
        // Conquer
        // Recurse on smaller interval first to keep stack shallow
        assert i != j;
        if (i - p < r - j) {
          sortInternal(s, p, i, rep, depth);
          p = j;
        } else {
          sortInternal(s, j, r, rep, depth);
          r = i;
        }
        }
      }
    
    }
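
    Note that QuickSort never touches keys or values directly; it only works through the IndexedSortable interface (compare/swap by index), which is what lets it reorder the buffer metadata without deserializing records. As a toy, self-contained illustration of that interface (not from Hadoop), the same sorter can order a plain int array:

    import java.util.Arrays;

    import org.apache.hadoop.util.IndexedSortable;
    import org.apache.hadoop.util.QuickSort;

    // Toy example: sort an int[] through the same IndexedSortable interface that
    // MapOutputBuffer implements over its in-memory metadata records.
    public class QuickSortDemo {
      public static void main(String[] args) {
        final int[] data = {5, 3, 8, 1, 9, 2};

        IndexedSortable sortable = new IndexedSortable() {
          @Override
          public int compare(int i, int j) {
            return Integer.compare(data[i], data[j]);
          }

          @Override
          public void swap(int i, int j) {
            int tmp = data[i];
            data[i] = data[j];
            data[j] = tmp;
          }
        };

        // sort the index range [0, data.length)
        new QuickSort().sort(sortable, 0, data.length);
        System.out.println(Arrays.toString(data));   // [1, 2, 3, 5, 8, 9]
      }
    }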
    

    六. Combine Phase

    When all of the input has been processed, the MapTask merges its temporary files once, ensuring that only a single output data file is produced.
    Concretely, all spill files are merged into one large file saved as output/file.out, and a corresponding index file output/file.out.index is generated alongside it.
    The merge proceeds partition by partition. For each partition, MapTask merges in multiple rounds: each round merges io.sort.factor (default 10) files, the resulting file is added back to the list of files awaiting merging, and after re-sorting that list the process repeats until a single large file remains.
    Having each MapTask produce only one data file avoids the overhead of opening a large number of files at once and the random reads caused by reading many small files at the same time.
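
    Several configuration knobs influence the spill and merge behaviour described above. A hedged driver-side sketch (property names are from mapred-default.xml; the values here are illustrative, not recommendations):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    // Illustrative tuning fragment; defaults are a 100 MB buffer, spill at 80%,
    // merge factor 10, and combine-at-merge when there are at least 3 spills.
    public class MergeTuningExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        conf.setInt("mapreduce.task.io.sort.mb", 200);            // circular buffer size
        conf.setFloat("mapreduce.map.sort.spill.percent", 0.80f); // spill threshold
        conf.setInt("mapreduce.task.io.sort.factor", 20);         // files merged per round
        conf.setInt("mapreduce.map.combine.minspills", 3);        // minSpillsForCombine
    
        Job job = Job.getInstance(conf, "merge-tuning-example");
        // job.setCombinerClass(MyCombiner.class);  // hypothetical combiner class
      }
    }

    The merge itself is implemented in mergeParts():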
    
    private void mergeParts() throws IOException, InterruptedException, 
                                         ClassNotFoundException {
          // get the approximate size of the final output/index files
          long finalOutFileSize = 0;
          long finalIndexFileSize = 0;
          final Path[] filename = new Path[numSpills];
          final TaskAttemptID mapId = getTaskID();
    
          for(int i = 0; i < numSpills; i++) {
            filename[i] = mapOutputFile.getSpillFile(i);
            finalOutFileSize += rfs.getFileStatus(filename[i]).getLen();
          }
          if (numSpills == 1) { //the spill is the final output
            sameVolRename(filename[0],
                mapOutputFile.getOutputFileForWriteInVolume(filename[0]));
            if (indexCacheList.size() == 0) {
              sameVolRename(mapOutputFile.getSpillIndexFile(0),
                mapOutputFile.getOutputIndexFileForWriteInVolume(filename[0]));
            } else {
              indexCacheList.get(0).writeToFile(
                mapOutputFile.getOutputIndexFileForWriteInVolume(filename[0]), job);
            }
            sortPhase.complete();
            return;
          }
    
          // read in paged indices
          for (int i = indexCacheList.size(); i < numSpills; ++i) {
            Path indexFileName = mapOutputFile.getSpillIndexFile(i);
            indexCacheList.add(new SpillRecord(indexFileName, job));
          }
    
          //make correction in the length to include the sequence file header
          //lengths for each partition
          finalOutFileSize += partitions * APPROX_HEADER_LENGTH;
          finalIndexFileSize = partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH;
          Path finalOutputFile =
              mapOutputFile.getOutputFileForWrite(finalOutFileSize);
          Path finalIndexFile =
              mapOutputFile.getOutputIndexFileForWrite(finalIndexFileSize);
    
          //The output stream for the final single output file
          FSDataOutputStream finalOut = rfs.create(finalOutputFile, true, 4096);
    
          if (numSpills == 0) {
            //create dummy files
            IndexRecord rec = new IndexRecord();
            SpillRecord sr = new SpillRecord(partitions);
            try {
              for (int i = 0; i < partitions; i++) {
                long segmentStart = finalOut.getPos();
                FSDataOutputStream finalPartitionOut = CryptoUtils.wrapIfNecessary(job, finalOut);
                Writer<K, V> writer =
                  new Writer<K, V>(job, finalPartitionOut, keyClass, valClass, codec, null);
                writer.close();
                rec.startOffset = segmentStart;
                rec.rawLength = writer.getRawLength() + CryptoUtils.cryptoPadding(job);
                rec.partLength = writer.getCompressedLength() + CryptoUtils.cryptoPadding(job);
                sr.putIndex(rec, i);
              }
              sr.writeToFile(finalIndexFile, job);
            } finally {
              finalOut.close();
            }
            sortPhase.complete();
            return;
          }
          {
            sortPhase.addPhases(partitions); // Divide sort phase into sub-phases
            
            IndexRecord rec = new IndexRecord();
            final SpillRecord spillRec = new SpillRecord(partitions);
            for (int parts = 0; parts < partitions; parts++) {
              //create the segments to be merged
              List<Segment<K,V>> segmentList =
                new ArrayList<Segment<K, V>>(numSpills);
              for(int i = 0; i < numSpills; i++) {
                IndexRecord indexRecord = indexCacheList.get(i).getIndex(parts);
    
                Segment<K,V> s =
                  new Segment<K,V>(job, rfs, filename[i], indexRecord.startOffset,
                                   indexRecord.partLength, codec, true);
                segmentList.add(i, s);
    
                if (LOG.isDebugEnabled()) {
                  LOG.debug("MapId=" + mapId + " Reducer=" + parts +
                      "Spill =" + i + "(" + indexRecord.startOffset + "," +
                      indexRecord.rawLength + ", " + indexRecord.partLength + ")");
                }
              }
    
              int mergeFactor = job.getInt(JobContext.IO_SORT_FACTOR, 100);
              // sort the segments only if there are intermediate merges
              boolean sortSegments = segmentList.size() > mergeFactor;
              //merge
              @SuppressWarnings("unchecked")
              RawKeyValueIterator kvIter = Merger.merge(job, rfs,
                             keyClass, valClass, codec,
                             segmentList, mergeFactor,
                             new Path(mapId.toString()),
                             job.getOutputKeyComparator(), reporter, sortSegments,
                             null, spilledRecordsCounter, sortPhase.phase(),
                             TaskType.MAP);
    
              //write merged output to disk
              long segmentStart = finalOut.getPos();
              FSDataOutputStream finalPartitionOut = CryptoUtils.wrapIfNecessary(job, finalOut);
              Writer<K, V> writer =
                  new Writer<K, V>(job, finalPartitionOut, keyClass, valClass, codec,
                                   spilledRecordsCounter);
              if (combinerRunner == null || numSpills < minSpillsForCombine) {
                Merger.writeFile(kvIter, writer, reporter, job);
              } else {
                combineCollector.setWriter(writer);
                combinerRunner.combine(kvIter, combineCollector);
              }
    
              //close
              writer.close();
    
              sortPhase.startNextPhase();
              
              // record offsets
              rec.startOffset = segmentStart;
              rec.rawLength = writer.getRawLength() + CryptoUtils.cryptoPadding(job);
              rec.partLength = writer.getCompressedLength() + CryptoUtils.cryptoPadding(job);
              spillRec.putIndex(rec, parts);
            }
            spillRec.writeToFile(finalIndexFile, job);
            finalOut.close();
            for(int i = 0; i < numSpills; i++) {
              rfs.delete(filename[i],true);
            }
          }
        }
    
