Hadoop MapReduce: OutputFormat Data Output

Author: 大数据小同学 | Published 2020-04-15 08:50

    OutputFormat Interface Implementations

    OutputFormat is the base class for MapReduce output; every class that implements MapReduce output implements the OutputFormat interface. Below are a few common OutputFormat implementations.

    1. TextOutputFormat (text output)
      TextOutputFormat is the default output format; it writes each record as a line of text. Its keys and values may be of any type, because TextOutputFormat calls toString() to convert them to strings.
    2. SequenceFileOutputFormat
      SequenceFileOutputFormat output makes a good input for a follow-up MapReduce job, because the format is compact and compresses easily (see the configuration sketch after this list).
    3. Custom OutputFormat
      Implement your own output according to the application's needs.
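
    As a minimal configuration sketch (the class name OutputFormatConfigSketch and the block-compression choice are illustrative, not from the original article), the built-in formats are selected on the Job like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.SequenceFile.CompressionType;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
    import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
    public class OutputFormatConfigSketch {
        public static void main(String[] args) throws Exception {
            Job job = Job.getInstance(new Configuration());

            // TextOutputFormat is the default: each record becomes one text line,
            // with key and value converted by toString() and separated by a tab
            job.setOutputFormatClass(TextOutputFormat.class);

            // Alternatively, write a compact SequenceFile that a follow-up job can
            // consume directly; block-level compression keeps it small
            job.setOutputFormatClass(SequenceFileOutputFormat.class);
            SequenceFileOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK);
        }
    }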

    Custom OutputFormat: Use Cases and Steps

    1. Use cases
      Define a custom OutputFormat when you need control over the final output paths and the output format.
      For example, a MapReduce program that routes records to two different directories depending on their content needs this kind of flexible output, which a custom OutputFormat provides.
    2. Steps to define a custom OutputFormat
      1) Create a class that extends FileOutputFormat.
      2) Implement a RecordWriter, in particular the write() method that emits the output data.

    Custom OutputFormat: A Worked Example

    Filter the input log: lines containing liujh are written to e:/liujh.log, and lines that do not contain liujh are written to e:/other.log.
    Input data

    http://www.baidu.com
    http://www.google.com
    http://cn.bing.com
    http://www.liujh.com
    http://www.sohu.com
    http://www.sina.com
    http://www.sin2a.com
    http://www.sin2desa.com
    http://www.sindsafa.com
    

    Implementation

    1) Write the FilterMapper class

    import java.io.IOException;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;
    public class FilterMapper extends Mapper<LongWritable, Text, Text, NullWritable>{
        @Override
        protected void map(LongWritable key, Text value, Context context)   throws IOException, InterruptedException {
            // Write the whole input line out as the key; the NullWritable value carries no data
            context.write(value, NullWritable.get());
        }
    }
    

    2) Write the FilterReducer class

    import java.io.IOException;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Reducer;
    public class FilterReducer extends Reducer<Text, NullWritable, Text, NullWritable> {
        private Text k = new Text();

        @Override
        protected void reduce(Text key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException {
            // 1 Get the line (the mapper emitted each URL as the key)
            String line = key.toString();
            // 2 Append a line terminator, because the custom RecordWriter writes raw bytes
            line = line + "\r\n";
            // 3 Set the output key
            k.set(line);
            // 4 Write once per occurrence so duplicate URLs in the input are not collapsed
            for (NullWritable value : values) {
                context.write(k, NullWritable.get());
            }
        }
    }
    

    3) Define a custom OutputFormat class

    import java.io.IOException;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.RecordWriter;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
    public class FilterOutputFormat extends FileOutputFormat<Text, NullWritable>{
        @Override
        public RecordWriter<Text, NullWritable> getRecordWriter(TaskAttemptContext job)         throws IOException, InterruptedException {
            // Hand back our custom RecordWriter for this task
            return new FilterRecordWriter(job);
        }
    }
    

    4) Write the RecordWriter class

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IOUtils;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.RecordWriter;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    public class FilterRecordWriter extends RecordWriter<Text, NullWritable> {
        FSDataOutputStream liujhOut = null;
        FSDataOutputStream otherOut = null;
        public FilterRecordWriter(TaskAttemptContext job) {
            // 1 Get the file system from the job configuration
            FileSystem fs;
            try {
                fs = FileSystem.get(job.getConfiguration());
    
                // 2 Build the two output file paths (hard-coded local paths for this example)
                Path liujhPath = new Path("e:/liujh.log");
                Path otherPath = new Path("e:/other.log");
    
                // 3 Open an output stream for each file
                liujhOut = fs.create(liujhPath);
                otherOut = fs.create(otherPath);
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    
        @Override
        public void write(Text key, NullWritable value) throws IOException, InterruptedException {
            // Route each record to a different file depending on whether it contains "liujh"
            if (key.toString().contains("liujh")) {
                liujhOut.write(key.toString().getBytes());
            } else {
                otherOut.write(key.toString().getBytes());
            }
        }
    
        @Override
        public void close(TaskAttemptContext context) throws IOException, InterruptedException {
            // Close both streams; IOUtils.closeStream ignores exceptions thrown on close
            IOUtils.closeStream(liujhOut);
            IOUtils.closeStream(otherOut);
        }
    }
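
    One optional variant, not part of the original walkthrough: instead of hard-coding e:/ paths, the constructor could place both files under the output directory already configured on the job (this assumes an extra import of org.apache.hadoop.mapreduce.lib.output.FileOutputFormat), so the same writer also works on HDFS:

    public FilterRecordWriter(TaskAttemptContext job) {
        try {
            FileSystem fs = FileSystem.get(job.getConfiguration());
            // Derive both output files from the job's configured output directory
            // instead of fixed local e:/ paths (illustrative variant, not the original code)
            Path outDir = FileOutputFormat.getOutputPath(job);
            liujhOut = fs.create(new Path(outDir, "liujh.log"));
            otherOut = fs.create(new Path(outDir, "other.log"));
        } catch (IOException e) {
            e.printStackTrace();
        }
    }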
    

    5) Write the FilterDriver class

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
    public class FilterDriver {
        public static void main(String[] args) throws Exception {
            // Adjust the input and output paths to match your own machine
            args = new String[] { "e:/input/inputoutputformat", "e:/output2" };
            Configuration conf = new Configuration();
            Job job = Job.getInstance(conf);
    
            job.setJarByClass(FilterDriver.class);
            job.setMapperClass(FilterMapper.class);
            job.setReducerClass(FilterReducer.class);
    
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(NullWritable.class);
            
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(NullWritable.class);
    
            // Plug the custom output format class into the job
            job.setOutputFormatClass(FilterOutputFormat.class);
    
            FileInputFormat.setInputPaths(job, new Path(args[0]));
    
            // Although we defined a custom OutputFormat, it extends FileOutputFormat,
            // and FileOutputFormat still writes a _SUCCESS marker file, so an output directory must be specified
            FileOutputFormat.setOutputPath(job, new Path(args[1]));
    
            boolean result = job.waitForCompletion(true);
            System.exit(result ? 0 : 1);
        }
    }
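
    For reference, with the sample input above and the hard-coded paths, a successful run should produce roughly the following two files (the ordering comes from the shuffle's key sort, and each line ends with the \r\n appended in the reducer):

    e:/liujh.log
    http://www.liujh.com

    e:/other.log
    http://cn.bing.com
    http://www.baidu.com
    http://www.google.com
    http://www.sin2a.com
    http://www.sin2desa.com
    http://www.sina.com
    http://www.sindsafa.com
    http://www.sohu.com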
    