
MapReduce Basics (7): Loading the Distributed Cache in MapReduce

Author: 做个合格的大厂程序员 | Published 2020-06-17 11:11

To load the small table from the distributed cache and perform a map-side join, the custom mapper class is implemented in the following steps:

package cn.leon.reduce_join;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.util.HashMap;

public class MapperJoinMapper extends Mapper<LongWritable, Text,Text,Text> {

    private HashMap<String, String> stringHashMap = new HashMap<>();

    //First task: read the small-table data from the distributed cache into a local Map (this only needs to happen once, in setup)

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        //1: get the list of distributed cache files
        URI[] cacheFiles =  context.getCacheFiles();

        //2: get the FileSystem for the given cache file
        FileSystem fileSystem = FileSystem.get(cacheFiles[0], context.getConfiguration());

        //3: open an input stream on the file
        FSDataInputStream inputStream = fileSystem.open(new Path(cacheFiles[0]));

        //4: read the file contents and store them in the Map
        //4.1 wrap the byte stream in a buffered character reader: FSDataInputStream ---> BufferedReader
        BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream));
        //4.2 read the small-table file line by line and store each record in the map

        String line = null;
        while((line = bufferedReader.readLine()) != null){
            String[] split = line.split(",");
            //key: the first field (product id); value: the entire product record
            stringHashMap.put(split[0], line);
        }
        //5: close the reader. Note: the FileSystem instance returned by FileSystem.get()
        //is cached and shared, so it is safer not to close it here
        bufferedReader.close();
    }

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String[] split = value.toString().split(",");
        String productId = split[2];

        //k2: the map output key (product id)
        String productLine = stringHashMap.get(productId);
        //v2: the map output value (cached product record joined with the order record)
        String valueLine = productLine + "\t" + value.toString();

        context.write(new Text(productId), new Text(valueLine));
    }
}
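
One caveat about map() above: stringHashMap.get(productId) returns null for any order whose product id is missing from the cached table, so the literal string "null" would be written into the output. A minimal null-safe variant of the method (a sketch, assuming the same comma-separated field layout) could look like this:

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String[] split = value.toString().split(",");
        String productId = split[2];

        //skip orders whose product id has no match in the cached small table
        String productLine = stringHashMap.get(productId);
        if (productLine == null) {
            return;
        }
        context.write(new Text(productId), new Text(productLine + "\t" + value.toString()));
    }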

And in the driver (main) class:

package cn.leon.reduce_join;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.net.URI;

public class ReducerMain extends Configured implements Tool {
    @Override
    public int run(String[] strings) throws Exception {

        Job job = Job.getInstance(super.getConf(),"reducer_join");

        //put the small table into the distributed cache
        job.addCacheFile(new URI("hdfs://node01:8020/input/join/product.txt"));

        //input: the big (order) table, read with TextInputFormat
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path("/Users/caoxiaozhu/Desktop/reduce"));

        //mapper that joins against the distributed cache
        job.setMapperClass(MapperJoinMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        Path path = new Path("/Users/caoxiaozhu/Desktop/result");
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job,path);

        //delete the output directory if it already exists, otherwise the job fails on startup
        FileSystem fileSystem = FileSystem.get(new URI("/Users/caoxiaozhu/Desktop/result"), new Configuration());
        if (fileSystem.exists(path)){
            fileSystem.delete(path,true);
        }

        boolean bl = job.waitForCompletion(true);
        return bl ? 0 : 1;
    }

    public static void main(String[] args) throws Exception{
        Configuration configuration = new Configuration();
        int run = ToolRunner.run(configuration,new ReducerMain(),args);
        System.exit(run);
    }
}
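
Note that the input and output paths above are local (/Users/...) while the cache file lives on HDFS (hdfs://node01:8020), so the NameNode must be reachable when the job runs. For a purely local test, one alternative (a sketch with a hypothetical local path) is to cache the small table from the local filesystem instead:

        //hypothetical local-mode alternative: serve the small table via a file:// URI
        job.addCacheFile(new URI("file:///Users/caoxiaozhu/Desktop/product.txt"));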

In effect no reducer is needed: k2 and v2 are joined directly in the mapper.
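
Since the job is map-only, this can be made explicit in run() by disabling the reduce phase, so the mappers write their results straight to the output directory. The driver listing above does not include this, but it is a one-line addition:

        //map-only job: skip the shuffle and reduce phase entirely
        job.setNumReduceTasks(0);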
