Custom bean objects implementing the serialization interface (Writable)
1. In enterprise development, the commonly used built-in serialization types often cannot cover every need; for example, to pass a bean object around inside Hadoop, that object must implement the serialization interface
2. Common Java data types and their corresponding Hadoop serialization types:

| Java type | Hadoop Writable type |
| --- | --- |
| boolean | BooleanWritable |
| byte | ByteWritable |
| int | IntWritable |
| float | FloatWritable |
| long | LongWritable |
| double | DoubleWritable |
| String | Text |
| map | MapWritable |
| array | ArrayWritable |
3. Steps to implement bean serialization:
3.1> The class must implement the Writable interface
3.2> Deserialization instantiates the bean reflectively through its no-arg constructor, so a no-arg constructor is required:
public FlowBean() {
super();
}
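For context, a one-line sketch of the mechanism (an illustration, not part of the original steps): the framework creates Writable instances reflectively, roughly like this, which is why the no-arg constructor must be visible:
// illustration only: how a Writable bean gets instantiated without its class being hardcoded
FlowBean bean = org.apache.hadoop.util.ReflectionUtils.newInstance(FlowBean.class, conf);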
3.3> Override the serialization method:
@Override
public void write(DataOutput out) throws IOException {
out.writeLong(upFlow);
out.writeLong(downFlow);
out.writeLong(sumFlow);
}
3.4> Override the deserialization method:
@Override
public void readFields(DataInput in) throws IOException {
upFlow = in.readLong();
downFlow = in.readLong();
sumFlow = in.readLong();
}
3.5> Note that the deserialization order must match the serialization order exactly
3.6> To make the results readable in the output file, override toString(); separating fields with "\t" makes downstream processing easier
3.7> If the custom bean is to be transported as a key, it must also implement the Comparable interface, because the Shuffle phase of the MapReduce framework requires keys to be sortable:
@Override
public int compareTo(FlowBean o) {
// descending order, from largest to smallest; Long.compare also returns 0 for equal totals, which the compareTo contract requires
return Long.compare(o.getSumFlow(), this.sumFlow);
}
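Hadoop also ships a single interface, WritableComparable, that combines Writable and Comparable and is the usual choice for key beans. A minimal sketch, assuming a key that sorts only on the total flow (the class name FlowBeanKey is hypothetical, not from the original code):
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.WritableComparable;
public class FlowBeanKey implements WritableComparable<FlowBeanKey> {
private long sumFlow;
// no-arg constructor for reflective instantiation
public FlowBeanKey() {
}
public FlowBeanKey(long sumFlow) {
this.sumFlow = sumFlow;
}
@Override
public void write(DataOutput out) throws IOException {
out.writeLong(sumFlow);
}
@Override
public void readFields(DataInput in) throws IOException {
// same order as write()
sumFlow = in.readLong();
}
@Override
public int compareTo(FlowBeanKey o) {
// descending order; returns 0 for equal totals
return Long.compare(o.sumFlow, this.sumFlow);
}
@Override
public String toString() {
return String.valueOf(sumFlow);
}
}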
4. Hands-on serialization example:
Requirement: for each phone number, total up its upstream traffic, downstream traffic, and overall traffic (note: the first three records belong to the same phone number and must be accumulated together)
Input data:
![](https://img.haomeiwen.com/i6348370/cae9ec0a9b4a812a.png)
Input data format:
![](https://img.haomeiwen.com/i6348370/08f482f43d9afa22.png)
Output data format:
![](https://img.haomeiwen.com/i6348370/665dff220d35fd79.png)
5. Source code:
- Custom FlowBean
package com.flowbean.mr.sum;
import java.io.IOException;
import java.io.DataInput;
import java.io.DataOutput;
import org.apache.hadoop.io.Writable;
public class FlowBean implements Writable {
/** upstream traffic */
private long upFlow;
/** downstream traffic */
private long downFlow;
/** total traffic */
private long sumFlow;
/**
* No-arg constructor, required so the framework can instantiate the bean reflectively
*/
public FlowBean() {
super();
}
public FlowBean(long upFlow, long downFlow) {
super();
this.upFlow = upFlow;
this.downFlow = downFlow;
sumFlow = upFlow + downFlow;
}
@Override
public void write(DataOutput dataOutput) throws IOException {
dataOutput.writeLong(upFlow);
dataOutput.writeLong(downFlow);
dataOutput.writeLong(sumFlow);
}
@Override
public void readFields(DataInput dataInput) throws IOException {
// deserialize in exactly the same order in which write() serialized
upFlow = dataInput.readLong();
downFlow = dataInput.readLong();
sumFlow = dataInput.readLong();
}
@Override
public String toString() {
return upFlow + "\t" + downFlow + "\t" + sumFlow;
}
// getters and setters
public long getUpFlow() {
return upFlow;
}
public long getDownFlow() {
return downFlow;
}
public long getSumFlow() {
return sumFlow;
}
public void setUpFlow(long upFlow) {
this.upFlow = upFlow;
}
public void setDownFlow(long downFlow) {
this.downFlow = downFlow;
}
public void setSumFlow(long sumFlow) {
this.sumFlow = sumFlow;
}
public void set(long upFlow, long downFlow) {
this.upFlow = upFlow;
this.downFlow = downFlow;
sumFlow = upFlow + downFlow;
}
}
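To sanity-check that write() and readFields() agree on field order, a small round-trip harness (an illustration, not part of the original tutorial) can serialize the bean into a byte array and read it back:
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
public class FlowBeanRoundTrip {
public static void main(String[] args) throws IOException {
FlowBean original = new FlowBean(2481, 24681);
// serialize into an in-memory buffer
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
original.write(new DataOutputStream(buffer));
// deserialize through the no-arg constructor, as the framework would
FlowBean copy = new FlowBean();
copy.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
System.out.println(copy); // expected: 2481	24681	27162
}
}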
- Mapper
package com.flowbean.mr.sum;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
/*
id  phone          ip               url                  upFlow  downFlow  status
1 13074491521 192.168.1.112 www.atguigu.com 2481 24681 200
2 13074491521 192.168.100.2 www.baidu.com 721 98211 200
3 13074491521 192.168.100.15 www.atguigu.com 623 61281 200
4 13398497761 192.168.1.123 www.ifeng.com 0 0 200
5 18965492364 192.168.100.33 www.atguigu.com 81 8181 200
6 13251497152 192.168.1.124 www.aichesong.com 721 0 200
7 15487498711 192.168.100.12 www.alibaba.com 249 8080 200
8 18261492374 192.168.1.17 www.huolala.com 41 23 200
9 17769496291 192.168.100.9 www.jd.com 801 4443 200
10 18912498751 192.168.1.27 www.aiqiyi.com 33 97875 200
11 13074496651 192.168.100.7 www.tianmao.com 64 21710 200
12 13074499912 192.168.1.33 www.atguigu.com 181 83 200
13 18812492111 192.168.100.81 www.cctv.com 0 9911 200
14 15636496612 192.168.1.3 www.18av.com 34 0 200
15 15677498871 192.168.100.21 www.baidu.com 76 2181 200
*/
/**
* Input key (LongWritable): the offset of the line within the file, e.g.:
*   1 => 1 13074491521 192.168.1.112 www.atguigu.com 2481 24681 200
*   64 => 2 13074491521 192.168.100.2 www.baidu.com 721 98211 200
*   125 => 3 13074491521 192.168.100.15 www.atguigu.com 623 61281 200
* Input value (Text): one line of input, e.g.: 3 13074491521 192.168.100.15 www.atguigu.com 623 61281 200
* Output key (Text): the phone number
* Output value (FlowBean): the custom FlowBean object
*/
public class FlowCountMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
Text k = new Text();
FlowBean v = new FlowBean();
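// k and v are reused across map() calls on purpose: context.write() serializes
// their contents immediately, so reuse is safe and avoids allocating new objects per record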
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
// 1. read one line
String line = value.toString();
// 2. split on tabs
String[] fields = line.split("\t");
// 3. populate the output key and value
k.set(fields[1]); // the phone number
// index from the end of the array so that a variable number of middle columns cannot shift the traffic fields
long upFlow = Long.parseLong(fields[fields.length-3]);
long downFlow = Long.parseLong(fields[fields.length-2]);
v.setUpFlow(upFlow);
v.setDownFlow(downFlow);
// 4. emit
context.write(k, v);
}
}
- Reducer
package com.flowbean.mr.sum;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
/**
* Input key (Text): the phone number (the Mapper's output key)
* Input value (FlowBean): the Mapper's output value
* Output key (Text): the phone number
* Output value (FlowBean): the accumulated flow per phone number, e.g. for 13074491521:
*   13074491521 3825 184173 187998
*/
public class FlowCountReducer extends Reducer<Text, FlowBean, Text, FlowBean> {
FlowBean v = new FlowBean();
@Override
protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
long sum_upFlow = 0;
long sum_downFlow = 0;
// 1. accumulate every record for this phone number (do not call super.reduce() here:
// the default implementation would consume the values iterator and emit identity output)
for(FlowBean flowbean : values){
// accumulate upstream traffic
sum_upFlow += flowbean.getUpFlow();
// accumulate downstream traffic
sum_downFlow += flowbean.getDownFlow();
}
v.set(sum_upFlow, sum_downFlow);
// 2. emit one summed record per phone number
context.write(key, v);
}
}
- Driver
package com.flowbean.mr.sum;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
public class FlowsumDriver {
public static void main(String[] args) throws IOException,ClassNotFoundException, InterruptedException {
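// local test paths that override the command-line arguments; remove this line when submitting to a cluster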
args = new String[]{"/Users/XXX/Desktop/word/input/phone_data.txt", "/Users/XXXX/Desktop/word/phoneoutput"};
// 1. get the Job instance
Configuration conf = new Configuration();
Job job = Job.getInstance(conf);
// 2. set the jar by locating this driver class
job.setJarByClass(FlowsumDriver.class);
// 3. wire up the mapper and reducer
job.setMapperClass(FlowCountMapper.class);
job.setReducerClass(FlowCountReducer.class);
// 4. set the Mapper's output key and value types
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(FlowBean.class);
// 5. set the final output key and value types
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(FlowBean.class);
// 6. set the input and output paths
FileInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
// 7. submit the job and wait for completion
boolean result = job.waitForCompletion(true);
System.exit(result ? 0 : 1);
}
}
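To run the job on a cluster instead of locally, package the classes into a jar, drop the hardcoded args line, and submit with something like `hadoop jar flowsum.jar com.flowbean.mr.sum.FlowsumDriver <input path> <output path>` (the jar name is illustrative). Note that the output directory must not exist beforehand; FileOutputFormat fails the job if it does.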