Hadoop Basics 09 - MapReduce Traffic Statistics

Source code: https://github.com/hiszm/hadoop-train

Requirements Analysis

access.log

  • Field 2: the phone number
  • Third field from the end: upstream traffic
  • Second field from the end: downstream traffic

Requirement: for each phone number, compute the sum of its upstream traffic, the sum of its downstream traffic, and its total traffic (upstream sum + downstream sum).
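For illustration, a hypothetical, simplified tab-separated log line might look like this (the real file has more columns, which is why the last fields are addressed from the end):

1363157985066	13726238888	120.196.100.82	i02.c.aliimg.com	24	27	2481	24681	200

Here field 2 is the phone number (13726238888), the third field from the end is the upstream traffic (2481), and the second field from the end is the downstream traffic (24681).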

Access.java

  • Fields: phone number, upstream traffic, downstream traffic, total traffic

  • Since we need sums: group by phone number, then add up that number's upstream and downstream traffic

  • Mapper: split each line apart; emit the phone number as the key and an Access object as the value

  • Reducer: receives (13726238888, <Access, Access, ...>) and sums the traffic for that number

Custom Complex Data Type

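The Access class itself is not reproduced in this post (see the repository linked above for the original). A minimal sketch consistent with how it is used below — a custom Writable carrying the phone number, upstream, downstream, and total traffic — might look like this:

package com.bigdata.hadoop.mr.access;

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class Access implements Writable {

    private String phone; // phone number
    private long up;      // upstream traffic
    private long down;    // downstream traffic
    private long sum;     // total traffic = up + down

    // Hadoop instantiates Writables reflectively, so a no-argument constructor is required
    public Access() {
    }

    public Access(String phone, long up, long down) {
        this.phone = phone;
        this.up = up;
        this.down = down;
        this.sum = up + down;
    }

    @Override
    public void write(DataOutput out) throws IOException { // serialization order must match readFields
        out.writeUTF(phone);
        out.writeLong(up);
        out.writeLong(down);
        out.writeLong(sum);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        this.phone = in.readUTF();
        this.up = in.readLong();
        this.down = in.readLong();
        this.sum = in.readLong();
    }

    public long getUp() {
        return up;
    }

    public long getDown() {
        return down;
    }

    @Override
    public String toString() { // this is what TextOutputFormat writes to the result files
        return phone + "," + up + "," + down + "," + sum;
    }
}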


Custom Mapper Class


package com.bigdata.hadoop.mr.access;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class AccessMapper extends Mapper<LongWritable, Text, Text, Access> {

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String[] lines = value.toString().split("\t"); // split the tab-separated log line into fields

        String phone = lines[1]; // field 2: the phone number
        long up = Long.parseLong(lines[lines.length - 3]); // third field from the end: upstream traffic
        long down = Long.parseLong(lines[lines.length - 2]); // second field from the end: downstream traffic

        context.write(new Text(phone), new Access(phone, up, down)); // key: phone number, value: Access record
    }
}
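For the hypothetical sample line shown earlier, this map() call emits the pair (13726238888, Access(13726238888, 2481, 24681)); the shuffle then groups all Access values by phone number before they reach the Reducer.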

Custom Reducer Implementation


package com.bigdata.hadoop.mr.access;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class AccessReducer extends Reducer<Text, Access, NullWritable, Access> {
    /**
     * @param key    the phone number
     * @param values all Access records emitted for that phone number
     */
    @Override
    protected void reduce(Text key, Iterable<Access> values, Context context) throws IOException, InterruptedException {
        long ups = 0;
        long downs = 0;
        for (Access access : values) { // sum the upstream and downstream traffic for this phone number
            ups += access.getUp();
            downs += access.getDown();
        }

        context.write(NullWritable.get(), new Access(key.toString(), ups, downs));
    }
}
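Because the output key is NullWritable, TextOutputFormat writes only the Access value (via its toString()) on each line of the result files.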


Driver Development


package com.bigdata.hadoop.mr.access;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class AccessLocalApp {

    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();

        Job job = Job.getInstance(configuration);
        job.setJarByClass(AccessLocalApp.class);

        // set the custom Mapper and Reducer
        job.setMapperClass(AccessMapper.class);
        job.setReducerClass(AccessReducer.class);

        // set the custom partitioning rule and the matching number of reducers
        job.setPartitionerClass(AccessPartioner.class);
        job.setNumReduceTasks(3);

        // map output types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Access.class);

        // final (reduce) output types
        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(Access.class);

        FileInputFormat.setInputPaths(job, new Path("access/input"));
        FileOutputFormat.setOutputPath(job, new Path("access/output"));

        job.waitForCompletion(true);
    }
}
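One practical note: FileOutputFormat will refuse to start the job if the output directory already exists. A common guard — an addition to the listing above, placed in main() after the Configuration is created and requiring an extra import of org.apache.hadoop.fs.FileSystem — is to delete it first:

        // remove any existing output directory so the job can be rerun
        FileSystem fileSystem = FileSystem.get(configuration);
        Path outputPath = new Path("access/output");
        if (fileSystem.exists(outputPath)) {
            fileSystem.delete(outputPath, true); // true = delete recursively
        }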




Custom Partitioner

Suppose we have a further requirement: write the statistics for different phone numbers to different files according to the number's prefix. Requirements like this are quite common in practice — for example, when aggregating product sales you may need to split the results by product category. Implementing this calls for a custom Partitioner.

First, a look at MapReduce's default partitioning rule: when building the job, if no partitioner is specified, HashPartitioner is used by default. It hashes the key and takes the remainder modulo numReduceTasks. Its implementation is as follows:

public class HashPartitioner<K, V> extends Partitioner<K, V> {

  public int getPartition(K key, V value, int numReduceTasks) {
    return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
  }

}
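The bitwise AND with Integer.MAX_VALUE clears the sign bit, so even a negative hashCode() yields a non-negative partition index in the range [0, numReduceTasks).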

When building the job, we specify our own partitioning rule and set the number of reducers:

// set the custom partitioning rule
job.setPartitionerClass(AccessPartioner.class);
// set the number of reducers
job.setNumReduceTasks(3);
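With three reduce tasks, partitions 0, 1, and 2 each produce their own output file (part-r-00000 through part-r-00002). The two settings must stay consistent: if getPartition() returns an index outside [0, numReduceTasks), the job fails at runtime with an "Illegal partition" error.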

The Partitioner below routes phone numbers to different files by prefix:


package com.bigdata.hadoop.mr.access;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

public class AccessPartioner extends Partitioner<Text, Access> {
    /**
     * @param phone the phone number (the map output key)
     */
    @Override
    public int getPartition(Text phone, Access access, int numReduceTasks) {
        if (phone.toString().startsWith("13")) { // numbers starting with 13
            return 0;
        } else if (phone.toString().startsWith("15")) { // numbers starting with 15
            return 1;
        } else { // everything else
            return 2;
        }
    }
}

