Implementing an Inverted Index with Hadoop

This post implements the inverted index algorithm with Hadoop MapReduce, split into two basic jobs, without using a combiner.

Step One

Read the input documents and count the occurrences of each word. This is just like word count, except that the word and the file name are combined into a single key (word-->filename), and the intermediate result is written to disk.
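
For concreteness, the runs below assume three small input files under res/words/input/. Their contents are hypothetical (the original post does not show them), but are chosen to be consistent with the outputs listed later:

a.txt:  hello tom tom
        hello jerry tom
b.txt:  hello jerry jerry
        hello jerry tom
c.txt:  hello tom
        hello jerry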

InverseIndexStepOne.java

package postlisting;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * Inverted index, step one: a word count, except the key is word-->filename
 */
public class InverseIndexStepOne {
    public static class StepOneMapper extends Mapper<LongWritable, Text, Text, LongWritable>{
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            String line = value.toString();
            // split the line into individual words
            String[] fields = line.split(" ");
            // get the input split to recover which file this line came from
            FileSplit inputSplit = (FileSplit) context.getInputSplit();
            // extract the source file name
            String filename = inputSplit.getPath().getName();
            // emit one count per word, e.g. <hello-->a.txt, 1>
            for(String field: fields){
                context.write(new Text(field+"-->"+filename), new LongWritable(1));
            }
        }
    }

    public static class StepOneReducer extends Reducer<Text, LongWritable, Text, LongWritable>{
        @Override
        protected void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException {
            long counter = 0;
            for (LongWritable value: values){
                counter += value.get();
            }
            context.write(key, new LongWritable(counter));
        }
    }

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        job.setJarByClass(InverseIndexStepOne.class);

        job.setMapperClass(StepOneMapper.class);
        job.setReducerClass(StepOneReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);

        // delete the output directory first if it already exists
        // (paths are hard-coded for a local test run)
        Path output = new Path("res/words/output/step1");
        FileSystem fs = FileSystem.get(conf);
        if(fs.exists(output)){
            fs.delete(output, true);
        }
        FileInputFormat.setInputPaths(job, new Path("res/words/input/"));
        FileOutputFormat.setOutputPath(job, output);
        System.out.println(job.waitForCompletion(true));
    }
}

Output

hello-->a.txt	2
hello-->b.txt	2
hello-->c.txt	2
jerry-->a.txt	1
jerry-->b.txt	3
jerry-->c.txt	1
tom-->a.txt	3
tom-->b.txt	1
tom-->c.txt	1

Step Two

Read the intermediate results from the previous step, then parse and merge them. For example, the step-one line "hello-->a.txt    2" is parsed into the pair <hello, a.txt-->2>, and the reducer concatenates all such values for a word into a single posting list.

InverseIndexStepTwo.java

package postlisting;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class InverseIndexStepTwo {
    public static class StepTwoMapper extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            String line = value.toString();
            // an input line looks like: hello-->a.txt    1
            String[] fields = line.split("\t");
            String[] wordAndFileName = fields[0].split("-->");
            String word = wordAndFileName[0];
            String fileName = wordAndFileName[1];
            long count = Long.parseLong(fields[1]);
            // emit <hello, a.txt-->3>
            context.write(new Text(word), new Text(fileName + "-->" + count));
        }
    }

    public static class StepTwoReducer extends Reducer<Text, Text, Text, Text>{
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
            // incoming data looks like <hello, [a.txt-->3, b.txt-->2, ...]>
            StringBuilder result = new StringBuilder();
            for (Text value:values){
                result.append(" ").append(value);
            }
            context.write(key, new Text(result.toString()));
        }
    }

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        job.setJarByClass(InverseIndexStepTwo.class);

        job.setMapperClass(StepTwoMapper.class);
        job.setReducerClass(StepTwoReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        // delete the output directory first if it already exists
        Path output = new Path("res/words/output/step2");
        FileSystem fs = FileSystem.get(conf);
        if(fs.exists(output)){
            fs.delete(output, true);
        }
        FileInputFormat.setInputPaths(job, new Path("res/words/output/step1/"));
        FileOutputFormat.setOutputPath(job, output);
        System.out.println(job.waitForCompletion(true));
    }
}

Output

hello	 c.txt-->2 b.txt-->2 a.txt-->2
jerry	 c.txt-->1 b.txt-->3 a.txt-->1
tom	 c.txt-->1 b.txt-->1 a.txt-->3

Summary

Although a combiner-based version would save some code, writing the two steps as separate jobs feels more flexible: a short shell script (or a small driver class, sketched below) is enough to chain them together. Much of MapReduce's power lies precisely in this free composition of simple jobs.
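
As a minimal sketch of that composition, here is a hypothetical driver class (not from the original post) that chains the two jobs in-process instead of via a shell script. It assumes InverseIndexStepOne and InverseIndexStepTwo are on the classpath and keep their hard-coded local paths:

package postlisting;

/**
 * Hypothetical driver: runs step one, then step two, mirroring what a
 * two-line shell script would do. Each main() blocks on
 * job.waitForCompletion(true), so the jobs run strictly in sequence.
 */
public class InverseIndexDriver {
    public static void main(String[] args) throws Exception {
        // step one: word count keyed by word-->filename
        InverseIndexStepOne.main(args);
        // step two: regroup the step-one output by word
        // (note: the mains above print success/failure but do not exit
        // on failure; a more robust driver would check the job status)
        InverseIndexStepTwo.main(args);
    }
}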

posted @ 2018-03-08 15:01  潇雨危栏