Chapter 7 MapReduce Extended Case Studies
7.1 Inverted Index Case (Chaining Multiple Jobs)
1. Requirement
There is a large volume of text (documents, web pages) over which a search index needs to be built, as shown in Figure 4-31.
(1) Input data
a.txt:
atguigu pingping
atguigu ss
atguigu ss
b.txt:
atguigu pingping
atguigu pingping
pingping ss
c.txt:
atguigu ss
atguigu pingping
(2) Expected output
atguigu c.txt-->2 b.txt-->2 a.txt-->3
pingping c.txt-->1 b.txt-->3 a.txt-->1
ss c.txt-->1 b.txt-->1 a.txt-->2
2. Requirement analysis
A single MapReduce job can group either by word-and-file or by word alone, but not one after the other, so this case chains two jobs. The first job counts how many times each word occurs in each file, using "word--filename" as its key. The second job splits that key apart again, groups by word alone, and concatenates the per-file counts into one inverted-index line per word. A worked trace of this flow follows below.
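To make the two-stage flow concrete, the trace below follows the word atguigu through both jobs (an illustrative sketch derived from the sample data above, not captured job output; the key and value in each record are separated by a tab):

Job 1 map output (from a.txt):  (atguigu--a.txt, 1)   emitted once per occurrence, 3 in total
Job 1 reduce output:            atguigu--a.txt  3
Job 2 map output:               (atguigu, a.txt  3)
Job 2 reduce output:            atguigu  c.txt-->2  b.txt-->2  a.txt-->3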
3. First pass
(1) First pass: the OneIndexMapper class
package com.atguigu.mapreduce.index;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public class OneIndexMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    String name;
    Text k = new Text();
    IntWritable v = new IntWritable();

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        // Get the name of the file this input split belongs to
        FileSplit split = (FileSplit) context.getInputSplit();
        name = split.getPath().getName();
    }

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1 Read one line
        String line = value.toString();

        // 2 Split on spaces
        String[] fields = line.split(" ");

        for (String word : fields) {
            // 3 Concatenate word and file name as the key
            k.set(word + "--" + name);
            v.set(1);

            // 4 Emit
            context.write(k, v);
        }
    }
}
(2) First pass: the OneIndexReducer class
package com.atguigu.mapreduce.index;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class OneIndexReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    IntWritable v = new IntWritable();

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        int sum = 0;

        // 1 Sum the counts for this word--file key
        for (IntWritable value : values) {
            sum += value.get();
        }
        v.set(sum);

        // 2 Emit
        context.write(key, v);
    }
}
(3) First pass: the OneIndexDriver class
package com.atguigu.mapreduce.index;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class OneIndexDriver {

    public static void main(String[] args) throws Exception {

        // Adjust the input and output paths to match the actual paths on your machine
        args = new String[] { "e:/input/inputoneindex", "e:/output5" };

        Configuration conf = new Configuration();

        Job job = Job.getInstance(conf);
        job.setJarByClass(OneIndexDriver.class);

        job.setMapperClass(OneIndexMapper.class);
        job.setReducerClass(OneIndexReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.waitForCompletion(true);
    }
}
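Because OneIndexReducer performs a pure, associative sum, the same class could optionally be registered as a combiner to cut shuffle traffic. This is a suggested addition not present in the original driver; it would go before waitForCompletion:

        job.setCombinerClass(OneIndexReducer.class);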
(4) View the output of the first job
atguigu--a.txt	3
atguigu--b.txt	2
atguigu--c.txt	2
pingping--a.txt	1
pingping--b.txt	3
pingping--c.txt	1
ss--a.txt	2
ss--b.txt	1
ss--c.txt	1
4. Second pass
(1) Second pass: the TwoIndexMapper class
package com.atguigu.mapreduce.index;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class TwoIndexMapper extends Mapper<LongWritable, Text, Text, Text> {

    Text k = new Text();
    Text v = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

        // 1 Read one line, e.g. "atguigu--a.txt	3"
        String line = value.toString();

        // 2 Split on "--": fields[0] is the word, fields[1] is "filename<TAB>count"
        String[] fields = line.split("--");

        k.set(fields[0]);
        v.set(fields[1]);

        // 3 Emit
        context.write(k, v);
    }
}
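Note how split("--") interacts with the tab that the first job placed between its key and count: the tab stays inside fields[1]. A minimal standalone check (a hypothetical snippet for illustration, not part of the job):

public class SplitCheck {
    public static void main(String[] args) {
        String line = "atguigu--a.txt\t3";   // one record from the first job's output
        String[] fields = line.split("--");
        System.out.println(fields[0]);       // "atguigu"     -> becomes the key
        System.out.println(fields[1]);       // "a.txt\t3"    -> becomes the value, tab intact
    }
}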
(2) Second pass: the TwoIndexReducer class
package com.atguigu.mapreduce.index;

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class TwoIndexReducer extends Reducer<Text, Text, Text, Text> {

    Text v = new Text();

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        // Input, grouped by word:
        //   atguigu	a.txt	3
        //   atguigu	b.txt	2
        //   atguigu	c.txt	2
        // Desired output:
        //   atguigu	c.txt-->2	b.txt-->2	a.txt-->3

        StringBuilder sb = new StringBuilder();

        // 1 Concatenate one "filename-->count" pair per source file
        for (Text value : values) {
            sb.append(value.toString().replace("\t", "-->") + "\t");
        }

        v.set(sb.toString());

        // 2 Emit
        context.write(key, v);
    }
}
(3) Second pass: the TwoIndexDriver class
package com.atguigu.mapreduce.index;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class TwoIndexDriver {

    public static void main(String[] args) throws Exception {

        // Adjust the input and output paths to match the actual paths on your machine
        args = new String[] { "e:/input/inputtwoindex", "e:/output6" };

        Configuration config = new Configuration();
        Job job = Job.getInstance(config);

        job.setJarByClass(TwoIndexDriver.class);
        job.setMapperClass(TwoIndexMapper.class);
        job.setReducerClass(TwoIndexReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
(4) View the final output of the second job
atguigu c.txt-->2 b.txt-->2 a.txt-->3
pingping c.txt-->1 b.txt-->3 a.txt-->1
ss c.txt-->1 b.txt-->1 a.txt-->2
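The two drivers above are run by hand, one after the other, with the second job's input pointed at the first job's output directory. Since this section is about chaining multiple jobs, here is a minimal sketch of driving both jobs from a single main(), where job 2 starts only if job 1 succeeds. The class name ChainedIndexDriver and the paths are illustrative assumptions, not from the original:

package com.atguigu.mapreduce.index;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class ChainedIndexDriver {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // Illustrative paths; adjust to your machine
        Path input = new Path("e:/input/inputoneindex");
        Path middle = new Path("e:/output5");
        Path output = new Path("e:/output6");

        // Job 1: count occurrences of each word--file key
        Job job1 = Job.getInstance(conf);
        job1.setJarByClass(ChainedIndexDriver.class);
        job1.setMapperClass(OneIndexMapper.class);
        job1.setReducerClass(OneIndexReducer.class);
        job1.setMapOutputKeyClass(Text.class);
        job1.setMapOutputValueClass(IntWritable.class);
        job1.setOutputKeyClass(Text.class);
        job1.setOutputValueClass(IntWritable.class);
        FileInputFormat.setInputPaths(job1, input);
        FileOutputFormat.setOutputPath(job1, middle);

        // Run job 2 only if job 1 succeeded
        if (!job1.waitForCompletion(true)) {
            System.exit(1);
        }

        // Job 2: regroup by word, reading job 1's output directory
        Job job2 = Job.getInstance(conf);
        job2.setJarByClass(ChainedIndexDriver.class);
        job2.setMapperClass(TwoIndexMapper.class);
        job2.setReducerClass(TwoIndexReducer.class);
        job2.setMapOutputKeyClass(Text.class);
        job2.setMapOutputValueClass(Text.class);
        job2.setOutputKeyClass(Text.class);
        job2.setOutputValueClass(Text.class);
        FileInputFormat.setInputPaths(job2, middle);
        FileOutputFormat.setOutputPath(job2, output);

        System.exit(job2.waitForCompletion(true) ? 0 : 1);
    }
}

FileInputFormat's default filter skips files whose names start with "_" or ".", so job 2 ignores the _SUCCESS marker that job 1 leaves in the intermediate directory.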