import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class IterableDemo {    // demo class (name illustrative)

    public static void main(String[] args) {
        // An ordinary Iterable hands out a fresh Iterator on every call
        // to iterator(), so it can be traversed any number of times.
        Iterable<String> iter = new Iterable<String>() {
            public Iterator<String> iterator() {
                List<String> l = new ArrayList<String>();
                // sample values for the demo
                l.add("aa");
                l.add("bb");
                l.add("cc");
                return l.iterator();
            }
        };
        for (int count : new int[] { 1, 2 }) {
            for (String item : iter) {
                System.out.println(item);
            }
            System.out.println("---------->> " + count + " END.");
        }
    }
}
The result, of course, is exactly what you would expect: the Iterable's values are printed in full, twice. So what is it that makes the reduce-phase Iterable traversable only once? Let's first look at a piece of test code. The test data is simply a few lines of space-separated key/value pairs, which the mapper splits apart:
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class TestIterable {

    public static class M1 extends Mapper<Object, Text, Text, Text> {
        private Text oKey = new Text();
        private Text oVal = new Text();

        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            // Each input line is a space-separated key/value pair.
            String[] lineArr = value.toString().split(" ");
            oKey.set(lineArr[0]);
            oVal.set(lineArr[1]);
            context.write(oKey, oVal);
        }
    }

    public static class R1 extends Reducer<Text, Text, Text, Text> {
        List<String> valList = new ArrayList<String>();
        List<Text> textList = new ArrayList<Text>();
        String strAdd;

        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            valList.clear();
            textList.clear();
            strAdd = "";
            // Single pass over values: save both the String contents
            // and the raw Text references.
            for (Text val : values) {
                valList.add(val.toString());
                textList.add(val);
            }
            // Pitfall 1: the saved Text references all point at one reused object.
            for (Text text : textList) {
                strAdd += text.toString() + ", ";
            }
            System.out.println(key.toString() + "\t" + strAdd);
            System.out.println(".......................");

            // The extracted Strings are real copies and print correctly.
            strAdd = "";
            for (String val : valList) {
                strAdd += val + ", ";
            }
            System.out.println(key.toString() + "\t" + strAdd);
            System.out.println("----------------------");

            // Pitfall 2: a second pass over values yields nothing at all.
            valList.clear();
            strAdd = "";
            for (Text val : values) {
                valList.add(val.toString());
            }
            for (String val : valList) {
                strAdd += val + ", ";
            }
            System.out.println(key.toString() + "\t" + strAdd);
            System.out.println(">>>>>>>>>>>>>>>>>>>>>>");
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("mapred.job.queue.name", "regular");
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: wordcount <in> <out>");
            System.exit(2);
        }
        System.out.println("------------------------");
        Job job = new Job(conf, "TestIterable");
        job.setJarByClass(TestIterable.class);
        job.setMapperClass(M1.class);
        job.setReducerClass(R1.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileSystem.get(conf).delete(new Path(otherArgs[1]), true);
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
The result in the Eclipse console:

.......................
----------------------
>>>>>>>>>>>>>>>>>>>>>>
.......................
----------------------
>>>>>>>>>>>>>>>>>>>>>>

Reading this against the reduce code: the values printed above each "......." line (the cached Text references) all come out as the last value for that key; the values above each "----------" line (the copied Strings) come out correctly; and the line above each ">>>>>>" separator is empty, because the second pass over values produced nothing.
On pitfall #1: object reuse
The javadoc of the reduce method already spells out the problem:
The framework calls this method for each <key, (list of values)> pair in the grouped inputs. Output values must be of the same type as input values. Input keys must not be altered. The framework will reuse
the key and value objects that are passed into the reduce, therefore the application should clone the objects they want to keep a copy of.
In other words, although the reduce method runs over and over, there are only two objects backing the keys and values: reduce keeps reusing the same pair. So if you want to keep a key or a value, you must either pull the contained value out into new storage or clone a new object (e.g. Text store = new Text(value), or String a = value.toString()); you must not store the reference itself. The reference points at the same object from start to finish, so if you save references directly they will all end up pointing at the last input record, and the final result will be wrong.
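A minimal sketch of the difference (the list names here are illustrative, not from the job above): storing the reference keeps a pointer into an object the framework will overwrite, while cloning or extracting keeps a true copy.

// Inside reduce(Text key, Iterable<Text> values, Context context):
List<Text> wrong = new ArrayList<Text>();        // will hold N pointers to ONE object
List<Text> cloned = new ArrayList<Text>();       // will hold N independent copies
List<String> extracted = new ArrayList<String>();
for (Text val : values) {
    wrong.add(val);                   // bug: the framework reuses val, so every
                                      // entry later shows the last record's value
    cloned.add(new Text(val));        // ok: clone the object
    extracted.add(val.toString());    // ok: pull the value out as a new String
}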
Reading this, I expect the penny drops: isn't this the classic question interviewers put to fresh graduates: String is immutable, so why can strings be concatenated at all? And why is StringBuilder recommended over String for concatenation? If you're still not sure how to answer, have a look at 《深入理解 String, StringBuffer 与 StringBuilder 的区别》 http://my.oschina.net/leejun2005/blog/102377
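The parallel fits in a few lines of Java: + on an immutable String allocates a fresh object for every concatenation, while a StringBuilder mutates one buffer in place:

String s = "a";
s = s + "b";                         // "a" is untouched; a new String "ab" is created
StringBuilder sb = new StringBuilder("a");
sb.append("b");                      // the same builder object is modified in place
System.out.println(s);               // ab
System.out.println(sb.toString());   // ab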
On pitfall #2: http://stackoverflow.com/questions/6111248/iterate-twice-on-values
The Iterator you receive from that Iterable's iterator() method is special. The values may not all be in memory; Hadoop may be streaming them from disk. They aren't really backed by a Collection, so it's
nontrivial to allow multiple iterations.
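So when two passes are genuinely needed, the usual workaround is to cache cloned values during the first (and only) pass over the stream and iterate the cache from then on. A sketch, assuming one key's values fit in memory:

// Inside reduce(): buffer deep copies while streaming through values once.
List<Text> cache = new ArrayList<Text>();
for (Text val : values) {
    cache.add(new Text(val));   // clone: the framework reuses the val object
}
// From here on, iterate the in-memory cache as many times as you like.
for (Text val : cache) {
    System.out.println("pass 1: " + val);
}
for (Text val : cache) {
    System.out.println("pass 2: " + val);
}

Note the caveat baked into the assumption: Hadoop streams values precisely because a single key's value list may be too large for memory, so cache with care.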
One last observation: the authors of the hadoop framework really did think of everything. The framework has not only object reuse but also JVM reuse, saving every resource that can be saved and squeezing out every drop of performance. At this scale of data processing such tuning matters enormously: over 100 records you won't see any difference, but what about hundreds of billions, or trillions, of records?
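JVM reuse, for example, is a single configuration knob in Hadoop 1.x (shown purely for illustration; the property does not exist in MapReduce 2 on YARN):

Configuration conf = new Configuration();
// Let one JVM run up to 10 map or reduce tasks of the job in sequence;
// the default is 1 (a fresh JVM per task), and -1 means no limit.
conf.setInt("mapred.job.reuse.jvm.num.tasks", 10);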
PS:
My code was run under remote debugging from Eclipse, so reduce wrote nothing to hdfs; the results appear directly in the eclipse console, which is very convenient. On how to remote-debug hadoop on windows, see 《实战 windows7 下 eclipse 远程调试 linux hadoop》 http://my.oschina.net/leejun2005/blog/122775
REF:
hadoop中迭代器的对象重用问题 http://paddy-w.iteye.com/blog/1514595
关于 hadoop 中 JVM 重用和对象重用的介绍 http://wikidoop.com/wiki/Hadoop/MapReduce/Reducer