You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by cd...@apache.org on 2008/10/01 22:57:37 UTC

svn commit: r700918 - in /hadoop/core/trunk: CHANGES.txt src/test/org/apache/hadoop/mapred/TestReduceFetch.java

Author: cdouglas
Date: Wed Oct  1 13:57:36 2008
New Revision: 700918

URL: http://svn.apache.org/viewvc?rev=700918&view=rev
Log:
HADOOP-4302. Fix a race condition in TestReduceFetch that can yield false
negatives.

Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestReduceFetch.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=700918&r1=700917&r2=700918&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Wed Oct  1 13:57:36 2008
@@ -819,6 +819,9 @@
     HADOOP-4232. Fix race condition in JVM reuse when multiple slots become
     free. (ddas via acmurthy) 
 
+    HADOOP-4302. Fix a race condition in TestReduceFetch that can yield false
+    negatives. (cdouglas)
+
 Release 0.18.2 - Unreleased
 
   BUG FIXES

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestReduceFetch.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestReduceFetch.java?rev=700918&r1=700917&r2=700918&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestReduceFetch.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestReduceFetch.java Wed Oct  1 13:57:36 2008
@@ -67,11 +67,11 @@
       Text key = new Text();
       Text val = new Text();
       key.set("KEYKEYKEYKEYKEYKEYKEYKEY");
-      byte[] b = new byte[1024];
+      byte[] b = new byte[1000];
       Arrays.fill(b, (byte)'V');
       val.set(b);
       b = null;
-      for (int i = 0; i < 1024; ++i) {
+      for (int i = 0; i < 4 * 1024; ++i) {
         output.collect(key, val);
       }
     }
@@ -84,7 +84,6 @@
     conf.setReducerClass(IdentityReducer.class);
     conf.setOutputKeyClass(Text.class);
     conf.setOutputValueClass(Text.class);
-    conf.setNumMapTasks(3);
     conf.setNumReduceTasks(1);
     conf.setInputFormat(FakeIF.class);
     FileInputFormat.setInputPaths(conf, new Path("/in"));
@@ -106,25 +105,41 @@
   public void testReduceFromDisk() throws Exception {
     JobConf job = mrCluster.createJobConf();
     job.set("mapred.job.reduce.input.buffer.percent", "0.0");
+    job.setNumMapTasks(3);
     Counters c = runJob(job);
-    assertTrue(c.findCounter(HDFS_WRITE).getCounter() <=
-               c.findCounter(LOCAL_READ).getCounter());
+    final long hdfsWritten = c.findCounter(HDFS_WRITE).getCounter();
+    final long localRead = c.findCounter(LOCAL_READ).getCounter();
+    assertTrue("Expected more bytes read from local (" +
+        localRead + ") than written to HDFS (" + hdfsWritten + ")",
+        hdfsWritten <= localRead);
   }
 
   public void testReduceFromPartialMem() throws Exception {
     JobConf job = mrCluster.createJobConf();
-    job.setInt("mapred.inmem.merge.threshold", 2);
+    job.setNumMapTasks(5);
+    job.setInt("mapred.inmem.merge.threshold", 0);
     job.set("mapred.job.reduce.input.buffer.percent", "1.0");
+    job.setInt("mapred.reduce.parallel.copies", 1);
+    job.setInt("io.sort.mb", 10);
+    job.set("mapred.child.java.opts", "-Xmx128m");
+    job.set("mapred.job.shuffle.input.buffer.percent", "0.14");
+    job.setNumTasksToExecutePerJvm(1);
+    job.set("mapred.job.shuffle.merge.percent", "1.0");
     Counters c = runJob(job);
-    assertTrue(c.findCounter(HDFS_WRITE).getCounter() >=
-               c.findCounter(LOCAL_READ).getCounter() + 1024 * 1024);
+    final long hdfsWritten = c.findCounter(HDFS_WRITE).getCounter();
+    final long localRead = c.findCounter(LOCAL_READ).getCounter();
+    assertTrue("Expected at least 1MB fewer bytes read from local (" +
+        localRead + ") than written to HDFS (" + hdfsWritten + ")",
+        hdfsWritten >= localRead + 1024 * 1024);
   }
 
   public void testReduceFromMem() throws Exception {
     JobConf job = mrCluster.createJobConf();
     job.set("mapred.job.reduce.input.buffer.percent", "1.0");
+    job.setNumMapTasks(3);
     Counters c = runJob(job);
-    assertTrue(c.findCounter(LOCAL_READ).getCounter() == 0);
+    final long localRead = c.findCounter(LOCAL_READ).getCounter();
+    assertTrue("Non-zero read from local: " + localRead, localRead == 0);
   }
 
 }