Posted to mapreduce-commits@hadoop.apache.org by to...@apache.org on 2012/02/10 02:49:30 UTC

svn commit: r1242635 [10/10] - in /hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project: ./ bin/ conf/ hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/ hadoop-mapreduce-client/hadoop-mapreduce-client-app/...

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm Fri Feb 10 01:49:08 2012
@@ -476,7 +476,7 @@ Hadoop MapReduce Next Generation - Clust
     designated server:
   
 ----
-  $ $YARN_HOME/bin/yarn start historyserver --config $HADOOP_CONF_DIR  
+  $ $YARN_HOME/bin/mapred start historyserver --config $YARN_CONF_DIR  
 ----    	  
 
     * Hadoop Shutdown      
@@ -519,7 +519,7 @@ Hadoop MapReduce Next Generation - Clust
     designated server:
   
 ----
-  $ $YARN_HOME/bin/yarn stop historyserver --config $HADOOP_CONF_DIR  
+  $ $YARN_HOME/bin/mapred stop historyserver --config $YARN_CONF_DIR  
 ----    	  
 
     
@@ -1020,7 +1020,7 @@ KVNO Timestamp         Principal
     designated server as <mapred>:
   
 ----
-[mapred]$ $YARN_HOME/bin/yarn start historyserver --config $HADOOP_CONF_DIR  
+[mapred]$ $YARN_HOME/bin/mapred start historyserver --config $YARN_CONF_DIR  
 ----    	  
 
     * Hadoop Shutdown      
@@ -1063,7 +1063,7 @@ KVNO Timestamp         Principal
     designated server as <mapred>:
 
 ----
-[mapred]$ $YARN_HOME/bin/yarn stop historyserver --config $HADOOP_CONF_DIR  
+[mapred]$ $YARN_HOME/bin/mapred stop historyserver --config $YARN_CONF_DIR  
 ----    	  
     
 * {Web Interfaces}      

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/site.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/site.xml?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/site.xml (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/site.xml Fri Feb 10 01:49:08 2012
@@ -11,7 +11,7 @@
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
 -->
-<project name="Apache Hadoop 0.23">
+<project name="Apache Hadoop ${project.version}">
 
   <skin>
     <groupId>org.apache.maven.skins</groupId>

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/ivy/libraries.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/ivy/libraries.properties?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/ivy/libraries.properties (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/ivy/libraries.properties Fri Feb 10 01:49:08 2012
@@ -81,6 +81,6 @@ wagon-http.version=1.0-beta-2
 xmlenc.version=0.52
 xerces.version=1.4.4
 
-jackson.version=1.8.2
+jackson.version=1.8.8
 yarn.version=0.24.0-SNAPSHOT
 hadoop-mapreduce.version=0.24.0-SNAPSHOT

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/c++/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 01:49:08 2012
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/c++:1159757-1237154
+/hadoop/common/trunk/hadoop-mapreduce-project/src/c++:1159757-1242632
 /hadoop/core/branches/branch-0.19/mapred/src/c++:713112
 /hadoop/core/trunk/src/c++:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 01:49:08 2012
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib:1152502-1237154
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib:1152502-1242632
 /hadoop/core/branches/branch-0.19/mapred/src/contrib:713112
 /hadoop/core/trunk/src/contrib:784664-785643

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/block_forensics/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 01:49:08 2012
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/block_forensics:1152502-1237154
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/block_forensics:1152502-1242632
 /hadoop/core/branches/branch-0.19/hdfs/src/contrib/block_forensics:713112
 /hadoop/core/branches/branch-0.19/mapred/src/contrib/block_forensics:713112
 /hadoop/core/trunk/src/contrib/block_forensics:784664-785643

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/build-contrib.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 01:49:08 2012
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/build-contrib.xml:1161333-1237154
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/build-contrib.xml:1161333-1242632
 /hadoop/core/branches/branch-0.19/mapred/src/contrib/build-contrib.xml:713112
 /hadoop/core/trunk/src/contrib/build-contrib.xml:776175-786373

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/build.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 01:49:08 2012
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/build.xml:1161333-1237154
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/build.xml:1161333-1242632
 /hadoop/core/branches/branch-0.19/mapred/src/contrib/build.xml:713112
 /hadoop/core/trunk/src/contrib/build.xml:776175-786373

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/data_join/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 01:49:08 2012
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/data_join:1159757-1237154
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/data_join:1159757-1242632
 /hadoop/core/branches/branch-0.19/mapred/src/contrib/data_join:713112
 /hadoop/core/trunk/src/contrib/data_join:776175-786373

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/eclipse-plugin/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 01:49:08 2012
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/eclipse-plugin:1159757-1237154
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/eclipse-plugin:1159757-1242632
 /hadoop/core/branches/branch-0.19/core/src/contrib/eclipse-plugin:713112
 /hadoop/core/branches/branch-0.19/mapred/src/contrib/eclipse-plugin:713112
 /hadoop/core/trunk/src/contrib/eclipse-plugin:776175-785643

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Statistics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Statistics.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Statistics.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Statistics.java Fri Feb 10 01:49:08 2012
@@ -101,13 +101,15 @@ public class Statistics implements Compo
     }
     
     int maps = 0;
+    int reds = 0;
     if (jobdesc == null) {
       throw new IllegalArgumentException(
         " JobStory not available for job " + job.getJobName());
     } else {
       maps = jobdesc.getNumberMaps();
+      reds = jobdesc.getNumberReduces();
     }
-    JobStats stats = new JobStats(maps,job);
+    JobStats stats = new JobStats(maps, reds, job);
     jobMaps.put(seq,stats);
   }
 
@@ -258,15 +260,20 @@ public class Statistics implements Compo
    */
   static class JobStats {
     private int noOfMaps;
+    private int noOfReds;
     private Job job;
 
-    public JobStats(int noOfMaps,Job job){
+    public JobStats(int noOfMaps, int numOfReds, Job job) {
       this.job = job;
       this.noOfMaps = noOfMaps;
+      this.noOfReds = numOfReds;
     }
     public int getNoOfMaps() {
       return noOfMaps;
     }
+    public int getNoOfReds() {
+      return noOfReds;
+    }
 
     /**
      * Returns the job ,

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java Fri Feb 10 01:49:08 2012
@@ -31,13 +31,12 @@ import org.apache.hadoop.tools.rumen.Job
 
 import java.io.IOException;
 import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.locks.Condition;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 public class StressJobFactory extends JobFactory<Statistics.ClusterStats> {
   public static final Log LOG = LogFactory.getLog(StressJobFactory.class);
 
   private final LoadStatus loadStatus = new LoadStatus();
-  private final Condition condUnderloaded = this.lock.newCondition();
   /**
    * The minimum ratio between pending+running map tasks (aka. incomplete map
    * tasks) and cluster map slot capacity for us to consider the cluster is
@@ -150,23 +149,32 @@ public class StressJobFactory extends Jo
         }
         LOG.info("START STRESS @ " + System.currentTimeMillis());
         while (!Thread.currentThread().isInterrupted()) {
-          lock.lock();
           try {
             while (loadStatus.overloaded()) {
-              //Wait while JT is overloaded.
+              if (LOG.isDebugEnabled()) {
+                LOG.debug("Cluster overloaded in run! Sleeping...");
+              }
+              // sleep before re-checking the overload status
               try {
-                condUnderloaded.await();
+                Thread.sleep(1000);
               } catch (InterruptedException ie) {
                 return;
               }
             }
 
             while (!loadStatus.overloaded()) {
+              if (LOG.isDebugEnabled()) {
+                LOG.debug("Cluster underloaded in run! Stressing...");
+              }
               try {
+                //TODO This in-line read can block submission for large jobs.
                 final JobStory job = getNextJobFiltered();
                 if (null == job) {
                   return;
                 }
+                if (LOG.isDebugEnabled()) {
+                  LOG.debug("Job Selected: " + job.getJobID());
+                }
                 submitter.add(
                   jobCreator.createGridmixJob(
                     conf, 0L, job, scratch, 
@@ -175,14 +183,20 @@ public class StressJobFactory extends Jo
                     sequence.getAndIncrement()));
                 // TODO: We need to take care of scenario when one map/reduce
                 // takes more than 1 slot.
-                loadStatus.mapSlotsBackfill -= 
-                  calcEffectiveIncompleteMapTasks(
-                    loadStatus.mapSlotCapacity, job.getNumberMaps(), 0.0f);
-                loadStatus.reduceSlotsBackfill -= 
-                  calcEffectiveIncompleteReduceTasks(
-                    loadStatus.reduceSlotCapacity, job.getNumberReduces(), 
-                    0.0f);
-                --loadStatus.numJobsBackfill;
+                
+                // Update the shared load status through its synchronized mutators
+                int incompleteMapTasks = (int) calcEffectiveIncompleteMapTasks(
+                                                 loadStatus.getMapCapacity(), 
+                                                 job.getNumberMaps(), 0.0f);
+                loadStatus.decrementMapLoad(incompleteMapTasks);
+                
+                int incompleteReduceTasks = 
+                  (int) calcEffectiveIncompleteReduceTasks(
+                          loadStatus.getReduceCapacity(), 
+                          job.getNumberReduces(), 0.0f);
+                loadStatus.decrementReduceLoad(incompleteReduceTasks);
+                  
+                loadStatus.decrementJobLoad(1);
               } catch (IOException e) {
                 LOG.error("Error while submitting the job ", e);
                 error = e;
@@ -191,7 +205,7 @@ public class StressJobFactory extends Jo
 
             }
           } finally {
-            lock.unlock();
+            // nothing to unlock; LoadStatus now synchronizes itself
           }
         }
       } catch (InterruptedException e) {
@@ -210,19 +224,11 @@ public class StressJobFactory extends Jo
    */
   @Override
   public void update(Statistics.ClusterStats item) {
-    lock.lock();
+    ClusterStatus clusterMetrics = item.getStatus();
     try {
-      ClusterStatus clusterMetrics = item.getStatus();
-      try {
-        checkLoadAndGetSlotsToBackfill(item,clusterMetrics);
-      } catch (Exception e) {
-        LOG.error("Couldn't get the new Status",e);
-      }
-      if (!loadStatus.overloaded()) {
-        condUnderloaded.signalAll();
-      }
-    } finally {
-      lock.unlock();
+      checkLoadAndGetSlotsToBackfill(item, clusterMetrics);
+    } catch (Exception e) {
+      LOG.error("Couldn't get the new Status",e);
     }
   }
 
@@ -254,18 +260,25 @@ public class StressJobFactory extends Jo
    */
   private void checkLoadAndGetSlotsToBackfill(
     ClusterStats stats, ClusterStatus clusterStatus) throws IOException, InterruptedException {
-    loadStatus.mapSlotCapacity = clusterStatus.getMaxMapTasks();
-    loadStatus.reduceSlotCapacity = clusterStatus.getMaxReduceTasks();
     
+    // update the max cluster capacity in case it has changed
+    int mapCapacity = clusterStatus.getMaxMapTasks();
+    loadStatus.updateMapCapacity(mapCapacity);
+    
+    int reduceCapacity = clusterStatus.getMaxReduceTasks();
     
-    loadStatus.numJobsBackfill = 
-      (int) (maxJobTrackerRatio * clusterStatus.getTaskTrackers())
-        - stats.getNumRunningJob();
-    if (loadStatus.numJobsBackfill <= 0) {
+    loadStatus.updateReduceCapacity(reduceCapacity);
+    
+    int numTrackers = clusterStatus.getTaskTrackers();
+    
+    int jobLoad = 
+      (int) (maxJobTrackerRatio * numTrackers) - stats.getNumRunningJob();
+    loadStatus.updateJobLoad(jobLoad);
+    if (loadStatus.getJobLoad() <= 0) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug(System.currentTimeMillis() + " Overloaded is "
+        LOG.debug(System.currentTimeMillis() + " [JobLoad] Overloaded is "
                   + Boolean.TRUE.toString() + " NumJobsBackfill is "
-                  + loadStatus.numJobsBackfill);
+                  + loadStatus.getJobLoad());
       }
       return; // stop calculation because we know it is overloaded.
     }
@@ -275,56 +288,84 @@ public class StressJobFactory extends Jo
       float mapProgress = job.getJob().mapProgress();
       int noOfMaps = job.getNoOfMaps();
       incompleteMapTasks += 
-        calcEffectiveIncompleteMapTasks(
-          clusterStatus.getMaxMapTasks(), noOfMaps, mapProgress);
+        calcEffectiveIncompleteMapTasks(mapCapacity, noOfMaps, mapProgress);
     }
-    loadStatus.mapSlotsBackfill = 
-    (int) ((overloadMapTaskMapSlotRatio * clusterStatus.getMaxMapTasks()) 
-           - incompleteMapTasks);
-    if (loadStatus.mapSlotsBackfill <= 0) {
+    
+    int mapSlotsBackFill = 
+      (int) ((overloadMapTaskMapSlotRatio * mapCapacity) - incompleteMapTasks);
+    loadStatus.updateMapLoad(mapSlotsBackFill);
+    
+    if (loadStatus.getMapLoad() <= 0) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug(System.currentTimeMillis() + " Overloaded is "
+        LOG.debug(System.currentTimeMillis() + " [MAP-LOAD] Overloaded is "
                   + Boolean.TRUE.toString() + " MapSlotsBackfill is "
-                  + loadStatus.mapSlotsBackfill);
+                  + loadStatus.getMapLoad());
       }
       return; // stop calculation because we know it is overloaded.
     }
 
     float incompleteReduceTasks = 0; // include pending & running reduce tasks.
     for (JobStats job : ClusterStats.getRunningJobStats()) {
-      int noOfReduces = job.getJob().getNumReduceTasks();
+      // use the reduce count cached in JobStats
+      int noOfReduces = job.getNoOfReds();
       if (noOfReduces > 0) {
         float reduceProgress = job.getJob().reduceProgress();
         incompleteReduceTasks += 
-          calcEffectiveIncompleteReduceTasks(
-            clusterStatus.getMaxReduceTasks(), noOfReduces, reduceProgress);
+          calcEffectiveIncompleteReduceTasks(reduceCapacity, noOfReduces, 
+                                             reduceProgress);
       }
     }
-    loadStatus.reduceSlotsBackfill = 
-      (int) ((overloadReduceTaskReduceSlotRatio * clusterStatus.getMaxReduceTasks()) 
+    
+    int reduceSlotsBackFill = 
+      (int)((overloadReduceTaskReduceSlotRatio * reduceCapacity) 
              - incompleteReduceTasks);
-    if (loadStatus.reduceSlotsBackfill <= 0) {
+    loadStatus.updateReduceLoad(reduceSlotsBackFill);
+    if (loadStatus.getReduceLoad() <= 0) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug(System.currentTimeMillis() + " Overloaded is "
+        LOG.debug(System.currentTimeMillis() + " [REDUCE-LOAD] Overloaded is "
                   + Boolean.TRUE.toString() + " ReduceSlotsBackfill is "
-                  + loadStatus.reduceSlotsBackfill);
+                  + loadStatus.getReduceLoad());
       }
       return; // stop calculation because we know it is overloaded.
     }
 
     if (LOG.isDebugEnabled()) {
-      LOG.debug(System.currentTimeMillis() + " Overloaded is "
+      LOG.debug(System.currentTimeMillis() + " [OVERALL] Overloaded is "
                 + Boolean.FALSE.toString() + "Current load Status is " 
                 + loadStatus);
     }
   }
 
   static class LoadStatus {
-    int mapSlotsBackfill;
-    int mapSlotCapacity;
-    int reduceSlotsBackfill;
-    int reduceSlotCapacity;
-    int numJobsBackfill;
+    /**
+     * Additional number of map slots that can be requested before
+     * declaring (by Gridmix STRESS mode) the cluster as overloaded. 
+     */
+    private volatile int mapSlotsBackfill;
+    
+    /**
+     * Determines the total map slot capacity of the cluster.
+     */
+    private volatile int mapSlotCapacity;
+    
+    /**
+     * Additional number of reduce slots that can be requested before
+     * declaring (by Gridmix STRESS mode) the cluster as overloaded.
+     */
+    private volatile int reduceSlotsBackfill;
+    
+    /**
+     * Determines the total reduce slot capacity of the cluster.
+     */
+    private volatile int reduceSlotCapacity;
+
+    /**
+     * Determines the max count of running jobs in the cluster.
+     */
+    private volatile int numJobsBackfill;
+    
+    // set the default to true
+    private AtomicBoolean overloaded = new AtomicBoolean(true);
 
     /**
      * Construct the LoadStatus in an unknown state - assuming the cluster is
@@ -339,12 +380,76 @@ public class StressJobFactory extends Jo
       reduceSlotCapacity = -1;
     }
     
-    public boolean overloaded() {
-      return (mapSlotsBackfill <= 0) || (reduceSlotsBackfill <= 0)
-             || (numJobsBackfill <= 0);
+    public synchronized int getMapLoad() {
+      return mapSlotsBackfill;
+    }
+    
+    public synchronized int getMapCapacity() {
+      return mapSlotCapacity;
+    }
+    
+    public synchronized int getReduceLoad() {
+      return reduceSlotsBackfill;
+    }
+    
+    public synchronized int getReduceCapacity() {
+      return reduceSlotCapacity;
+    }
+    
+    public synchronized int getJobLoad() {
+      return numJobsBackfill;
+    }
+    
+    public synchronized void decrementMapLoad(int mapSlotsConsumed) {
+      this.mapSlotsBackfill -= mapSlotsConsumed;
+      updateOverloadStatus();
+    }
+    
+    public synchronized void decrementReduceLoad(int reduceSlotsConsumed) {
+      this.reduceSlotsBackfill -= reduceSlotsConsumed;
+      updateOverloadStatus();
+    }
+
+    public synchronized void decrementJobLoad(int numJobsConsumed) {
+      this.numJobsBackfill -= numJobsConsumed;
+      updateOverloadStatus();
+    }
+    
+    public synchronized void updateMapCapacity(int mapSlotsCapacity) {
+      this.mapSlotCapacity = mapSlotsCapacity;
+      updateOverloadStatus();
+    }
+    
+    public synchronized void updateReduceCapacity(int reduceSlotsCapacity) {
+      this.reduceSlotCapacity = reduceSlotsCapacity;
+      updateOverloadStatus();
+    }
+    
+    public synchronized void updateMapLoad(int mapSlotsBackfill) {
+      this.mapSlotsBackfill = mapSlotsBackfill;
+      updateOverloadStatus();
+    }
+    
+    public synchronized void updateReduceLoad(int reduceSlotsBackfill) {
+      this.reduceSlotsBackfill = reduceSlotsBackfill;
+      updateOverloadStatus();
+    }
+    
+    public synchronized void updateJobLoad(int numJobsBackfill) {
+      this.numJobsBackfill = numJobsBackfill;
+      updateOverloadStatus();
+    }
+    
+    private synchronized void updateOverloadStatus() {
+      overloaded.set((mapSlotsBackfill <= 0) || (reduceSlotsBackfill <= 0)
+                     || (numJobsBackfill <= 0));
+    }
+    
+    public synchronized boolean overloaded() {
+      return overloaded.get();
     }
     
-    public String toString() {
+    public synchronized String toString() {
     // TODO Use StringBuilder instead
       return " Overloaded = " + overloaded()
              + ", MapSlotBackfill = " + mapSlotsBackfill 

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java Fri Feb 10 01:49:08 2012
@@ -101,10 +101,17 @@ public class TestGridmixSubmission {
       retiredJobs = new LinkedBlockingQueue<Job>();
     }
 
-    public void verify(ArrayList<JobStory> submitted) throws Exception {
+    public void verify(ArrayList<JobStory> submitted, Configuration clientConf) 
+    throws Exception {
       final ArrayList<Job> succeeded = new ArrayList<Job>();
       assertEquals("Bad job count", expected, retiredJobs.drainTo(succeeded));
       final HashMap<String,JobStory> sub = new HashMap<String,JobStory>();
+      
+      // define the input and output paths for the run
+      final Path in = new Path("foo").makeQualified(GridmixTestUtils.dfs);
+      final Path out = 
+        new Path(in, clientConf.get(Gridmix.GRIDMIX_OUT_DIR, "gridmix"));
+      
       for (JobStory spec : submitted) {
         sub.put(spec.getJobID().toString(), spec);
       }
@@ -115,8 +122,7 @@ public class TestGridmixSubmission {
         Configuration conf = job.getConfiguration();
         if (GenerateData.JOB_NAME.equals(jobName)) {
           verifyQueue(conf, jobName);
-          final Path in = new Path("foo").makeQualified(GridmixTestUtils.dfs);
-          final Path out = new Path("/gridmix").makeQualified(GridmixTestUtils.dfs);
+          
           final ContentSummary generated = GridmixTestUtils.dfs.getContentSummary(in);
           assertTrue("Mismatched data gen", // +/- 100k for logs
               (GENDATA << 20) < generated.getLength() + GENSLOP ||
@@ -164,7 +170,7 @@ public class TestGridmixSubmission {
 
         final FileStatus stat = 
           GridmixTestUtils.dfs.getFileStatus(
-            new Path(GridmixTestUtils.DEST, "" + Integer.valueOf(jobSeqNum)));
+            new Path(out, "" + Integer.valueOf(jobSeqNum)));
         assertEquals("Wrong owner for " + jobName, spec.getUser(),
                      stat.getOwner());
 
@@ -337,8 +343,9 @@ public class TestGridmixSubmission {
     private JobFactory factory;
     private TestMonitor monitor;
 
-    public void checkMonitor() throws Exception {
-      monitor.verify(((DebugJobFactory.Debuggable)factory).getSubmitted());
+    public void checkMonitor(Configuration conf) throws Exception {
+      monitor.verify(((DebugJobFactory.Debuggable)factory).getSubmitted(), 
+                     conf);
     }
 
     @Override
@@ -534,9 +541,11 @@ public class TestGridmixSubmission {
       GridmixTestUtils.dfs.setPermission(root, new FsPermission((short)0777));
       int res = ToolRunner.run(conf, client, argv);
       assertEquals("Client exited with nonzero status", 0, res);
-      client.checkMonitor();
+      client.checkMonitor(conf);
     } catch (Exception e) {
       e.printStackTrace();
+      // fail the test if there is an exception
+      throw new RuntimeException(e);
     } finally {
       in.getFileSystem(conf).delete(in, true);
       out.getFileSystem(conf).delete(out, true);
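
With verify() now taking the client Configuration, the expected output
directory follows GRIDMIX_OUT_DIR instead of the hard-coded /gridmix. A rough
standalone illustration of the path derivation (the local filesystem stands in
for the test DFS, and the literal key string is an assumption for
Gridmix.GRIDMIX_OUT_DIR):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class OutDirDemo {
    public static void main(String[] args) throws Exception {
      Configuration clientConf = new Configuration();
      FileSystem fs = FileSystem.getLocal(clientConf);

      // qualify the input root against the filesystem, as the test does
      Path in = new Path("foo").makeQualified(fs);
      // resolve the output dir under it, honoring any configured override
      Path out = new Path(in, clientConf.get("gridmix.output.directory", "gridmix"));
      System.out.println("in = " + in + ", out = " + out);
    }
  }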

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSummary.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSummary.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSummary.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/test/org/apache/hadoop/mapred/gridmix/TestGridmixSummary.java Fri Feb 10 01:49:08 2012
@@ -338,7 +338,7 @@ public class TestGridmixSummary {
         return isSuccessful;
       };
     };
-    return new JobStats(numMaps, fakeJob);
+    return new JobStats(numMaps, numReds, fakeJob);
   }
   
   /**

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/index/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 01:49:08 2012
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/index:1159757-1237154
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/index:1159757-1242632
 /hadoop/core/branches/branch-0.19/mapred/src/contrib/index:713112
 /hadoop/core/trunk/src/contrib/index:776175-786373

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java Fri Feb 10 01:49:08 2012
@@ -108,7 +108,7 @@ public class RaidBlockSender implements 
        BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
        short version = header.getVersion();
 
-        if (version != FSDataset.METADATA_VERSION) {
+        if (version != BlockMetadataHeader.VERSION) {
           LOG.warn("Wrong version (" + version + ") for metadata file for "
               + block + " ignoring ...");
         }

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/BlockFixer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/BlockFixer.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/BlockFixer.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/BlockFixer.java Fri Feb 10 01:49:08 2012
@@ -51,7 +51,7 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset;
+import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.server.datanode.RaidBlockSender;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -646,7 +646,7 @@ public abstract class BlockFixer extends
       DataOutputStream mdOut = new DataOutputStream(mdOutBase);
       
       // First, write out the version.
-      mdOut.writeShort(FSDataset.METADATA_VERSION);
+      mdOut.writeShort(BlockMetadataHeader.VERSION);
       
       // Create a summer and write out its header.
       int bytesPerChecksum = conf.getInt("io.bytes.per.checksum", 512);
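
Both raid fixes replace FSDataset.METADATA_VERSION with
BlockMetadataHeader.VERSION, so the writer (BlockFixer) and the reader
(RaidBlockSender) stay pinned to one constant. A toy round-trip showing why
the two sides must agree (plain java.io, not the actual HDFS metadata layout):

  import java.io.*;

  public class VersionedHeaderDemo {
    static final short VERSION = 2;   // stand-in for BlockMetadataHeader.VERSION

    public static void main(String[] args) throws IOException {
      ByteArrayOutputStream buf = new ByteArrayOutputStream();
      DataOutputStream mdOut = new DataOutputStream(buf);
      mdOut.writeShort(VERSION);      // writer stamps the shared constant first
      mdOut.close();

      DataInputStream mdIn =
        new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
      short version = mdIn.readShort();
      if (version != VERSION) {       // reader validates against the same constant
        System.err.println("Wrong version (" + version + ") ignoring ...");
      }
    }
  }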

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/vaidya/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 01:49:08 2012
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/vaidya:1159757-1237154
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/vaidya:1159757-1242632
 /hadoop/core/branches/branch-0.19/mapred/src/contrib/vaidya:713112
 /hadoop/core/trunk/src/contrib/vaidya:776175-786373

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/examples/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 01:49:08 2012
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/examples:1152502-1237154
+/hadoop/common/trunk/hadoop-mapreduce-project/src/examples:1152502-1242632
 /hadoop/core/branches/branch-0.19/mapred/src/examples:713112
 /hadoop/core/trunk/src/examples:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 01:49:08 2012
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/java:1152502-1237154
+/hadoop/common/trunk/hadoop-mapreduce-project/src/java:1152502-1242632
 /hadoop/core/branches/branch-0.19/mapred/src/java:713112
 /hadoop/core/trunk/src/mapred:776175-785643

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/java/mapred-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/java/mapred-default.xml?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/java/mapred-default.xml (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/java/mapred-default.xml Fri Feb 10 01:49:08 2012
@@ -434,18 +434,6 @@
 </property>
 
 <property>
-  <name>mapreduce.task.tmp.dir</name>
-  <value>./tmp</value>
-  <description> To set the value of tmp directory for map and reduce tasks.
-  If the value is an absolute path, it is directly assigned. Otherwise, it is
-  prepended with task's working directory. The java tasks are executed with
-  option -Djava.io.tmpdir='the absolute path of the tmp dir'. Pipes and
-  streaming are set with environment variable,
-   TMPDIR='the absolute path of the tmp dir'
-  </description>
-</property>
-
-<property>
   <name>mapreduce.map.log.level</name>
   <value>INFO</value>
   <description>The logging level for the map task. The allowed levels are:
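
The removed description encodes a small resolution rule worth keeping in mind:
an absolute mapreduce.task.tmp.dir is used as-is, while a relative one is
anchored under the task's working directory. A sketch of that rule (a
hypothetical helper, not MapReduce internals):

  import java.io.File;

  public class TmpDirResolver {
    // absolute values pass through; relative ones are prepended
    // with the task's working directory
    static String resolveTmpDir(String value, String workDir) {
      File f = new File(value);
      return f.isAbsolute() ? value : new File(workDir, value).getPath();
    }

    public static void main(String[] args) {
      // prints /work/attempt_0/./tmp
      System.out.println(resolveTmpDir("./tmp", "/work/attempt_0"));
      // prints /tmp/task
      System.out.println(resolveTmpDir("/tmp/task", "/work/attempt_0"));
    }
  }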

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 01:49:08 2012
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred:1152502-1237154
+/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred:1152502-1242632
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred:713112
 /hadoop/core/trunk/src/test/mapred:776175-785643

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 01:49:08 2012
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs:1159757-1237154
+/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs:1159757-1242632
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred/org/apache/hadoop/fs:713112
 /hadoop/core/trunk/src/test/mapred/org/apache/hadoop/fs:776175-785643
 /hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs:817878-835934

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 01:49:08 2012
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/hdfs:1152502-1237154
+/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/hdfs:1152502-1242632
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred/org/apache/hadoop/hdfs:713112
 /hadoop/core/trunk/src/test/mapred/org/apache/hadoop/hdfs:776175-785643
 /hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/hdfs:817878-835934

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 01:49:08 2012
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc:1159757-1237154
+/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc:1159757-1242632
 /hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs-with-mr/org/apache/hadoop/ipc:713112
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred/org/apache/hadoop/ipc:713112
 /hadoop/core/trunk/src/test/hdfs-with-mr/org/apache/hadoop/ipc:776175-784663

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSubmitJob.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSubmitJob.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSubmitJob.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSubmitJob.java Fri Feb 10 01:49:08 2012
@@ -33,7 +33,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol;
+import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -148,16 +148,10 @@ public class TestSubmitJob {
         conf, NetUtils.getSocketFactory(conf, ClientProtocol.class));
   }
 
-  static ClientNamenodeWireProtocol getDFSClient(
-      Configuration conf, UserGroupInformation ugi) 
-  throws IOException {
-    return (ClientNamenodeWireProtocol) 
-      RPC.getProxy(ClientNamenodeWireProtocol.class, 
-          ClientNamenodeWireProtocol.versionID, 
-        NameNode.getAddress(conf), ugi, 
-        conf, 
-        NetUtils.getSocketFactory(conf, 
-            ClientNamenodeWireProtocol.class));
+  static org.apache.hadoop.hdfs.protocol.ClientProtocol getDFSClient(
+      Configuration conf, UserGroupInformation ugi) throws IOException {
+    return new ClientNamenodeProtocolTranslatorPB(NameNode.getAddress(conf),
+        conf, ugi);
   }
   
   /**
@@ -226,7 +220,7 @@ public class TestSubmitJob {
       UserGroupInformation user2 = 
         TestMiniMRWithDFSWithDistinctUsers.createUGI("user2", false);
       JobConf conf_other = mr.createJobConf();
-      ClientNamenodeWireProtocol client = 
+      org.apache.hadoop.hdfs.protocol.ClientProtocol client = 
         getDFSClient(conf_other, user2);
 
       // try accessing mapred.system.dir/jobid/*
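
The test now obtains its ClientProtocol through the protobuf translator rather
than RPC.getProxy against the R23 compatibility wire protocol; since the
translator implements ClientProtocol directly, the versionID and socket-factory
plumbing goes away. Condensed from the hunk above (same calls, merely
isolated):

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.protocol.ClientProtocol;
  import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
  import org.apache.hadoop.hdfs.server.namenode.NameNode;
  import org.apache.hadoop.security.UserGroupInformation;

  class DfsClientHelper {
    static ClientProtocol getDFSClient(Configuration conf, UserGroupInformation ugi)
        throws IOException {
      // the translator wraps the PB RPC proxy and exposes ClientProtocol
      return new ClientNamenodeProtocolTranslatorPB(NameNode.getAddress(conf),
          conf, ugi);
    }
  }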

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/webapps/job/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 01:49:08 2012
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/webapps/job:1152502-1237154
+/hadoop/common/trunk/hadoop-mapreduce-project/src/webapps/job:1152502-1242632
 /hadoop/core/branches/branch-0.19/mapred/src/webapps/job:713112
 /hadoop/core/trunk/src/webapps/job:776175-785643