Posted to mapreduce-commits@hadoop.apache.org by at...@apache.org on 2011/12/27 19:18:41 UTC

svn commit: r1224965 [2/3] - in /hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project: ./ conf/ hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/ hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/mai...

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java?rev=1224965&r1=1224964&r2=1224965&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java Tue Dec 27 18:18:36 2011
@@ -44,10 +44,13 @@ import org.apache.hadoop.mapreduce.jobhi
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.app.MRApp;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
 import org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.MRAppWithHistory;
 import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
@@ -62,10 +65,12 @@ import org.junit.Test;
 public class TestJobHistoryParsing {
   private static final Log LOG = LogFactory.getLog(TestJobHistoryParsing.class);
 
+  private static final String RACK_NAME = "/MyRackName";
+
   public static class MyResolver implements DNSToSwitchMapping {
     @Override
     public List<String> resolve(List<String> names) {
-      return Arrays.asList(new String[]{"/MyRackName"});
+      return Arrays.asList(new String[]{RACK_NAME});
     }
   }
 
@@ -172,7 +177,7 @@ public class TestJobHistoryParsing {
 
         // Verify rack-name
         Assert.assertEquals("rack-name is incorrect", taskAttemptInfo
-            .getRackname(), "/MyRackName");
+            .getRackname(), RACK_NAME);
       }
     }
 
@@ -217,9 +222,89 @@ public class TestJobHistoryParsing {
     Assert.assertEquals("Status does not match", "SUCCEEDED",
         jobSummaryElements.get("status"));
   }
+  
+  @Test
+  public void testHistoryParsingForFailedAttempts() throws Exception {
+    Configuration conf = new Configuration();
+    conf
+        .setClass(
+            CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
+            MyResolver.class, DNSToSwitchMapping.class);
+    RackResolver.init(conf);
+    MRApp app = new MRAppWithHistoryWithFailedAttempt(2, 1, true, this.getClass().getName(),
+        true);
+    app.submit(conf);
+    Job job = app.getContext().getAllJobs().values().iterator().next();
+    JobId jobId = job.getID();
+    app.waitForState(job, JobState.SUCCEEDED);
+    
+    // make sure all events are flushed
+    app.waitForState(Service.STATE.STOPPED);
+
+    String jobhistoryDir = JobHistoryUtils
+        .getHistoryIntermediateDoneDirForUser(conf);
+    JobHistory jobHistory = new JobHistory();
+    jobHistory.init(conf);
+
+    JobIndexInfo jobIndexInfo = jobHistory.getJobMetaInfo(jobId)
+        .getJobIndexInfo();
+    String jobhistoryFileName = FileNameIndexUtils
+        .getDoneFileName(jobIndexInfo);
+
+    Path historyFilePath = new Path(jobhistoryDir, jobhistoryFileName);
+    FSDataInputStream in = null;
+    FileContext fc = null;
+    try {
+      fc = FileContext.getFileContext(conf);
+      in = fc.open(fc.makeQualified(historyFilePath));
+    } catch (IOException ioe) {
+      LOG.info("Can not open history file: " + historyFilePath, ioe);
+      throw (new Exception("Can not open History File"));
+    }
+
+    JobHistoryParser parser = new JobHistoryParser(in);
+    JobInfo jobInfo = parser.parse();
+    int numFailedAttempts = 0;
+    Map<TaskID, TaskInfo> allTasks = jobInfo.getAllTasks();
+    for (Task task : job.getTasks().values()) {
+      TaskInfo taskInfo = allTasks.get(TypeConverter.fromYarn(task.getID()));
+      for (TaskAttempt taskAttempt : task.getAttempts().values()) {
+        TaskAttemptInfo taskAttemptInfo = taskInfo.getAllTaskAttempts().get(
+            TypeConverter.fromYarn((taskAttempt.getID())));
+        // Verify rack-name for all task attempts
+        Assert.assertEquals("rack-name is incorrect", taskAttemptInfo
+            .getRackname(), RACK_NAME);
+        if (taskAttemptInfo.getTaskStatus().equals("FAILED")) {
+          numFailedAttempts++;
+        }
+      }
+    }
+    Assert.assertEquals("No of Failed tasks doesn't match.", 2, noOffailedAttempts);
+  }
+  
+  static class MRAppWithHistoryWithFailedAttempt extends MRAppWithHistory {
+
+    public MRAppWithHistoryWithFailedAttempt(int maps, int reduces, boolean autoComplete,
+        String testName, boolean cleanOnStart) {
+      super(maps, reduces, autoComplete, testName, cleanOnStart);
+    }
+    
+    @SuppressWarnings("unchecked")
+    @Override
+    protected void attemptLaunched(TaskAttemptId attemptID) {
+      if (attemptID.getTaskId().getId() == 0 && attemptID.getId() == 0) {
+        getContext().getEventHandler().handle(
+            new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
+      } else {
+        getContext().getEventHandler().handle(
+            new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_DONE));
+      }
+    }
+  }
 
   public static void main(String[] args) throws Exception {
     TestJobHistoryParsing t = new TestJobHistoryParsing();
     t.testHistoryParsing();
+    t.testHistoryParsingForFailedAttempts();
   }
 }
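
For reference, the parsing path the new test exercises can be driven standalone. A minimal sketch (hypothetical HistoryDump class; the history-file path passed as args[0] is an assumption), using only the calls shown in the test above:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
    import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;

    public class HistoryDump {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Assumption: args[0] points at a readable job history file.
        Path historyFilePath = new Path(args[0]);
        FileContext fc = FileContext.getFileContext(conf);
        FSDataInputStream in = fc.open(fc.makeQualified(historyFilePath));
        JobHistoryParser parser = new JobHistoryParser(in);
        JobInfo jobInfo = parser.parse();
        System.out.println("tasks parsed: " + jobInfo.getAllTasks().size());
      }
    }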

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/bin/yarn
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/bin/yarn?rev=1224965&r1=1224964&r2=1224965&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/bin/yarn (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/bin/yarn Tue Dec 27 18:18:36 2011
@@ -141,6 +141,8 @@ if [ -d "$YARN_HOME/build/tools" ]; then
   CLASSPATH=${CLASSPATH}:$YARN_HOME/build/tools
 fi
 
+CLASSPATH=${CLASSPATH}:$YARN_HOME/share/hadoop/mapreduce/*
+CLASSPATH=${CLASSPATH}:$YARN_HOME/share/hadoop/mapreduce/lib/*
 
 # so that filenames w/ spaces are handled correctly in loops below
 IFS=

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java?rev=1224965&r1=1224964&r2=1224965&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java Tue Dec 27 18:18:36 2011
@@ -61,4 +61,17 @@ public interface AllocateResponse {
   @Private
   @Unstable
   public abstract void setAMResponse(AMResponse amResponse);
+  
+  
+  /**
+   * Get the number of hosts available on the cluster.
+   * @return the available host count.
+   */
+  @Public
+  @Stable
+  public int getNumClusterNodes();
+  
+  @Private
+  @Unstable
+  public void setNumClusterNodes(int numNodes);
 }
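
On the consumer side, a minimal sketch (hypothetical ClusterSizeProbe helper; the AMRMProtocol proxy and registered ApplicationAttemptId are assumed to already exist, as in the distributed-shell AM below) of how an ApplicationMaster can read the new field:

    import org.apache.hadoop.yarn.api.AMRMProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
    import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
    import org.apache.hadoop.yarn.util.Records;

    public class ClusterSizeProbe {
      // An empty allocate call doubles as a heartbeat; the response now
      // also carries the cluster size.
      static int queryClusterSize(AMRMProtocol rm,
          ApplicationAttemptId attemptId, int responseId)
          throws YarnRemoteException {
        AllocateRequest req = Records.newRecord(AllocateRequest.class);
        req.setApplicationAttemptId(attemptId);
        req.setResponseId(responseId);
        AllocateResponse resp = rm.allocate(req);
        return resp.getNumClusterNodes();
      }
    }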

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java?rev=1224965&r1=1224964&r2=1224965&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java Tue Dec 27 18:18:36 2011
@@ -29,7 +29,8 @@ import org.apache.hadoop.yarn.proto.Yarn
 
 
     
-public class AllocateResponsePBImpl extends ProtoBase<AllocateResponseProto> implements AllocateResponse {
+public class AllocateResponsePBImpl extends ProtoBase<AllocateResponseProto>
+    implements AllocateResponse {
   AllocateResponseProto proto = AllocateResponseProto.getDefaultInstance();
   AllocateResponseProto.Builder builder = null;
   boolean viaProto = false;
@@ -95,7 +96,20 @@ public class AllocateResponsePBImpl exte
       builder.clearAMResponse();
     this.amResponse = aMResponse;
   }
+  
+  @Override
+  public int getNumClusterNodes() {
+    AllocateResponseProtoOrBuilder p = viaProto ? proto : builder;
+    return p.getNumClusterNodes();
+  }
+  
+  @Override
+  public void setNumClusterNodes(int numNodes) {
+    maybeInitBuilder();
+    builder.setNumClusterNodes(numNodes);
+  }
 
+  
   private AMResponsePBImpl convertFromProtoFormat(AMResponseProto p) {
     return new AMResponsePBImpl(p);
   }
@@ -103,7 +117,4 @@ public class AllocateResponsePBImpl exte
   private AMResponseProto convertToProtoFormat(AMResponse t) {
     return ((AMResponsePBImpl)t).getProto();
   }
-
-
-
 }  

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto?rev=1224965&r1=1224964&r2=1224965&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto Tue Dec 27 18:18:36 2011
@@ -59,6 +59,7 @@ message AllocateRequestProto {
 
 message AllocateResponseProto {
   optional AMResponseProto AM_response = 1;
+  optional int32 num_cluster_nodes = 2;
 }
 
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java?rev=1224965&r1=1224964&r2=1224965&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java Tue Dec 27 18:18:36 2011
@@ -184,7 +184,7 @@ public class ApplicationMaster {
   private CopyOnWriteArrayList<ContainerId> releasedContainers = new CopyOnWriteArrayList<ContainerId>();
 
   // Launch threads
-  private List<Thread> launchThreads = new ArrayList<Thread>();	
+  private List<Thread> launchThreads = new ArrayList<Thread>();
 
   /**
    * @param args Command line args
@@ -194,7 +194,7 @@ public class ApplicationMaster {
     try {
       ApplicationMaster appMaster = new ApplicationMaster();
       LOG.info("Initializing ApplicationMaster");
-      boolean doRun = appMaster.init(args);	
+      boolean doRun = appMaster.init(args);
       if (!doRun) {
         System.exit(0);
       }
@@ -202,14 +202,14 @@ public class ApplicationMaster {
     } catch (Throwable t) {
       LOG.fatal("Error running ApplicationMaster", t);
       System.exit(1);
-    }		
+    }
     if (result) {
       LOG.info("Application Master completed successfully. exiting");
       System.exit(0);
     }
     else {
       LOG.info("Application Master failed. exiting");
-      System.exit(2);			
+      System.exit(2);
     }
   }
 
@@ -218,7 +218,7 @@ public class ApplicationMaster {
    */
   private void dumpOutDebugInfo() {
 
-    LOG.info("Dump debug output");		
+    LOG.info("Dump debug output");
     Map<String, String> envs = System.getenv();
     for (Map.Entry<String, String> env : envs.entrySet()) {
       LOG.info("System env: key=" + env.getKey() + ", val=" + env.getValue());
@@ -277,7 +277,7 @@ public class ApplicationMaster {
     if (args.length == 0) {
       printUsage(opts);
       throw new IllegalArgumentException("No args specified for application master to initialize");
-    }		
+    }
 
     if (cliParser.hasOption("help")) {
       printUsage(opts);
@@ -297,8 +297,8 @@ public class ApplicationMaster {
         appAttemptID = ConverterUtils.toApplicationAttemptId(appIdStr);
       } 
       else {
-        throw new IllegalArgumentException("Application Attempt Id not set in the environment");				
-      }	
+        throw new IllegalArgumentException("Application Attempt Id not set in the environment");
+      }
     } else {
       ContainerId containerId = ConverterUtils.toContainerId(envs.get(ApplicationConstants.AM_CONTAINER_ID_ENV));
       appAttemptID = containerId.getApplicationAttemptId();
@@ -338,11 +338,11 @@ public class ApplicationMaster {
     if (envs.containsKey(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION)) {
       shellScriptPath = envs.get(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION);
 
-      if (envs.containsKey(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP)) {				
-        shellScriptPathTimestamp = Long.valueOf(envs.get(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP));				
-      }					
+      if (envs.containsKey(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP)) {
+        shellScriptPathTimestamp = Long.valueOf(envs.get(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP));
+      }
       if (envs.containsKey(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN)) {
-        shellScriptPathLen = Long.valueOf(envs.get(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN));				
+        shellScriptPathLen = Long.valueOf(envs.get(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN));
       }
 
       if (!shellScriptPath.isEmpty()
@@ -351,7 +351,7 @@ public class ApplicationMaster {
         LOG.error("Illegal values in env for shell script path"
             + ", path=" + shellScriptPath
             + ", len=" + shellScriptPathLen
-            + ", timestamp=" + shellScriptPathTimestamp);	
+            + ", timestamp=" + shellScriptPathTimestamp);
         throw new IllegalArgumentException("Illegal values in env for shell script path");
       }
     }
@@ -368,7 +368,7 @@ public class ApplicationMaster {
    * @param opts Parsed command line options
    */
   private void printUsage(Options opts) {
-    new HelpFormatter().printHelp("ApplicationMaster", opts);		
+    new HelpFormatter().printHelp("ApplicationMaster", opts);
   }
 
   /**
@@ -378,7 +378,7 @@ public class ApplicationMaster {
   public boolean run() throws YarnRemoteException {
     LOG.info("Starting ApplicationMaster");
 
-    // Connect to ResourceManager 	
+    // Connect to ResourceManager
     resourceManager = connectToRM();
 
     // Setup local RPC Server to accept status requests directly from clients 
@@ -395,7 +395,7 @@ public class ApplicationMaster {
 
     // A resource ask has to be at least the minimum of the capability of the cluster, the value has to be 
     // a multiple of the min value and cannot exceed the max. 
-    // If it is not an exact multiple of min, the RM will allocate to the nearest multiple of min		
+    // If it is not an exact multiple of min, the RM will allocate to the nearest multiple of min
     if (containerMemory < minMem) {
       LOG.info("Container memory specified below min threshold of cluster. Using min value."
           + ", specified=" + containerMemory
@@ -409,14 +409,14 @@ public class ApplicationMaster {
       containerMemory = maxMem;
     }
 
-    // Setup heartbeat emitter 		
+    // Setup heartbeat emitter
     // TODO poll RM every now and then with an empty request to let RM know that we are alive
     // The heartbeat interval after which an AM is timed out by the RM is defined by a config setting: 
     // RM_AM_EXPIRY_INTERVAL_MS with default defined by DEFAULT_RM_AM_EXPIRY_INTERVAL_MS
     // The allocate calls to the RM count as heartbeats so, for now, this additional heartbeat emitter 
     // is not required.
 
-    // Setup ask for containers from RM		    
+    // Setup ask for containers from RM
     // Send request for containers to RM
     // Until we get our fully allocated quota, we keep on polling RM for containers
     // Keep looping until all the containers are launched and shell script executed on them 
@@ -426,7 +426,7 @@ public class ApplicationMaster {
 
     while (numCompletedContainers.get() < numTotalContainers
         && !appDone) {
-      loopCounter++;		
+      loopCounter++;
 
       // log current state
       LOG.info("Current application state: loop=" + loopCounter 
@@ -435,7 +435,7 @@ public class ApplicationMaster {
           + ", requested=" + numRequestedContainers
           + ", completed=" + numCompletedContainers
           + ", failed=" + numFailedContainers
-          + ", currentAllocated=" + numAllocatedContainers);			
+          + ", currentAllocated=" + numAllocatedContainers);
 
       // Sleep before each loop when asking RM for containers
       // to avoid flooding RM with spurious requests when it 
@@ -444,7 +444,7 @@ public class ApplicationMaster {
       try {
         Thread.sleep(1000);
       } catch (InterruptedException e) {
-        LOG.info("Sleep interrupted " + e.getMessage());				
+        LOG.info("Sleep interrupted " + e.getMessage());
       }
 
       // No. of containers to request 
@@ -457,14 +457,14 @@ public class ApplicationMaster {
       // Setup request to be sent to RM to allocate containers
       List<ResourceRequest> resourceReq = new ArrayList<ResourceRequest>();
       if (askCount > 0) {
-        ResourceRequest containerAsk = setupContainerAskForRM(askCount);			
+        ResourceRequest containerAsk = setupContainerAskForRM(askCount);
         resourceReq.add(containerAsk);
       }
 
       // Send the request to RM 
       LOG.info("Asking RM for containers"
           + ", askCount=" + askCount);
-      AMResponse amResp =	sendContainerAskToRM(resourceReq);			
+      AMResponse amResp = sendContainerAskToRM(resourceReq);
 
       // Retrieve list of allocated containers from the response 
       List<Container> allocatedContainers = amResp.getAllocatedContainers();
@@ -478,10 +478,10 @@ public class ApplicationMaster {
             + ", containerNodeURI=" + allocatedContainer.getNodeHttpAddress()
             + ", containerState" + allocatedContainer.getState()
             + ", containerResourceMemory" + allocatedContainer.getResource().getMemory());
-        //						+ ", containerToken" + allocatedContainer.getContainerToken().getIdentifier().toString());
+        //+ ", containerToken" + allocatedContainer.getContainerToken().getIdentifier().toString());
 
         LaunchContainerRunnable runnableLaunchContainer = new LaunchContainerRunnable(allocatedContainer);
-        Thread launchThread = new Thread(runnableLaunchContainer);	
+        Thread launchThread = new Thread(runnableLaunchContainer);
 
         // launch and start the container on a separate thread to keep the main thread unblocked
         // as all containers may not be allocated at one go.
@@ -492,14 +492,14 @@ public class ApplicationMaster {
       // Check what the current available resources in the cluster are
       // TODO should we do anything if the available resources are not enough? 
       Resource availableResources = amResp.getAvailableResources();
-      LOG.info("Current available resources in the cluster " + availableResources);			
+      LOG.info("Current available resources in the cluster " + availableResources);
 
-      // Check the completed containers 			
+      // Check the completed containers
       List<ContainerStatus> completedContainers = amResp.getCompletedContainersStatuses();
       LOG.info("Got response from RM for container ask, completedCnt=" + completedContainers.size());
-      for (ContainerStatus containerStatus : completedContainers) {				
+      for (ContainerStatus containerStatus : completedContainers) {
         LOG.info("Got container status for containerID= " + containerStatus.getContainerId()
-            + ", state=" + containerStatus.getState()	
+            + ", state=" + containerStatus.getState()
             + ", exitStatus=" + containerStatus.getExitStatus() 
             + ", diagnostics=" + containerStatus.getDiagnostics());
 
@@ -514,7 +514,7 @@ public class ApplicationMaster {
             // shell script failed
             // counts as completed 
             numCompletedContainers.incrementAndGet();
-            numFailedContainers.incrementAndGet();							
+            numFailedContainers.incrementAndGet();
           }
           else { 
             // something else bad happened 
@@ -541,15 +541,15 @@ public class ApplicationMaster {
 
       LOG.info("Current application state: loop=" + loopCounter
           + ", appDone=" + appDone
-          + ", total=" + numTotalContainers					
+          + ", total=" + numTotalContainers
           + ", requested=" + numRequestedContainers
           + ", completed=" + numCompletedContainers
           + ", failed=" + numFailedContainers
-          + ", currentAllocated=" + numAllocatedContainers);	
+          + ", currentAllocated=" + numAllocatedContainers);
 
       // TODO 
       // Add a timeout handling layer 
-      // for misbehaving shell commands			
+      // for misbehaving shell commands
     }
 
     // Join all launched threads
@@ -561,7 +561,7 @@ public class ApplicationMaster {
       } catch (InterruptedException e) {
         LOG.info("Exception thrown in thread join: " + e.getMessage());
         e.printStackTrace();
-      }			
+      }
     }
 
     // When the application completes, it should send a finish application signal 
@@ -610,10 +610,11 @@ public class ApplicationMaster {
      * Helper function to connect to CM
      */
     private void connectToCM() {
-      String cmIpPortStr = container.getNodeId().getHost() + ":" 
-          + container.getNodeId().getPort();		
-      InetSocketAddress cmAddress = NetUtils.createSocketAddr(cmIpPortStr);		
-      LOG.info("Connecting to ResourceManager at " + cmIpPortStr);
+      LOG.debug("Connecting to ContainerManager for containerid=" + container.getId());
+      String cmIpPortStr = container.getNodeId().getHost() + ":"
+          + container.getNodeId().getPort();
+      InetSocketAddress cmAddress = NetUtils.createSocketAddr(cmIpPortStr);
+      LOG.info("Connecting to ContainerManager at " + cmIpPortStr);
       this.cm = ((ContainerManager) rpc.getProxy(ContainerManager.class, cmAddress, conf));
     }
 
@@ -626,7 +627,6 @@ public class ApplicationMaster {
      */
     public void run() {
       // Connect to ContainerManager 
-      LOG.info("Connecting to container manager for containerid=" + container.getId());
       connectToCM();
 
       LOG.info("Setting up container launch container for containerid=" + container.getId());
@@ -654,7 +654,7 @@ public class ApplicationMaster {
       if (!shellScriptPath.isEmpty()) {
         LocalResource shellRsrc = Records.newRecord(LocalResource.class);
         shellRsrc.setType(LocalResourceType.FILE);
-        shellRsrc.setVisibility(LocalResourceVisibility.APPLICATION);	   
+        shellRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
         try {
           shellRsrc.setResource(ConverterUtils.getYarnUrlFromURI(new URI(shellScriptPath)));
         } catch (URISyntaxException e) {
@@ -664,17 +664,17 @@ public class ApplicationMaster {
 
           // A failure scenario on bad input such as invalid shell script path 
           // We know we cannot continue launching the container 
-          // so we should release it. 															 					
+          // so we should release it.
           // TODO
           numCompletedContainers.incrementAndGet();
           numFailedContainers.incrementAndGet();
-          return;					
+          return;
         }
         shellRsrc.setTimestamp(shellScriptPathTimestamp);
         shellRsrc.setSize(shellScriptPathLen);
         localResources.put(ExecShellStringPath, shellRsrc);
-      }			
-      ctx.setLocalResources(localResources);			
+      }
+      ctx.setLocalResources(localResources);
 
       // Set the necessary command to execute on the allocated container 
       Vector<CharSequence> vargs = new Vector<CharSequence>(5);
@@ -686,7 +686,7 @@ public class ApplicationMaster {
         vargs.add(ExecShellStringPath);
       }
 
-      // Set args for the shell command if any			
+      // Set args for the shell command if any
       vargs.add(shellArgs);
       // Add log redirect params
       // TODO
@@ -722,19 +722,19 @@ public class ApplicationMaster {
       // Left commented out as the shell scripts are short lived 
       // and we are relying on the status for completed containers from RM to detect status
 
-      //		    GetContainerStatusRequest statusReq = Records.newRecord(GetContainerStatusRequest.class);
-      //		    statusReq.setContainerId(container.getId());
-      //		    GetContainerStatusResponse statusResp;
-      //			try {
-      //				statusResp = cm.getContainerStatus(statusReq);
-      //			    LOG.info("Container Status"
-      //			    		+ ", id=" + container.getId()
-      //			    		+ ", status=" +statusResp.getStatus());
-      //			} catch (YarnRemoteException e) {
-      //				e.printStackTrace();
-      //			}
-    }		
-  }	
+      //    GetContainerStatusRequest statusReq = Records.newRecord(GetContainerStatusRequest.class);
+      //    statusReq.setContainerId(container.getId());
+      //    GetContainerStatusResponse statusResp;
+      //try {
+      //statusResp = cm.getContainerStatus(statusReq);
+      //    LOG.info("Container Status"
+      //    + ", id=" + container.getId()
+      //    + ", status=" +statusResp.getStatus());
+      //} catch (YarnRemoteException e) {
+      //e.printStackTrace();
+      //}
+    }
+  }
 
   /**
    * Connect to the Resource Manager
@@ -744,25 +744,25 @@ public class ApplicationMaster {
     YarnConfiguration yarnConf = new YarnConfiguration(conf);
     InetSocketAddress rmAddress = NetUtils.createSocketAddr(yarnConf.get(
         YarnConfiguration.RM_SCHEDULER_ADDRESS,
-        YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS));		
+        YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS));
     LOG.info("Connecting to ResourceManager at " + rmAddress);
     return ((AMRMProtocol) rpc.getProxy(AMRMProtocol.class, rmAddress, conf));
-  }		
+  }
 
   /** 
    * Register the Application Master to the Resource Manager
    * @return the registration response from the RM
    * @throws YarnRemoteException
    */
-  private RegisterApplicationMasterResponse registerToRM() throws YarnRemoteException {		
-    RegisterApplicationMasterRequest appMasterRequest = Records.newRecord(RegisterApplicationMasterRequest.class);	
+  private RegisterApplicationMasterResponse registerToRM() throws YarnRemoteException {
+    RegisterApplicationMasterRequest appMasterRequest = Records.newRecord(RegisterApplicationMasterRequest.class);
 
     // set the required info into the registration request: 
     // application attempt id, 
     // host on which the app master is running
     // rpc port on which the app master accepts requests from the client 
     // tracking url for the app master
-    appMasterRequest.setApplicationAttemptId(appAttemptID);	
+    appMasterRequest.setApplicationAttemptId(appAttemptID);
     appMasterRequest.setHost(appMasterHostname);
     appMasterRequest.setRpcPort(appMasterRpcPort);
     appMasterRequest.setTrackingUrl(appMasterTrackingUrl);
@@ -792,7 +792,7 @@ public class ApplicationMaster {
     Priority pri = Records.newRecord(Priority.class);
     // TODO - what is the range for priority? how to decide? 
     pri.setPriority(requestPriority);
-    request.setPriority(pri);	    
+    request.setPriority(pri);
 
     // Set up resource type requirements
     // For now, only memory is supported so we set memory requirements
@@ -810,7 +810,7 @@ public class ApplicationMaster {
    * @throws YarnRemoteException
    */
   private AMResponse sendContainerAskToRM(List<ResourceRequest> requestedContainers)
-      throws YarnRemoteException {	
+      throws YarnRemoteException {
     AllocateRequest req = Records.newRecord(AllocateRequest.class);
     req.setResponseId(rmRequestID.incrementAndGet());
     req.setApplicationAttemptId(appAttemptID);
@@ -830,7 +830,7 @@ public class ApplicationMaster {
       LOG.info("Released container, id=" + id.getId());
     }
 
-    AllocateResponse resp = resourceManager.allocate(req);		
-    return resp.getAMResponse();		    
+    AllocateResponse resp = resourceManager.allocate(req);
+    return resp.getAMResponse();
   }
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java?rev=1224965&r1=1224964&r2=1224965&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java Tue Dec 27 18:18:36 2011
@@ -75,7 +75,7 @@ public class AsyncDispatcher extends Abs
           try {
             event = eventQueue.take();
           } catch(InterruptedException ie) {
-            LOG.info("AsyncDispatcher thread interrupted", ie);
+            LOG.warn("AsyncDispatcher thread interrupted", ie);
             return;
           }
           if (event != null) {
@@ -114,8 +114,10 @@ public class AsyncDispatcher extends Abs
   @SuppressWarnings("unchecked")
   protected void dispatch(Event event) {
    // all events go through this loop
-    LOG.debug("Dispatching the event " + event.getClass().getName() + "."
-        + event.toString());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Dispatching the event " + event.getClass().getName() + "."
+          + event.toString());
+    }
 
     Class<? extends Enum> type = event.getType().getDeclaringClass();
 
@@ -131,12 +133,11 @@ public class AsyncDispatcher extends Abs
     }
   }
 
+  @SuppressWarnings("unchecked")
   @Override
-  @SuppressWarnings("rawtypes")
   public void register(Class<? extends Enum> eventType,
       EventHandler handler) {
     /* check to see if we have a listener registered */
-    @SuppressWarnings("unchecked")
     EventHandler<Event> registeredHandler = (EventHandler<Event>)
     eventDispatchers.get(eventType);
     LOG.info("Registering " + eventType + " for " + handler.getClass());
@@ -170,7 +171,7 @@ public class AsyncDispatcher extends Abs
       }
       int remCapacity = eventQueue.remainingCapacity();
       if (remCapacity < 1000) {
-        LOG.info("Very low remaining capacity in the event-queue: "
+        LOG.warn("Very low remaining capacity in the event-queue: "
             + remCapacity);
       }
       try {
@@ -186,7 +187,6 @@ public class AsyncDispatcher extends Abs
    * are interested in the event.
    * @param <T> the type of event these multiple handlers are interested in.
    */
-  @SuppressWarnings("rawtypes")
   static class MultiListenerHandler implements EventHandler<Event> {
     List<EventHandler<Event>> listofHandlers;
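
For orientation, a minimal sketch (hypothetical PingType/PingEvent types; yarn-common's AsyncDispatcher, AbstractEvent and EventHandler are assumed on the classpath) of the register/dispatch cycle these logging changes instrument:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.event.AbstractEvent;
    import org.apache.hadoop.yarn.event.AsyncDispatcher;
    import org.apache.hadoop.yarn.event.EventHandler;

    public class DispatcherDemo {
      enum PingType { PING }
      static class PingEvent extends AbstractEvent<PingType> {
        PingEvent() { super(PingType.PING); }
      }
      public static void main(String[] args) throws Exception {
        AsyncDispatcher dispatcher = new AsyncDispatcher();
        dispatcher.init(new Configuration());
        // Handlers are keyed by the event type's enum class.
        dispatcher.register(PingType.class, new EventHandler<PingEvent>() {
          @Override
          public void handle(PingEvent event) {
            System.out.println("handled " + event.getType());
          }
        });
        dispatcher.start();
        dispatcher.getEventHandler().handle(new PingEvent());
        Thread.sleep(100); // let the dispatcher thread drain the queue
        dispatcher.stop();
      }
    }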
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/CompositeService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/CompositeService.java?rev=1224965&r1=1224964&r2=1224965&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/CompositeService.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/service/CompositeService.java Tue Dec 27 18:18:36 2011
@@ -81,6 +81,10 @@ public class CompositeService extends Ab
   }
 
   public synchronized void stop() {
+    if (this.getServiceState() == STATE.STOPPED) {
+      // The base composite-service is already stopped, don't do anything again.
+      return;
+    }
     if (serviceList.size() > 0) {
       stop(serviceList.size() - 1);
     }
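
A minimal sketch (hypothetical DemoService) of the behaviour this guard enables, mirroring the new TestCompositeService case below; the second stop() now returns immediately instead of re-stopping the children:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.service.CompositeService;

    public class DemoService extends CompositeService {
      public DemoService() {
        super("DemoService");
      }
      public static void main(String[] args) {
        DemoService svc = new DemoService();
        svc.init(new Configuration());
        svc.start();
        svc.stop();
        svc.stop(); // no-op: service state is already STOPPED
      }
    }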

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestCompositeService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestCompositeService.java?rev=1224965&r1=1224964&r2=1224965&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestCompositeService.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestCompositeService.java Tue Dec 27 18:18:36 2011
@@ -88,6 +88,14 @@ public class TestCompositeService {
           ((NUM_OF_SERVICES - 1) - i), services[i].getCallSequenceNumber());
     }
 
+    // Try to stop again. This should be a no-op.
+    serviceManager.stop();
+    // Verify that stop() call sequence numbers for every service don't change.
+    for (int i = 0; i < NUM_OF_SERVICES; i++) {
+      assertEquals("For " + services[i]
+          + " service, stop() call sequence number should have been ",
+          ((NUM_OF_SERVICES - 1) - i), services[i].getCallSequenceNumber());
+    }
   }
 
   @Test
@@ -153,7 +161,7 @@ public class TestCompositeService {
 
     serviceManager.start();
 
-    // Start the composite service
+    // Stop the composite service
     try {
       serviceManager.stop();
     } catch (YarnException e) {

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java?rev=1224965&r1=1224964&r2=1224965&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java Tue Dec 27 18:18:36 2011
@@ -285,6 +285,7 @@ public class ApplicationMasterService ex
       response.setAvailableResources(allocation.getResourceLimit());
       responseMap.put(appAttemptId, response);
       allocateResponse.setAMResponse(response);
+      allocateResponse.setNumClusterNodes(this.rScheduler.getNumClusterNodes());
       return allocateResponse;
     }
   }

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java?rev=1224965&r1=1224964&r2=1224965&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java Tue Dec 27 18:18:36 2011
@@ -80,6 +80,14 @@ public interface YarnScheduler extends E
   public Resource getMaximumResourceCapability();
 
   /**
+   * Get the number of nodes available in the cluster.
+   * @return the number of available nodes.
+   */
+  @Public
+  @Stable
+  public int getNumClusterNodes();
+  
+  /**
   * The main API between the ApplicationMaster and the Scheduler.
   * The ApplicationMaster is updating its future resource requirements
   * and may release containers it doesn't need.

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java?rev=1224965&r1=1224964&r2=1224965&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java Tue Dec 27 18:18:36 2011
@@ -159,6 +159,7 @@ implements ResourceScheduler, CapacitySc
     return maximumAllocation;
   }
 
+  @Override
   public synchronized int getNumClusterNodes() {
     return numNodeManagers;
   }

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java?rev=1224965&r1=1224964&r2=1224965&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java Tue Dec 27 18:18:36 2011
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo;
 
 import java.io.IOException;
-import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -36,7 +35,6 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.yarn.Lock;
@@ -180,6 +178,11 @@ public class FifoScheduler implements Re
   }
 
   @Override
+  public int getNumClusterNodes() {
+    return nodes.size();
+  }
+  
+  @Override
   public Resource getMaximumResourceCapability() {
     return maximumAllocation;
   }

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/c++/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 27 18:18:36 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/c++:1159757-1220940
+/hadoop/common/trunk/hadoop-mapreduce-project/src/c++:1159757-1224959
 /hadoop/core/branches/branch-0.19/mapred/src/c++:713112
 /hadoop/core/trunk/src/c++:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 27 18:18:36 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib:1152502-1220940
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib:1152502-1224959
 /hadoop/core/branches/branch-0.19/mapred/src/contrib:713112
 /hadoop/core/trunk/src/contrib:784664-785643

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/block_forensics/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 27 18:18:36 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/block_forensics:1152502-1220940
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/block_forensics:1152502-1224959
 /hadoop/core/branches/branch-0.19/hdfs/src/contrib/block_forensics:713112
 /hadoop/core/branches/branch-0.19/mapred/src/contrib/block_forensics:713112
 /hadoop/core/trunk/src/contrib/block_forensics:784664-785643

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/build-contrib.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 27 18:18:36 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/build-contrib.xml:1161333-1220940
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/build-contrib.xml:1161333-1224959
 /hadoop/core/branches/branch-0.19/mapred/src/contrib/build-contrib.xml:713112
 /hadoop/core/trunk/src/contrib/build-contrib.xml:776175-786373

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/build.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 27 18:18:36 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/build.xml:1161333-1220940
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/build.xml:1161333-1224959
 /hadoop/core/branches/branch-0.19/mapred/src/contrib/build.xml:713112
 /hadoop/core/trunk/src/contrib/build.xml:776175-786373

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/data_join/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 27 18:18:36 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/data_join:1159757-1220940
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/data_join:1159757-1224959
 /hadoop/core/branches/branch-0.19/mapred/src/contrib/data_join:713112
 /hadoop/core/trunk/src/contrib/data_join:776175-786373

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/eclipse-plugin/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 27 18:18:36 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/eclipse-plugin:1159757-1220940
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/eclipse-plugin:1159757-1224959
 /hadoop/core/branches/branch-0.19/core/src/contrib/eclipse-plugin:713112
 /hadoop/core/branches/branch-0.19/mapred/src/contrib/eclipse-plugin:713112
 /hadoop/core/trunk/src/contrib/eclipse-plugin:776175-785643

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/index/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 27 18:18:36 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/index:1159757-1220940
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/index:1159757-1224959
 /hadoop/core/branches/branch-0.19/mapred/src/contrib/index:713112
 /hadoop/core/trunk/src/contrib/index:776175-786373

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/vaidya/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 27 18:18:36 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/vaidya:1159757-1220940
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/vaidya:1159757-1224959
 /hadoop/core/branches/branch-0.19/mapred/src/contrib/vaidya:713112
 /hadoop/core/trunk/src/contrib/vaidya:776175-786373

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/examples/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 27 18:18:36 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/examples:1152502-1220940
+/hadoop/common/trunk/hadoop-mapreduce-project/src/examples:1152502-1224959
 /hadoop/core/branches/branch-0.19/mapred/src/examples:713112
 /hadoop/core/trunk/src/examples:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 27 18:18:36 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/java:1152502-1220940
+/hadoop/common/trunk/hadoop-mapreduce-project/src/java:1152502-1224959
 /hadoop/core/branches/branch-0.19/mapred/src/java:713112
 /hadoop/core/trunk/src/mapred:776175-785643

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobInProgress.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobInProgress.java?rev=1224965&r1=1224964&r2=1224965&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobInProgress.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobInProgress.java Tue Dec 27 18:18:36 2011
@@ -2671,7 +2671,9 @@ public class JobInProgress {
     // Update jobhistory 
     TaskTrackerStatus ttStatus = 
       this.jobtracker.getTaskTrackerStatus(status.getTaskTracker());
-    String trackerHostname = jobtracker.getNode(ttStatus.getHost()).toString();
+    Node node = jobtracker.getNode(ttStatus.getHost());
+    String trackerHostname = node.getName();
+    String trackerRackName = node.getParent().getName(); 
     TaskType taskType = getTaskType(tip);
 
     TaskAttemptStartedEvent tse = new TaskAttemptStartedEvent(
@@ -2685,7 +2687,7 @@ public class JobInProgress {
       MapAttemptFinishedEvent mfe = new MapAttemptFinishedEvent(
           statusAttemptID, taskType, TaskStatus.State.SUCCEEDED.toString(),
           status.getMapFinishTime(),
-          status.getFinishTime(),  trackerHostname, -1, "",
+          status.getFinishTime(),  trackerHostname, -1, trackerRackName,
           status.getStateString(), 
           new org.apache.hadoop.mapreduce.Counters(status.getCounters()),
           tip.getSplits(statusAttemptID).burst()
@@ -2698,7 +2700,7 @@ public class JobInProgress {
           statusAttemptID, taskType, TaskStatus.State.SUCCEEDED.toString(), 
           status.getShuffleFinishTime(),
           status.getSortFinishTime(), status.getFinishTime(),
-          trackerHostname, -1, "", status.getStateString(),
+          trackerHostname, -1, trackerRackName, status.getStateString(),
           new org.apache.hadoop.mapreduce.Counters(status.getCounters()),
           tip.getSplits(statusAttemptID).burst()
           );
@@ -3208,7 +3210,7 @@ public class JobInProgress {
             (taskid, 
              taskType, taskStatus.getRunState().toString(),
              finishTime, 
-             taskTrackerHostName, -1, diagInfo,
+             taskTrackerHostName, -1, null, diagInfo,
              splits.burst());
     jobHistory.logEvent(tue, taskid.getJobID());
         

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 27 18:18:36 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred:1152502-1220940
+/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred:1152502-1224959
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred:713112
 /hadoop/core/trunk/src/test/mapred:776175-785643

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 27 18:18:36 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs:1159757-1220940
+/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs:1159757-1224959
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred/org/apache/hadoop/fs:713112
 /hadoop/core/trunk/src/test/mapred/org/apache/hadoop/fs:776175-785643
 /hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs:817878-835934

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 27 18:18:36 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/hdfs:1152502-1220940
+/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/hdfs:1152502-1224959
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred/org/apache/hadoop/hdfs:713112
 /hadoop/core/trunk/src/test/mapred/org/apache/hadoop/hdfs:776175-785643
 /hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/hdfs:817878-835934

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/io/FileBench.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 27 18:18:36 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/io/FileBench.java:1161333-1220940
+/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/io/FileBench.java:1161333-1224959
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred/org/apache/hadoop/io/FileBench.java:713112
 /hadoop/core/trunk/src/test/mapred/org/apache/hadoop/io/FileBench.java:776175-785643
 /hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/io/FileBench.java:817878-835934

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/io/TestSequenceFileMergeProgress.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 27 18:18:36 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/io/TestSequenceFileMergeProgress.java:1161333-1220940
+/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/io/TestSequenceFileMergeProgress.java:1161333-1224959
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred/org/apache/hadoop/io/TestSequenceFileMergeProgress.java:713112
 /hadoop/core/trunk/src/test/mapred/org/apache/hadoop/io/TestSequenceFileMergeProgress.java:776175-785643
 /hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/io/TestSequenceFileMergeProgress.java:817878-835934

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 27 18:18:36 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc:1159757-1220940
+/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc:1159757-1224959
 /hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs-with-mr/org/apache/hadoop/ipc:713112
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred/org/apache/hadoop/ipc:713112
 /hadoop/core/trunk/src/test/hdfs-with-mr/org/apache/hadoop/ipc:776175-784663

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEvents.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEvents.java?rev=1224965&r1=1224964&r2=1224965&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEvents.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEvents.java Tue Dec 27 18:18:36 2011
@@ -83,7 +83,7 @@ public class TestJobHistoryEvents extend
     for (TaskType t : types) {
       TaskAttemptUnsuccessfulCompletionEvent tauce = 
         new TaskAttemptUnsuccessfulCompletionEvent
-           (id, t, state, 0L, "", -1, "", NULL_SPLITS_ARRAY);
+           (id, t, state, 0L, "", -1, "", "", NULL_SPLITS_ARRAY);
       assertEquals(expected, tauce.getEventType());
     }
   }
@@ -132,7 +132,8 @@ public class TestJobHistoryEvents extend
     for (TaskType t : types) {
       TaskAttemptFinishedEvent tafe = 
         new TaskAttemptFinishedEvent(id, t, 
-            TaskStatus.State.SUCCEEDED.toString(), 0L, "", "", new Counters());
+            TaskStatus.State.SUCCEEDED.toString(), 0L, "", "", "", 
+            new Counters());
       assertEquals(expected, tafe.getEventType());
     }
   }
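
The two hunks above track a single event-API change: TaskAttemptUnsuccessfulCompletionEvent and TaskAttemptFinishedEvent now take the rack name as its own String argument instead of a combined "/rack/host" locality string. A minimal sketch of constructing the events against the new signatures; the argument order is inferred from the calls in this commit, all literal values are illustrative, and id/NULL_SPLITS_ARRAY are assumed to be in test scope:

    // Sketch only: hostname, port and rack name are now distinct arguments.
    TaskAttemptID id =
        TaskAttemptID.forName("attempt_200904211745_0003_m_000004_1");
    // (id, taskType, taskStatus, finishTime, hostname, port, rackName,
    //  error, splits)
    TaskAttemptUnsuccessfulCompletionEvent tauce =
        new TaskAttemptUnsuccessfulCompletionEvent(
            id, TaskType.MAP, "FAILED", 1234567890L,
            "host42.example.com", -1, "/rack17",
            "MACHINE_EXPLODED", NULL_SPLITS_ARRAY);
    // (id, taskType, taskStatus, finishTime, rackName, hostname, state,
    //  counters)
    TaskAttemptFinishedEvent tafe =
        new TaskAttemptFinishedEvent(
            id, TaskType.MAP, TaskStatus.State.SUCCEEDED.toString(),
            1234567890L, "/rack17", "host42.example.com", "SUCCESS",
            new Counters());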

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 27 18:18:36 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java:1161333-1220940
+/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java:1161333-1224959
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java:713112
 /hadoop/core/trunk/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java:776175-785643
 /hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java:817878-835934

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/test/MapredTestDriver.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 27 18:18:36 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/test/MapredTestDriver.java:1161333-1220940
+/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/test/MapredTestDriver.java:1161333-1224959
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred/org/apache/hadoop/test/MapredTestDriver.java:713112
 /hadoop/core/trunk/src/test/mapred/org/apache/hadoop/test/MapredTestDriver.java:776175-785643
 /hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/test/MapredTestDriver.java:817878-835934

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/tools/rumen/TestRumenJobTraces.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/tools/rumen/TestRumenJobTraces.java?rev=1224965&r1=1224964&r2=1224965&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/tools/rumen/TestRumenJobTraces.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/tools/rumen/TestRumenJobTraces.java Tue Dec 27 18:18:36 2011
@@ -26,6 +26,8 @@ import java.util.List;
 import java.util.Properties;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem;
@@ -42,6 +44,7 @@ import org.apache.hadoop.mapreduce.TaskA
 import org.apache.hadoop.mapreduce.TaskCounter;
 import org.apache.hadoop.mapreduce.TaskID;
 import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.TestNoJobSetupCleanup.MyOutputFormat;
 import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistory;
@@ -49,6 +52,9 @@ import org.apache.hadoop.mapreduce.jobhi
 import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletionEvent;
 import org.apache.hadoop.mapreduce.jobhistory.TaskStartedEvent;
 import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;
 import org.apache.hadoop.tools.rumen.TraceBuilder.MyOptions;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -57,6 +63,8 @@ import org.junit.Test;
 import static org.junit.Assert.*;
 
 public class TestRumenJobTraces {
+  private static final Log LOG = LogFactory.getLog(TestRumenJobTraces.class);
+
   @Test
   public void testSmallTrace() throws Exception {
     performSingleTest("sample-job-tracker-logs.gz",
@@ -232,11 +240,21 @@ public class TestRumenJobTraces {
       parser = new Hadoop20JHParser(ris);
       ArrayList<String> seenEvents = new ArrayList<String>(150);
 
-      getHistoryEvents(parser, seenEvents, null); // get events into seenEvents
+      // this is the same as the jobID in the input history file
+      String jobId = "job_200904211745_0002";
+      JobBuilder builder = new JobBuilder(jobId);
+
+      // get events into seenEvents
+      getHistoryEvents(parser, seenEvents, builder);
 
       // Validate the events seen by history parser from
       // history file v20-single-input-log.gz
       validateSeenHistoryEvents(seenEvents, goldLines);
+
+      ParsedJob parsedJob = builder.build();
+      // validate the obtainXXX api of ParsedJob, ParsedTask and
+      // ParsedTaskAttempt
+      validateParsedJob(parsedJob, 20, 1, true);
     } finally {
       if (parser != null) {
         parser.close();
@@ -246,8 +264,10 @@ public class TestRumenJobTraces {
   }
 
   /**
-   * Validate the parsing of given history file name. Also validate the history
-   * file name suffixed with old/stale file suffix.
+   * Validate the parsing of the given history file name.
+   * 
+   * TODO: Also validate the history file name suffixed with old/stale file 
+   *       suffix.
    * @param jhFileName job history file path
    * @param jid JobID
    */
@@ -257,13 +277,7 @@ public class TestRumenJobTraces {
       JobID.forName(JobHistoryUtils.extractJobID(jhFileName.getName()));
     assertEquals("TraceBuilder failed to parse the current JH filename"
                  + jhFileName, jid, extractedJID);
-    // test jobhistory filename with old/stale file suffix
-    jhFileName = jhFileName.suffix(JobHistory.getOldFileSuffix("123"));
-    extractedJID =
-      JobID.forName(JobHistoryUtils.extractJobID(jhFileName.getName()));
-    assertEquals("TraceBuilder failed to parse the current JH filename"
-                 + "(old-suffix):" + jhFileName,
-                 jid, extractedJID);
+    // TODO: test jobhistory filename with old/stale file suffix
   }
 
   /**
@@ -318,8 +332,9 @@ public class TestRumenJobTraces {
             .makeQualified(lfs.getUri(), lfs.getWorkingDirectory());
     
     // Check if current jobhistory filenames are detected properly
-    Path jhFilename = org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils
-        .getStagingJobHistoryFile(rootInputDir, jid.toString(), 1);
+    JobId jobId = TypeConverter.toYarn(jid);
+    JobIndexInfo info = new JobIndexInfo(0L, 0L, "", "", jobId, 0, 0, "");
+    Path jhFilename = new Path(FileNameIndexUtils.getDoneFileName(info));
     validateHistoryFileNameParsing(jhFilename, jid);
 
     // Check if Pre21 V1 jobhistory file names are detected properly
@@ -583,9 +598,11 @@ public class TestRumenJobTraces {
       // validate resource usage metrics
       //  get the job counters
       Counters counters = job.getTaskReports(TaskType.MAP)[0].getTaskCounters();
-      
+
+      // get the parsed job
+      ParsedJob parsedJob = builder.build();
       //  get the logged job
-      LoggedJob loggedJob = builder.build();
+      LoggedJob loggedJob = parsedJob;
       //  get the logged attempts
       LoggedTaskAttempt attempt = 
         loggedJob.getMapTasks().get(0).getAttempts().get(0);
@@ -599,6 +616,10 @@ public class TestRumenJobTraces {
           counters.findCounter(TaskCounter.PHYSICAL_MEMORY_BYTES).getValue(),
           counters.findCounter(TaskCounter.COMMITTED_HEAP_BYTES).getValue(),
           true);
+
+      // validate the obtainXXX api of ParsedJob, ParsedTask and
+      // ParsedTaskAttempt
+      validateParsedJob(parsedJob, 1, 1, false);
     } finally {
       // stop the MR cluster
       mrCluster.shutdown();
@@ -615,6 +636,142 @@ public class TestRumenJobTraces {
     }
   }
 
+  /**
+   * Verify if the obtainXXX methods of {@link ParsedJob}, {@link ParsedTask}
+   * and {@link ParsedTaskAttempt} give valid info
+   */
+  private void validateParsedJob(ParsedJob parsedJob, int numMaps,
+      int numReduces, boolean pre21JobHistory) {
+    validateParsedJobAPI(parsedJob, numMaps, numReduces, pre21JobHistory);
+
+    List<ParsedTask> maps = parsedJob.obtainMapTasks();
+    for (ParsedTask task : maps) {
+      validateParsedTask(task);
+    }
+    List<ParsedTask> reduces = parsedJob.obtainReduceTasks();
+    for (ParsedTask task : reduces) {
+      validateParsedTask(task);
+    }
+    List<ParsedTask> others = parsedJob.obtainOtherTasks();
+    for (ParsedTask task : others) {
+      validateParsedTask(task);
+    }
+  }
+
+  /** Verify if the obtainXXX methods of {@link ParsedJob} give valid info */
+  private void validateParsedJobAPI(ParsedJob parsedJob, int numMaps,
+      int numReduces, boolean pre21JobHistory) {
+    LOG.info("Validating ParsedJob.obtainXXX api... for "
+             + parsedJob.getJobID());
+    assertNotNull("Job acls in ParsedJob is null",
+                  parsedJob.obtainJobAcls());
+    assertNotNull("Job conf path in ParsedJob is null",
+                  parsedJob.obtainJobConfpath());
+
+    assertNotNull("Map Counters in ParsedJob is null",
+                  parsedJob.obtainMapCounters());
+    assertNotNull("Reduce Counters in ParsedJob is null",
+                  parsedJob.obtainReduceCounters());
+    assertNotNull("Total Counters in ParsedJob is null",
+                  parsedJob.obtainTotalCounters());
+
+    assertNotNull("Map Tasks List in ParsedJob is null",
+                  parsedJob.obtainMapTasks());
+    assertNotNull("Reduce Tasks List in ParsedJob is null",
+                  parsedJob.obtainReduceTasks());
+    assertNotNull("Other Tasks List in ParsedJob is null",
+                  parsedJob.obtainOtherTasks());
+
+    // the expected numbers of map and reduce tasks should be there
+    assertEquals("Number of map tasks in ParsedJob is wrong",
+                 numMaps, parsedJob.obtainMapTasks().size());
+    assertEquals("Number of reduce tasks in ParsedJob is wrong",
+                 numReduces, parsedJob.obtainReduceTasks().size());
+
+    // old hadoop20 version history files don't have job-level-map-counters and
+    // job-level-reduce-counters. Only total counters exist there.
+    assertTrue("Total Counters in ParsedJob is empty",
+               parsedJob.obtainTotalCounters().size() > 0);
+    if (!pre21JobHistory) {
+      assertTrue("Map Counters in ParsedJob is empty",
+                 parsedJob.obtainMapCounters().size() > 0);
+      assertTrue("Reduce Counters in ParsedJob is empty",
+                 parsedJob.obtainReduceCounters().size() > 0);
+    }
+  }
+
+  /**
+   * Verify if the obtainXXX methods of {@link ParsedTask} and
+   * {@link ParsedTaskAttempt} give valid info
+   */
+  private void validateParsedTask(ParsedTask parsedTask) {
+    validateParsedTaskAPI(parsedTask);
+
+    List<ParsedTaskAttempt> attempts = parsedTask.obtainTaskAttempts();
+    for (ParsedTaskAttempt attempt : attempts) {
+      validateParsedTaskAttemptAPI(attempt);
+    }
+  }
+
+  /** Verify if the obtainXXX methods of {@link ParsedTask} give valid info */
+  private void validateParsedTaskAPI(ParsedTask parsedTask) {
+    LOG.info("Validating ParsedTask.obtainXXX api... for "
+             + parsedTask.getTaskID());
+    assertNotNull("Task counters in ParsedTask is null",
+                  parsedTask.obtainCounters());
+
+    if (parsedTask.getTaskStatus()
+        == Pre21JobHistoryConstants.Values.SUCCESS) {
+      // task counters should not be empty
+      assertTrue("Task counters in ParsedTask is empty",
+                 parsedTask.obtainCounters().size() > 0);
+      assertNull("Diagnostic-info is non-null for a succeeded task",
+                 parsedTask.obtainDiagnosticInfo());
+      assertNull("Failed-due-to-attemptId is non-null for a succeeded task",
+                 parsedTask.obtainFailedDueToAttemptId());
+    } else {
+      assertNotNull("Diagnostic-info is non-null for a succeeded task",
+                    parsedTask.obtainDiagnosticInfo());
+      assertNotNull("Failed-due-to-attemptId is non-null for a succeeded task",
+                    parsedTask.obtainFailedDueToAttemptId());
+    }
+
+    List<ParsedTaskAttempt> attempts = parsedTask.obtainTaskAttempts();
+    assertNotNull("TaskAttempts list in ParsedTask is null", attempts);
+    assertTrue("TaskAttempts list in ParsedTask is empty",
+               attempts.size() > 0);    
+  }
+
+  /**
+   * Verify if the obtainXXX methods of {@link ParsedTaskAttempt} give
+   * valid info
+   */
+  private void validateParsedTaskAttemptAPI(
+      ParsedTaskAttempt parsedTaskAttempt) {
+    LOG.info("Validating ParsedTaskAttempt.obtainXXX api... for "
+             + parsedTaskAttempt.getAttemptID());
+    assertNotNull("Counters in ParsedTaskAttempt is null",
+                  parsedTaskAttempt.obtainCounters());
+
+    if (parsedTaskAttempt.getResult()
+        == Pre21JobHistoryConstants.Values.SUCCESS) { 
+      assertTrue("Counters in ParsedTaskAttempt is empty",
+               parsedTaskAttempt.obtainCounters().size() > 0);
+      assertNull("Diagnostic-info is non-null for a succeeded taskAttempt",
+                 parsedTaskAttempt.obtainDiagnosticInfo());
+    } else {
+      assertNotNull("Diagnostic-info is non-null for a succeeded taskAttempt",
+                 parsedTaskAttempt.obtainDiagnosticInfo());
+    }
+    assertNotNull("TrackerName in ParsedTaskAttempt is null",
+                  parsedTaskAttempt.obtainTrackerName());
+
+    assertNotNull("http-port info in ParsedTaskAttempt is null",
+        parsedTaskAttempt.obtainHttpPort());
+    assertNotNull("Shuffle-port info in ParsedTaskAttempt is null",
+        parsedTaskAttempt.obtainShufflePort());
+  }
+
   @Test
   public void testJobConfigurationParser() throws Exception {
 
@@ -932,18 +1089,18 @@ public class TestRumenJobTraces {
     subject.process(new TaskAttemptFinishedEvent(TaskAttemptID
         .forName("attempt_200904211745_0003_m_000004_0"), TaskType
         .valueOf("MAP"), "STATUS", 1234567890L,
-        "/194\\.6\\.134\\.64/cluster50261\\.secondleveldomain\\.com",
+        "/194\\.6\\.134\\.64", "cluster50261\\.secondleveldomain\\.com",
         "SUCCESS", null));
     subject.process(new TaskAttemptUnsuccessfulCompletionEvent
                     (TaskAttemptID.forName("attempt_200904211745_0003_m_000004_1"),
                      TaskType.valueOf("MAP"), "STATUS", 1234567890L,
-                     "/194\\.6\\.134\\.80/cluster50262\\.secondleveldomain\\.com",
-                     -1, "MACHINE_EXPLODED", splits));
+                     "cluster50262\\.secondleveldomain\\.com",
+                     -1, "/194\\.6\\.134\\.80", "MACHINE_EXPLODED", splits));
     subject.process(new TaskAttemptUnsuccessfulCompletionEvent
                     (TaskAttemptID.forName("attempt_200904211745_0003_m_000004_2"),
                      TaskType.valueOf("MAP"), "STATUS", 1234567890L,
-                     "/194\\.6\\.134\\.80/cluster50263\\.secondleveldomain\\.com",
-                     -1, "MACHINE_EXPLODED", splits));
+                     "cluster50263\\.secondleveldomain\\.com",
+                     -1, "/194\\.6\\.134\\.80", "MACHINE_EXPLODED", splits));
     subject.process(new TaskStartedEvent(TaskID
         .forName("task_200904211745_0003_m_000004"), 1234567890L, TaskType
         .valueOf("MAP"),

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/counters-test-trace.json.gz
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/counters-test-trace.json.gz?rev=1224965&r1=1224964&r2=1224965&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/dispatch-sample-v20-jt-log.gz
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/dispatch-sample-v20-jt-log.gz?rev=1224965&r1=1224964&r2=1224965&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/dispatch-trace-output.json.gz
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/dispatch-trace-output.json.gz?rev=1224965&r1=1224964&r2=1224965&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/job-tracker-logs-topology-output
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/job-tracker-logs-topology-output?rev=1224965&r1=1224964&r2=1224965&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/job-tracker-logs-topology-output (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/job-tracker-logs-topology-output Tue Dec 27 18:18:36 2011
@@ -6,6 +6,9 @@
       "name" : "cluster50213\\.secondleveldomain\\.com",
       "children" : null
     }, {
+      "name" : "cluster50235\\.secondleveldomain\\.com",
+      "children" : null
+    }, {
       "name" : "cluster50226\\.secondleveldomain\\.com",
       "children" : null
     }, {
@@ -21,6 +24,9 @@
       "name" : "cluster50231\\.secondleveldomain\\.com",
       "children" : null
     }, {
+      "name" : "cluster50223\\.secondleveldomain\\.com",
+      "children" : null
+    }, {
       "name" : "cluster50232\\.secondleveldomain\\.com",
       "children" : null
     }, {
@@ -98,6 +104,9 @@
     }, {
       "name" : "cluster1236\\.secondleveldomain\\.com",
       "children" : null
+    }, {
+      "name" : "cluster1232\\.secondleveldomain\\.com",
+      "children" : null
     } ]
   }, {
     "name" : "194\\.6\\.134\\.64",
@@ -105,6 +114,9 @@
       "name" : "cluster50317\\.secondleveldomain\\.com",
       "children" : null
     }, {
+      "name" : "cluster50283\\.secondleveldomain\\.com",
+      "children" : null
+    }, {
       "name" : "cluster50292\\.secondleveldomain\\.com",
       "children" : null
     }, {
@@ -146,6 +158,9 @@
     }, {
       "name" : "cluster50316\\.secondleveldomain\\.com",
       "children" : null
+    }, {
+      "name" : "cluster50303\\.secondleveldomain\\.com",
+      "children" : null
     } ]
   }, {
     "name" : "194\\.6\\.129\\.128",
@@ -432,6 +447,9 @@
       "name" : "cluster50120\\.secondleveldomain\\.com",
       "children" : null
     }, {
+      "name" : "cluster50132\\.secondleveldomain\\.com",
+      "children" : null
+    }, {
       "name" : "cluster50130\\.secondleveldomain\\.com",
       "children" : null
     }, {
@@ -567,9 +585,15 @@
       "name" : "cluster50166\\.secondleveldomain\\.com",
       "children" : null
     }, {
+      "name" : "cluster50173\\.secondleveldomain\\.com",
+      "children" : null
+    }, {
       "name" : "cluster50170\\.secondleveldomain\\.com",
       "children" : null
     }, {
+      "name" : "cluster50189\\.secondleveldomain\\.com",
+      "children" : null
+    }, {
       "name" : "cluster50179\\.secondleveldomain\\.com",
       "children" : null
     } ]
@@ -579,28 +603,37 @@
       "name" : "cluster1283\\.secondleveldomain\\.com",
       "children" : null
     }, {
-      "name" : "cluster1299\\.secondleveldomain\\.com",
+      "name" : "cluster1295\\.secondleveldomain\\.com",
       "children" : null
     }, {
-      "name" : "cluster1281\\.secondleveldomain\\.com",
+      "name" : "cluster1302\\.secondleveldomain\\.com",
       "children" : null
     }, {
-      "name" : "cluster1288\\.secondleveldomain\\.com",
+      "name" : "cluster1294\\.secondleveldomain\\.com",
       "children" : null
     }, {
-      "name" : "cluster1302\\.secondleveldomain\\.com",
+      "name" : "cluster1310\\.secondleveldomain\\.com",
       "children" : null
     }, {
-      "name" : "cluster1294\\.secondleveldomain\\.com",
+      "name" : "cluster1305\\.secondleveldomain\\.com",
+      "children" : null
+    }, {
+      "name" : "cluster1299\\.secondleveldomain\\.com",
+      "children" : null
+    }, {
+      "name" : "cluster1281\\.secondleveldomain\\.com",
+      "children" : null
+    }, {
+      "name" : "cluster1288\\.secondleveldomain\\.com",
       "children" : null
     }, {
       "name" : "cluster1289\\.secondleveldomain\\.com",
       "children" : null
     }, {
-      "name" : "cluster1315\\.secondleveldomain\\.com",
+      "name" : "cluster1314\\.secondleveldomain\\.com",
       "children" : null
     }, {
-      "name" : "cluster1305\\.secondleveldomain\\.com",
+      "name" : "cluster1315\\.secondleveldomain\\.com",
       "children" : null
     }, {
       "name" : "cluster1316\\.secondleveldomain\\.com",
@@ -663,6 +696,9 @@
       "name" : "cluster3054\\.secondleveldomain\\.com",
       "children" : null
     }, {
+      "name" : "cluster3064\\.secondleveldomain\\.com",
+      "children" : null
+    }, {
       "name" : "cluster3077\\.secondleveldomain\\.com",
       "children" : null
     }, {
@@ -696,6 +732,9 @@
       "name" : "cluster50468\\.secondleveldomain\\.com",
       "children" : null
     }, {
+      "name" : "cluster50445\\.secondleveldomain\\.com",
+      "children" : null
+    }, {
       "name" : "cluster50476\\.secondleveldomain\\.com",
       "children" : null
     }, {
@@ -786,6 +825,9 @@
       "name" : "cluster50493\\.secondleveldomain\\.com",
       "children" : null
     }, {
+      "name" : "cluster50511\\.secondleveldomain\\.com",
+      "children" : null
+    }, {
       "name" : "cluster50510\\.secondleveldomain\\.com",
       "children" : null
     }, {
@@ -1100,6 +1142,9 @@
     }, {
       "name" : "cluster1907\\.secondleveldomain\\.com",
       "children" : null
+    }, {
+      "name" : "cluster1917\\.secondleveldomain\\.com",
+      "children" : null
     } ]
   }, {
     "name" : "192\\.30\\.63\\.192",
@@ -1223,6 +1268,9 @@
     }, {
       "name" : "cluster1446\\.secondleveldomain\\.com",
       "children" : null
+    }, {
+      "name" : "cluster1440\\.secondleveldomain\\.com",
+      "children" : null
     } ]
   }, {
     "name" : "194\\.6\\.132\\.128",
@@ -1239,6 +1287,9 @@
       "name" : "cluster50025\\.secondleveldomain\\.com",
       "children" : null
     }, {
+      "name" : "cluster50024\\.secondleveldomain\\.com",
+      "children" : null
+    }, {
       "name" : "cluster50021\\.secondleveldomain\\.com",
       "children" : null
     } ]
@@ -1293,6 +1344,9 @@
       "name" : "cluster50348\\.secondleveldomain\\.com",
       "children" : null
     }, {
+      "name" : "cluster50346\\.secondleveldomain\\.com",
+      "children" : null
+    }, {
       "name" : "cluster50325\\.secondleveldomain\\.com",
       "children" : null
     }, {
@@ -1380,6 +1434,9 @@
       "name" : "cluster1662\\.secondleveldomain\\.com",
       "children" : null
     }, {
+      "name" : "cluster1647\\.secondleveldomain\\.com",
+      "children" : null
+    }, {
       "name" : "cluster1649\\.secondleveldomain\\.com",
       "children" : null
     }, {
@@ -1430,6 +1487,9 @@
     }, {
       "name" : "cluster1503\\.secondleveldomain\\.com",
       "children" : null
+    }, {
+      "name" : "cluster1514\\.secondleveldomain\\.com",
+      "children" : null
     } ]
   }, {
     "name" : "194\\.6\\.129\\.0",
@@ -1440,6 +1500,9 @@
       "name" : "cluster50539\\.secondleveldomain\\.com",
       "children" : null
     }, {
+      "name" : "cluster50533\\.secondleveldomain\\.com",
+      "children" : null
+    }, {
       "name" : "cluster50530\\.secondleveldomain\\.com",
       "children" : null
     }, {
@@ -1476,6 +1539,9 @@
       "name" : "cluster50418\\.secondleveldomain\\.com",
       "children" : null
     }, {
+      "name" : "cluster50406\\.secondleveldomain\\.com",
+      "children" : null
+    }, {
       "name" : "cluster50411\\.secondleveldomain\\.com",
       "children" : null
     }, {
@@ -1527,6 +1593,9 @@
   }, {
     "name" : "194\\.6\\.128\\.64",
     "children" : [ {
+      "name" : "cluster1613\\.secondleveldomain\\.com",
+      "children" : null
+    }, {
       "name" : "cluster1639\\.secondleveldomain\\.com",
       "children" : null
     }, {
@@ -1574,6 +1643,9 @@
     }, {
       "name" : "cluster1602\\.secondleveldomain\\.com",
       "children" : null
+    }, {
+      "name" : "cluster1627\\.secondleveldomain\\.com",
+      "children" : null
     } ]
   }, {
     "name" : "194\\.6\\.132\\.192",
@@ -1662,6 +1734,9 @@
       "name" : "cluster1736\\.secondleveldomain\\.com",
       "children" : null
     }, {
+      "name" : "cluster1735\\.secondleveldomain\\.com",
+      "children" : null
+    }, {
       "name" : "cluster1722\\.secondleveldomain\\.com",
       "children" : null
     }, {

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/tools/org/apache/hadoop/tools/rumen/HadoopLogsAnalyzer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/tools/org/apache/hadoop/tools/rumen/HadoopLogsAnalyzer.java?rev=1224965&r1=1224964&r2=1224965&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/tools/org/apache/hadoop/tools/rumen/HadoopLogsAnalyzer.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/tools/org/apache/hadoop/tools/rumen/HadoopLogsAnalyzer.java Tue Dec 27 18:18:36 2011
@@ -1308,6 +1308,8 @@ public class HadoopLogsAnalyzer extends 
         if (host != null) {
           attempt.setHostName(host.getNodeName(), host.getRackName());
           attempt.setLocation(host.makeLoggedLocation());
+        } else {
+          attempt.setHostName(hostName, null);
         }
 
         List<LoggedLocation> locs = task.getPreferredLocations();
@@ -1470,9 +1472,13 @@ public class HadoopLogsAnalyzer extends 
         }
       }
 
-      ParsedHost host = getAndRecordParsedHost(hostName);
-      if (host != null) {
-        attempt.setHostName(host.getNodeName(), host.getRackName());
+      if (hostName != null) {
+        ParsedHost host = getAndRecordParsedHost(hostName);
+        if (host != null) {
+          attempt.setHostName(host.getNodeName(), host.getRackName());
+        } else {
+          attempt.setHostName(hostName, null);
+        }
       }
 
       if (attemptID != null) {