Posted to mapreduce-commits@hadoop.apache.org by at...@apache.org on 2011/11/02 06:35:03 UTC

svn commit: r1196458 [17/19] - in /hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project: ./ assembly/ bin/ conf/ dev-support/ hadoop-mapreduce-client/ hadoop-mapreduce-client/hadoop-mapreduce-client-app/ hadoop-mapreduce-client/hadoop-mapreduce-cl...

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java Wed Nov  2 05:34:31 2011
@@ -42,6 +42,7 @@ public class MockNM {
   private final String nodeIdStr;
   private final int memory;
   private final ResourceTrackerService resourceTracker;
+  private final int httpPort = 2;
 
   MockNM(String nodeIdStr, int memory, ResourceTrackerService resourceTracker) {
     this.nodeIdStr = nodeIdStr;
@@ -53,12 +54,16 @@ public class MockNM {
     return nodeId;
   }
 
+  public int getHttpPort() {
+    return httpPort;
+  }
+
   public void containerStatus(Container container) throws Exception {
     Map<ApplicationId, List<ContainerStatus>> conts = 
         new HashMap<ApplicationId, List<ContainerStatus>>();
     conts.put(container.getId().getApplicationAttemptId().getApplicationId(), 
         Arrays.asList(new ContainerStatus[] { container.getContainerStatus() }));
-    nodeHeartbeat(conts, true);
+    nodeHeartbeat(conts, true,nodeId);
   }
 
   public NodeId registerNode() throws Exception {
@@ -69,7 +74,7 @@ public class MockNM {
     RegisterNodeManagerRequest req = Records.newRecord(
         RegisterNodeManagerRequest.class);
     req.setNodeId(nodeId);
-    req.setHttpPort(2);
+    req.setHttpPort(httpPort);
     Resource resource = Records.newRecord(Resource.class);
     resource.setMemory(memory);
     req.setResource(resource);
@@ -78,11 +83,11 @@ public class MockNM {
   }
 
   public HeartbeatResponse nodeHeartbeat(boolean b) throws Exception {
-    return nodeHeartbeat(new HashMap<ApplicationId, List<ContainerStatus>>(), b);
+    return nodeHeartbeat(new HashMap<ApplicationId, List<ContainerStatus>>(), b,nodeId);
   }
 
   public HeartbeatResponse nodeHeartbeat(Map<ApplicationId, 
-      List<ContainerStatus>> conts, boolean isHealthy) throws Exception {
+      List<ContainerStatus>> conts, boolean isHealthy, NodeId nodeId) throws Exception {
     NodeHeartbeatRequest req = Records.newRecord(NodeHeartbeatRequest.class);
     NodeStatus status = Records.newRecord(NodeStatus.class);
     status.setNodeId(nodeId);

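For orientation only, a minimal usage sketch (not part of the commit) of the revised MockNM API in the hunk above. It relies solely on the constructor and method signatures visible in that hunk; the sketch class itself and the idea of obtaining a ResourceTrackerService from an existing test fixture are assumptions.

    package org.apache.hadoop.yarn.server.resourcemanager;

    import java.util.HashMap;
    import java.util.List;

    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.ContainerStatus;
    import org.apache.hadoop.yarn.api.records.NodeId;
    import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;

    // Sketch only: this helper class and the ResourceTrackerService parameter
    // are assumptions; only the MockNM calls below come from the diff.
    class MockNMUsageSketch {

      static void exercise(ResourceTrackerService resourceTracker) throws Exception {
        // Constructor signature as shown in the hunk above.
        MockNM nm = new MockNM("host1:1234", 4 * 1024, resourceTracker);

        NodeId nodeId = nm.registerNode();  // registration reports the node's httpPort
        int httpPort = nm.getHttpPort();    // accessor added by this change

        // The single-argument convenience overload keeps its old shape...
        HeartbeatResponse response = nm.nodeHeartbeat(true);

        // ...while the full overload now takes the NodeId explicitly.
        HashMap<ApplicationId, List<ContainerStatus>> statuses =
            new HashMap<ApplicationId, List<ContainerStatus>>();
        nm.nodeHeartbeat(statuses, true, nodeId);
      }
    }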
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java Wed Nov  2 05:34:31 2011
@@ -139,7 +139,8 @@ public class MockRM extends ResourceMana
 
   @Override
   protected ClientRMService createClientRMService() {
-    return new ClientRMService(getRMContext(), getResourceScheduler(), rmAppManager) {
+    return new ClientRMService(getRMContext(), getResourceScheduler(),
+        rmAppManager, applicationACLsManager) {
       @Override
       public void start() {
         //override to not start rpc handler
@@ -202,9 +203,13 @@ public class MockRM extends ResourceMana
   }
 
   @Override
-  protected AdminService createAdminService() {
-    return new AdminService(getConfig(), scheduler, getRMContext(), 
-        this.nodesListManager){
+  protected AdminService createAdminService(
+      ClientRMService clientRMService, 
+      ApplicationMasterService applicationMasterService,
+      ResourceTrackerService resourceTrackerService) {
+    return new AdminService(
+        getConfig(), scheduler, getRMContext(), this.nodesListManager,
+        clientRMService, applicationMasterService, resourceTrackerService){
       @Override
       public void start() {
         //override to not start rpc handler
@@ -215,6 +220,10 @@ public class MockRM extends ResourceMana
       }
     };
   }
+  
+  public NodesListManager getNodesListManager() {
+    return this.nodesListManager;
+  }
 
   @Override
   protected void startWepApp() {

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java Wed Nov  2 05:34:31 2011
@@ -45,7 +45,6 @@ import org.apache.hadoop.yarn.api.record
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
-import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -186,7 +185,7 @@ public class NodeManager implements Cont
         BuilderUtils.newContainer(containerLaunchContext.getContainerId(),
             this.nodeId, nodeHttpAddress,
             containerLaunchContext.getResource(), 
-            null                                        // DKDC - Doesn't matter
+            null, null                                 // DKDC - Doesn't matter
             );
 
     applicationContainers.add(container);

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java Wed Nov  2 05:34:31 2011
@@ -19,29 +19,27 @@
 package org.apache.hadoop.yarn.server.resourcemanager;
 
 
+import java.util.HashMap;
 import java.util.List;
 import java.util.concurrent.ConcurrentMap;
 
 import junit.framework.Assert;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.MockApps;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.Dispatcher;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.MockApps;
+import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
-import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
-import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager;
 import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
-import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService;
-import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEvent;
-import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEventType;
-import org.apache.hadoop.yarn.server.resourcemanager.RMAppManager;
-import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.MockRMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
@@ -49,15 +47,15 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.service.Service;
-
 import org.junit.Test;
-import com.google.common.collect.Maps;
+
 import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
 
 /**
  * Testing applications being retired from RM.
@@ -95,7 +93,7 @@ public class TestAppManager{
     AMLivelinessMonitor amLivelinessMonitor = new AMLivelinessMonitor(
         rmDispatcher);
     return new RMContextImpl(new MemStore(), rmDispatcher,
-        containerAllocationExpirer, amLivelinessMonitor) {
+        containerAllocationExpirer, amLivelinessMonitor, null) {
       @Override
       public ConcurrentMap<ApplicationId, RMApp> getRMApps() {
         return map;
@@ -135,14 +133,16 @@ public class TestAppManager{
   public class TestRMAppManager extends RMAppManager {
 
     public TestRMAppManager(RMContext context, Configuration conf) {
-      super(context, null, null, null, conf);
+      super(context, null, null, null, new ApplicationACLsManager(conf), conf);
       setCompletedAppsMax(YarnConfiguration.DEFAULT_RM_MAX_COMPLETED_APPLICATIONS);
     }
 
-    public TestRMAppManager(RMContext context, ClientToAMSecretManager
-        clientToAMSecretManager, YarnScheduler scheduler,
-        ApplicationMasterService masterService, Configuration conf) {
-      super(context, clientToAMSecretManager, scheduler, masterService, conf);
+    public TestRMAppManager(RMContext context,
+        ClientToAMSecretManager clientToAMSecretManager,
+        YarnScheduler scheduler, ApplicationMasterService masterService,
+        ApplicationACLsManager applicationACLsManager, Configuration conf) {
+      super(context, clientToAMSecretManager, scheduler, masterService,
+          applicationACLsManager, conf);
       setCompletedAppsMax(YarnConfiguration.DEFAULT_RM_MAX_COMPLETED_APPLICATIONS);
     }
 
@@ -150,8 +150,8 @@ public class TestAppManager{
       super.checkAppNumCompletedLimit();
     }
 
-    public void addCompletedApp(ApplicationId appId) {
-      super.addCompletedApp(appId);
+    public void finishApplication(ApplicationId appId) {
+      super.finishApplication(appId);
     }
 
     public int getCompletedAppsListSize() {
@@ -163,7 +163,7 @@ public class TestAppManager{
     }
     public void submitApplication(
         ApplicationSubmissionContext submissionContext) {
-      super.submitApplication(submissionContext);
+      super.submitApplication(submissionContext, System.currentTimeMillis());
     }
   }
 
@@ -172,7 +172,7 @@ public class TestAppManager{
       if (app.getState() == RMAppState.FINISHED
           || app.getState() == RMAppState.KILLED 
           || app.getState() == RMAppState.FAILED) {
-        appMonitor.addCompletedApp(app.getApplicationId());
+        appMonitor.finishApplication(app.getApplicationId());
       }
     }
   }
@@ -288,7 +288,7 @@ public class TestAppManager{
     Assert.assertEquals("Number of apps incorrect before", 10, rmContext
         .getRMApps().size());
 
-    appMonitor.addCompletedApp(null);
+    appMonitor.finishApplication(null);
 
     Assert.assertEquals("Number of completed apps incorrect after check", 0,
         appMonitor.getCompletedAppsListSize());
@@ -339,14 +339,19 @@ public class TestAppManager{
     ApplicationMasterService masterService =  new ApplicationMasterService(rmContext,
         new ApplicationTokenSecretManager(), scheduler);
     Configuration conf = new Configuration();
-    TestRMAppManager appMonitor = new TestRMAppManager(rmContext, 
-        new ClientToAMSecretManager(), scheduler, masterService, conf);
+    TestRMAppManager appMonitor = new TestRMAppManager(rmContext,
+        new ClientToAMSecretManager(), scheduler, masterService,
+        new ApplicationACLsManager(conf), conf);
 
     ApplicationId appID = MockApps.newAppID(1);
     RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
     ApplicationSubmissionContext context = 
         recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
     context.setApplicationId(appID);
+    ContainerLaunchContext amContainer = recordFactory
+        .newRecordInstance(ContainerLaunchContext.class);
+    amContainer.setApplicationACLs(new HashMap<ApplicationAccessType, String>());
+    context.setAMContainerSpec(amContainer);
     setupDispatcher(rmContext, conf);
 
     appMonitor.submitApplication(context);
@@ -382,8 +387,9 @@ public class TestAppManager{
     ApplicationMasterService masterService =  new ApplicationMasterService(rmContext,
         new ApplicationTokenSecretManager(), scheduler);
     Configuration conf = new Configuration();
-    TestRMAppManager appMonitor = new TestRMAppManager(rmContext, 
-        new ClientToAMSecretManager(), scheduler, masterService, conf);
+    TestRMAppManager appMonitor = new TestRMAppManager(rmContext,
+        new ClientToAMSecretManager(), scheduler, masterService,
+        new ApplicationACLsManager(conf), conf);
 
     ApplicationId appID = MockApps.newAppID(10);
     RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
@@ -391,6 +397,11 @@ public class TestAppManager{
     context.setApplicationId(appID);
     context.setApplicationName("testApp1");
     context.setQueue("testQueue");
+    ContainerLaunchContext amContainer = recordFactory
+        .newRecordInstance(ContainerLaunchContext.class);
+    amContainer
+        .setApplicationACLs(new HashMap<ApplicationAccessType, String>());
+    context.setAMContainerSpec(amContainer);
 
     setupDispatcher(rmContext, conf);
 
@@ -424,8 +435,9 @@ public class TestAppManager{
     ApplicationMasterService masterService =  new ApplicationMasterService(rmContext,
         new ApplicationTokenSecretManager(), scheduler);
     Configuration conf = new Configuration();
-    TestRMAppManager appMonitor = new TestRMAppManager(rmContext, 
-        new ClientToAMSecretManager(), scheduler, masterService, conf);
+    TestRMAppManager appMonitor = new TestRMAppManager(rmContext,
+        new ClientToAMSecretManager(), scheduler, masterService,
+        new ApplicationACLsManager(conf), conf);
 
     ApplicationId appID = MockApps.newAppID(0);
     RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java Wed Nov  2 05:34:31 2011
@@ -31,7 +31,6 @@ import org.apache.hadoop.yarn.server.api
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
-import org.apache.hadoop.yarn.util.Records;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java Wed Nov  2 05:34:31 2011
@@ -19,9 +19,11 @@
 package org.apache.hadoop.yarn.server.resourcemanager;
 
 import java.io.IOException;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.ContainerManager;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
@@ -32,6 +34,7 @@ import org.apache.hadoop.yarn.api.protoc
 import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncher;
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
@@ -39,6 +42,7 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
@@ -56,6 +60,11 @@ public class TestApplicationMasterLaunch
     boolean launched = false;
     boolean cleanedup = false;
     String attemptIdAtContainerManager = null;
+    String containerIdAtContainerManager = null;
+    String nmHostAtContainerManager = null;
+    int nmPortAtContainerManager;
+    int nmHttpPortAtContainerManager;
+    long submitTimeAtContainerManager;
 
     @Override
     public StartContainerResponse
@@ -63,9 +72,22 @@ public class TestApplicationMasterLaunch
             throws YarnRemoteException {
       LOG.info("Container started by MyContainerManager: " + request);
       launched = true;
-      attemptIdAtContainerManager = request.getContainerLaunchContext()
-          .getEnvironment().get(
-              ApplicationConstants.APPLICATION_ATTEMPT_ID_ENV);
+      Map<String, String> env =
+          request.getContainerLaunchContext().getEnvironment();
+      containerIdAtContainerManager =
+          env.get(ApplicationConstants.AM_CONTAINER_ID_ENV);
+      ContainerId containerId =
+          ConverterUtils.toContainerId(containerIdAtContainerManager);
+      attemptIdAtContainerManager =
+          containerId.getApplicationAttemptId().toString();
+      nmHostAtContainerManager = env.get(ApplicationConstants.NM_HOST_ENV);
+      nmPortAtContainerManager =
+          Integer.parseInt(env.get(ApplicationConstants.NM_PORT_ENV));
+      nmHttpPortAtContainerManager =
+          Integer.parseInt(env.get(ApplicationConstants.NM_HTTP_PORT_ENV));
+      submitTimeAtContainerManager =
+          Long.parseLong(env.get(ApplicationConstants.APP_SUBMIT_TIME_ENV));
+
       return null;
     }
 
@@ -85,11 +107,17 @@ public class TestApplicationMasterLaunch
 
   }
 
-  private static final class MockRMWithCustomAMLauncher extends MockRM {
+  static class MockRMWithCustomAMLauncher extends MockRM {
 
     private final ContainerManager containerManager;
 
     public MockRMWithCustomAMLauncher(ContainerManager containerManager) {
+      this(new Configuration(), containerManager);
+    }
+
+    public MockRMWithCustomAMLauncher(Configuration conf,
+        ContainerManager containerManager) {
+      super(conf);
       this.containerManager = containerManager;
     }
 
@@ -105,7 +133,7 @@ public class TestApplicationMasterLaunch
               getConfig()) {
             @Override
             protected ContainerManager getContainerMgrProxy(
-                ApplicationId applicationID) throws IOException {
+                ContainerId containerId) {
               return containerManager;
             }
           };
@@ -140,6 +168,17 @@ public class TestApplicationMasterLaunch
     ApplicationAttemptId appAttemptId = attempt.getAppAttemptId();
     Assert.assertEquals(appAttemptId.toString(),
         containerManager.attemptIdAtContainerManager);
+    Assert.assertEquals(app.getSubmitTime(),
+        containerManager.submitTimeAtContainerManager);
+    Assert.assertEquals(app.getRMAppAttempt(appAttemptId)
+        .getSubmissionContext().getAMContainerSpec().getContainerId()
+        .toString(), containerManager.containerIdAtContainerManager);
+    Assert.assertEquals(nm1.getNodeId().getHost(),
+        containerManager.nmHostAtContainerManager);
+    Assert.assertEquals(nm1.getNodeId().getPort(),
+        containerManager.nmPortAtContainerManager);
+    Assert.assertEquals(nm1.getHttpPort(),
+        containerManager.nmHttpPortAtContainerManager);
 
     MockAM am = new MockAM(rm.getRMContext(), rm
         .getApplicationMasterService(), appAttemptId);

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java Wed Nov  2 05:34:31 2011
@@ -167,6 +167,11 @@ public abstract class MockAsm extends Mo
     }
 
     @Override
+    public long getSubmitTime() {
+      throw new UnsupportedOperationException("Not supported yet.");
+    }
+    
+    @Override
     public long getFinishTime() {
       throw new UnsupportedOperationException("Not supported yet.");
     }

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java Wed Nov  2 05:34:31 2011
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.resourcetracker;
 
-import java.util.concurrent.atomic.AtomicInteger;
-
 import junit.framework.Assert;
 
 import org.apache.commons.logging.Log;
@@ -34,12 +32,13 @@ import org.apache.hadoop.yarn.factories.
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
-import org.apache.hadoop.yarn.server.api.records.RegistrationResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.NMLivelinessMonitor;
 import org.apache.hadoop.yarn.server.resourcemanager.NodesListManager;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.NodeEventDispatcher;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
@@ -55,8 +54,6 @@ public class TestNMExpiry {
   ResourceTrackerService resourceTrackerService;
   ContainerTokenSecretManager containerTokenSecretManager = 
     new ContainerTokenSecretManager();
-  AtomicInteger test = new AtomicInteger();
-  AtomicInteger notify = new AtomicInteger();
 
   private class TestNmLivelinessMonitor extends NMLivelinessMonitor {
     public TestNmLivelinessMonitor(Dispatcher dispatcher) {
@@ -68,22 +65,6 @@ public class TestNMExpiry {
       conf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 1000);
       super.init(conf);
     }
-    @Override
-    protected void expire(NodeId id) {
-        LOG.info("Expired  " + id);
-        if (test.addAndGet(1) == 2) {
-          try {
-            /* delay atleast 2 seconds to make sure the 3rd one does not expire
-             * 
-             */
-            Thread.sleep(2000);
-          } catch(InterruptedException ie){}
-          synchronized(notify) {
-            notify.addAndGet(1);
-            notify.notifyAll();
-          }
-        }
-    }
   }
 
   @Before
@@ -91,12 +72,12 @@ public class TestNMExpiry {
     Configuration conf = new Configuration();
     // Dispatcher that processes events inline
     Dispatcher dispatcher = new InlineDispatcher();
+    RMContext context = new RMContextImpl(new MemStore(), dispatcher, null,
+        null, null);
     dispatcher.register(SchedulerEventType.class,
         new InlineDispatcher.EmptyEventHandler());
     dispatcher.register(RMNodeEventType.class,
-        new InlineDispatcher.EmptyEventHandler());
-    RMContext context = new RMContextImpl(new MemStore(), dispatcher, null,
-        null);
+        new NodeEventDispatcher(context));
     NMLivelinessMonitor nmLivelinessMonitor = new TestNmLivelinessMonitor(
         dispatcher);
     nmLivelinessMonitor.init(conf);
@@ -166,6 +147,14 @@ public class TestNMExpiry {
     request2.setHttpPort(0);
     request2.setResource(capability);
     resourceTrackerService.registerNodeManager(request2);
+    
+    int waitCount = 0;
+    while(ClusterMetrics.getMetrics().getNumLostNMs()!=2 && waitCount ++<20){
+      synchronized (this) {
+        wait(100);
+      }
+    }
+    Assert.assertEquals(2, ClusterMetrics.getMetrics().getNumLostNMs());
 
     request3 = recordFactory
         .newRecordInstance(RegisterNodeManagerRequest.class);
@@ -175,20 +164,13 @@ public class TestNMExpiry {
     request3.setNodeId(nodeId3);
     request3.setHttpPort(0);
     request3.setResource(capability);
-    RegistrationResponse thirdNodeRegResponse = resourceTrackerService
+    resourceTrackerService
         .registerNodeManager(request3).getRegistrationResponse();
 
     /* test to see if hostanme 3 does not expire */
     stopT = false;
     new ThirdNodeHeartBeatThread().start();
-    int timeOut = 0;
-    synchronized (notify) {
-      while (notify.get() == 0 && timeOut++ < 30) {
-        notify.wait(1000);
-      }
-    }
-    Assert.assertEquals(2, test.get()); 
-
+    Assert.assertEquals(2,ClusterMetrics.getMetrics().getNumLostNMs());
     stopT = true;
   }
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java Wed Nov  2 05:34:31 2011
@@ -32,6 +32,7 @@ import org.apache.hadoop.yarn.factory.pr
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
+import org.apache.hadoop.yarn.server.api.records.NodeAction;
 import org.apache.hadoop.yarn.server.resourcemanager.NMLivelinessMonitor;
 import org.apache.hadoop.yarn.server.resourcemanager.NodesListManager;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
@@ -65,8 +66,8 @@ public class TestRMNMRPCResponseId {
         ; // ignore
       }
     });
-    RMContext context = new RMContextImpl(new MemStore(), dispatcher, null,
-        null);
+    RMContext context = 
+        new RMContextImpl(new MemStore(), dispatcher, null, null, null);
     dispatcher.register(RMNodeEventType.class,
         new ResourceManager.NodeEventDispatcher(context));
     NodesListManager nodesListManager = new NodesListManager();
@@ -130,6 +131,6 @@ public class TestRMNMRPCResponseId {
     nodeStatus.setResponseId(0);
     response = resourceTrackerService.nodeHeartbeat(nodeHeartBeatRequest)
         .getHeartbeatResponse();
-    Assert.assertTrue(response.getReboot() == true);
+    Assert.assertTrue(NodeAction.REBOOT.equals(response.getNodeAction()));
   }
 }
\ No newline at end of file

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java Wed Nov  2 05:34:31 2011
@@ -33,6 +33,7 @@ public class MockRMApp implements RMApp 
   String name = MockApps.newAppName();
   String queue = MockApps.newQueue();
   long start = System.currentTimeMillis() - (int) (Math.random() * DT);
+  long submit = start - (int) (Math.random() * DT);
   long finish = 0;
   RMAppState state = RMAppState.NEW;
   int failCount = 0;
@@ -141,6 +142,11 @@ public class MockRMApp implements RMApp 
     return start;
   }
 
+  @Override
+  public long getSubmitTime() {
+    return submit;
+  }
+
   public void setStartTime(long time) {
     this.start = time;
   }

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java Wed Nov  2 05:34:31 2011
@@ -21,7 +21,6 @@ package org.apache.hadoop.yarn.server.re
 import static org.mockito.Mockito.mock;
 
 import java.io.IOException;
-import java.util.List;
 
 import junit.framework.Assert;
 
@@ -51,7 +50,6 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 
 import org.junit.Before;
-import org.junit.After;
 import org.junit.Test;
 
 
@@ -61,7 +59,7 @@ public class TestRMAppTransitions {
   private RMContext rmContext;
   private static int maxRetries = 4;
   private static int appId = 1;
-  private AsyncDispatcher rmDispatcher;
+//  private AsyncDispatcher rmDispatcher;
 
   // ignore all the RM application attempt events
   private static final class TestApplicationAttemptEventDispatcher implements
@@ -121,7 +119,7 @@ public class TestRMAppTransitions {
         mock(ContainerAllocationExpirer.class);
     AMLivelinessMonitor amLivelinessMonitor = mock(AMLivelinessMonitor.class);
     this.rmContext = new RMContextImpl(new MemStore(), rmDispatcher,
-        containerAllocationExpirer, amLivelinessMonitor);
+        containerAllocationExpirer, amLivelinessMonitor, null);
 
     rmDispatcher.register(RMAppAttemptEventType.class,
         new TestApplicationAttemptEventDispatcher(this.rmContext));
@@ -152,7 +150,7 @@ public class TestRMAppTransitions {
         conf, name, user,
         queue, submissionContext, clientTokenStr,
         appStore, scheduler,
-        masterService);
+        masterService, System.currentTimeMillis());
 
     testAppStartState(applicationId, user, name, queue, application);
     return application;

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java Wed Nov  2 05:34:31 2011
@@ -138,7 +138,7 @@ public class TestRMAppAttemptTransitions
         mock(ContainerAllocationExpirer.class);
     AMLivelinessMonitor amLivelinessMonitor = mock(AMLivelinessMonitor.class);
     rmContext = new RMContextImpl(new MemStore(), rmDispatcher,
-      containerAllocationExpirer, amLivelinessMonitor);
+      containerAllocationExpirer, amLivelinessMonitor, null);
     
     scheduler = mock(YarnScheduler.class);
     masterService = mock(ApplicationMasterService.class);
@@ -178,7 +178,7 @@ public class TestRMAppAttemptTransitions
     application = mock(RMApp.class);
     applicationAttempt = 
         new RMAppAttemptImpl(applicationAttemptId, null, rmContext, scheduler, 
-            masterService, submissionContext);
+            masterService, submissionContext, null);
     when(application.getCurrentAppAttempt()).thenReturn(applicationAttempt);
     when(application.getApplicationId()).thenReturn(applicationId);
     
@@ -328,7 +328,9 @@ public class TestRMAppAttemptTransitions
     assertEquals(container, applicationAttempt.getMasterContainer());
     assertEquals(host, applicationAttempt.getHost());
     assertEquals(rpcPort, applicationAttempt.getRpcPort());
-    assertEquals(trackingUrl, applicationAttempt.getTrackingUrl());
+    assertEquals(trackingUrl, applicationAttempt.getOriginalTrackingUrl());
+    assertEquals("null/proxy/"+applicationAttempt.getAppAttemptId().
+        getApplicationId()+"/", applicationAttempt.getTrackingUrl());
     
     // TODO - need to add more checks relevant to this state
   }
@@ -343,7 +345,9 @@ public class TestRMAppAttemptTransitions
     assertEquals(RMAppAttemptState.FINISHED, 
         applicationAttempt.getAppAttemptState());
     assertEquals(diagnostics, applicationAttempt.getDiagnostics());
-    assertEquals(trackingUrl, applicationAttempt.getTrackingUrl());
+    assertEquals(trackingUrl, applicationAttempt.getOriginalTrackingUrl());
+    assertEquals("null/proxy/"+applicationAttempt.getAppAttemptId().
+        getApplicationId()+"/", applicationAttempt.getTrackingUrl());
     assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
     assertEquals(container, applicationAttempt.getMasterContainer());
     assertEquals(finalStatus, applicationAttempt.getFinalApplicationStatus());

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java Wed Nov  2 05:34:31 2011
@@ -18,22 +18,23 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.apache.hadoop.test.MockitoMaker.make;
+import static org.apache.hadoop.test.MockitoMaker.stub;
+import static org.junit.Assert.assertNull;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.MetricsSource;
-import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
 import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import static org.apache.hadoop.test.MetricsAsserts.*;
-import static org.apache.hadoop.test.MockitoMaker.*;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
-
-import org.junit.Before;
 import org.junit.Test;
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.*;
 
 public class TestQueueMetrics {
   static final int GB = 1024; // MB
@@ -56,16 +57,16 @@ public class TestQueueMetrics {
     metrics.incrPendingResources(user, 5, Resources.createResource(15*GB));
     // Available resources is set externally, as it depends on dynamic
     // configurable cluster/queue resources
-    checkResources(queueSource, 0, 0, 100, 15, 5, 0, 0);
+    checkResources(queueSource, 0, 0, 0, 0, 100, 15, 5, 0, 0);
 
     metrics.incrAppsRunning(user);
     checkApps(queueSource, 1, 0, 1, 0, 0, 0);
 
     metrics.allocateResources(user, 3, Resources.createResource(2*GB));
-    checkResources(queueSource, 6, 3, 100, 9, 2, 0, 0);
+    checkResources(queueSource, 6, 3, 3, 0, 100, 9, 2, 0, 0);
 
     metrics.releaseResources(user, 1, Resources.createResource(2*GB));
-    checkResources(queueSource, 4, 2, 100, 9, 2, 0, 0);
+    checkResources(queueSource, 4, 2, 3, 1, 100, 9, 2, 0, 0);
 
     metrics.finishApp(app, RMAppAttemptState.FINISHED);
     checkApps(queueSource, 1, 0, 0, 1, 0, 0);
@@ -91,20 +92,20 @@ public class TestQueueMetrics {
     metrics.incrPendingResources(user, 5, Resources.createResource(15*GB));
     // Available resources is set externally, as it depends on dynamic
     // configurable cluster/queue resources
-    checkResources(queueSource, 0, 0, 100, 15, 5, 0, 0);
-    checkResources(userSource, 0, 0, 10, 15, 5, 0, 0);
+    checkResources(queueSource, 0, 0, 0, 0,  100, 15, 5, 0, 0);
+    checkResources(userSource, 0, 0, 0, 0, 10, 15, 5, 0, 0);
 
     metrics.incrAppsRunning(user);
     checkApps(queueSource, 1, 0, 1, 0, 0, 0);
     checkApps(userSource, 1, 0, 1, 0, 0, 0);
 
     metrics.allocateResources(user, 3, Resources.createResource(2*GB));
-    checkResources(queueSource, 6, 3, 100, 9, 2, 0, 0);
-    checkResources(userSource, 6, 3, 10, 9, 2, 0, 0);
+    checkResources(queueSource, 6, 3, 3, 0, 100, 9, 2, 0, 0);
+    checkResources(userSource, 6, 3, 3, 0, 10, 9, 2, 0, 0);
 
     metrics.releaseResources(user, 1, Resources.createResource(2*GB));
-    checkResources(queueSource, 4, 2, 100, 9, 2, 0, 0);
-    checkResources(userSource, 4, 2, 10, 9, 2, 0, 0);
+    checkResources(queueSource, 4, 2, 3, 1, 100, 9, 2, 0, 0);
+    checkResources(userSource, 4, 2, 3, 1, 10, 9, 2, 0, 0);
 
     metrics.finishApp(app, RMAppAttemptState.FINISHED);
     checkApps(queueSource, 1, 0, 0, 1, 0, 0);
@@ -140,10 +141,10 @@ public class TestQueueMetrics {
     parentMetrics.setAvailableResourcesToUser(user, Resources.createResource(10*GB));
     metrics.setAvailableResourcesToUser(user, Resources.createResource(10*GB));
     metrics.incrPendingResources(user, 5, Resources.createResource(15*GB));
-    checkResources(queueSource, 0, 0, 100, 15, 5, 0, 0);
-    checkResources(parentQueueSource, 0, 0, 100, 15, 5, 0, 0);
-    checkResources(userSource, 0, 0, 10, 15, 5, 0, 0);
-    checkResources(parentUserSource, 0, 0, 10, 15, 5, 0, 0);
+    checkResources(queueSource, 0, 0, 0, 0, 100, 15, 5, 0, 0);
+    checkResources(parentQueueSource, 0, 0, 0, 0, 100, 15, 5, 0, 0);
+    checkResources(userSource, 0, 0, 0, 0, 10, 15, 5, 0, 0);
+    checkResources(parentUserSource, 0, 0, 0, 0, 10, 15, 5, 0, 0);
 
     metrics.incrAppsRunning(user);
     checkApps(queueSource, 1, 0, 1, 0, 0, 0);
@@ -153,17 +154,17 @@ public class TestQueueMetrics {
     metrics.reserveResource(user, Resources.createResource(3*GB));
     // Available resources is set externally, as it depends on dynamic
     // configurable cluster/queue resources
-    checkResources(queueSource, 6, 3, 100, 9, 2, 3, 1);
-    checkResources(parentQueueSource, 6, 3, 100, 9, 2, 3, 1);
-    checkResources(userSource, 6, 3, 10, 9, 2, 3, 1);
-    checkResources(parentUserSource, 6, 3, 10, 9, 2, 3, 1);
+    checkResources(queueSource, 6, 3, 3, 0, 100, 9, 2, 3, 1);
+    checkResources(parentQueueSource, 6, 3, 3, 0,  100, 9, 2, 3, 1);
+    checkResources(userSource, 6, 3, 3, 0, 10, 9, 2, 3, 1);
+    checkResources(parentUserSource, 6, 3, 3, 0, 10, 9, 2, 3, 1);
 
     metrics.releaseResources(user, 1, Resources.createResource(2*GB));
     metrics.unreserveResource(user, Resources.createResource(3*GB));
-    checkResources(queueSource, 4, 2, 100, 9, 2, 0, 0);
-    checkResources(parentQueueSource, 4, 2, 100, 9, 2, 0, 0);
-    checkResources(userSource, 4, 2, 10, 9, 2, 0, 0);
-    checkResources(parentUserSource, 4, 2, 10, 9, 2, 0, 0);
+    checkResources(queueSource, 4, 2, 3, 1, 100, 9, 2, 0, 0);
+    checkResources(parentQueueSource, 4, 2, 3, 1, 100, 9, 2, 0, 0);
+    checkResources(userSource, 4, 2, 3, 1, 10, 9, 2, 0, 0);
+    checkResources(parentUserSource, 4, 2, 3, 1, 10, 9, 2, 0, 0);
 
     metrics.finishApp(app, RMAppAttemptState.FINISHED);
     checkApps(queueSource, 1, 0, 0, 1, 0, 0);
@@ -184,11 +185,13 @@ public class TestQueueMetrics {
   }
 
   public static void checkResources(MetricsSource source, int allocGB,
-      int allocCtnrs, int availGB, int pendingGB, int pendingCtnrs,
+      int allocCtnrs, long aggreAllocCtnrs, long aggreReleasedCtnrs, int availGB, int pendingGB, int pendingCtnrs,
       int reservedGB, int reservedCtnrs) {
     MetricsRecordBuilder rb = getMetrics(source);
     assertGauge("AllocatedGB", allocGB, rb);
     assertGauge("AllocatedContainers", allocCtnrs, rb);
+    assertCounter("AggregateContainersAllocated", aggreAllocCtnrs, rb);
+    assertCounter("AggregateContainersReleased", aggreReleasedCtnrs, rb);
     assertGauge("AvailableGB", availGB, rb);
     assertGauge("PendingGB", pendingGB, rb);
     assertGauge("PendingContainers", pendingCtnrs, rb);

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java Wed Nov  2 05:34:31 2011
@@ -14,6 +14,7 @@ import org.apache.hadoop.yarn.api.record
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -101,8 +102,10 @@ public class TestApplicationLimits {
     
     CapacitySchedulerContext csContext = mock(CapacitySchedulerContext.class);
     when(csContext.getConfiguration()).thenReturn(csConf);
-    when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB));
-    when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16*GB));
+    when(csContext.getMinimumResourceCapability()).
+        thenReturn(Resources.createResource(GB));
+    when(csContext.getMaximumResourceCapability()).
+        thenReturn(Resources.createResource(16*GB));
     
     // Say cluster has 100 nodes of 16G each
     Resource clusterResource = Resources.createResource(100 * 16 * GB);
@@ -227,6 +230,76 @@ public class TestApplicationLimits {
     assertEquals(0, queue.getNumPendingApplications(user_1));
   }
 
+  @Test
+  public void testHeadroom() throws Exception {
+    CapacitySchedulerConfiguration csConf = 
+        new CapacitySchedulerConfiguration();
+    csConf.setUserLimit(CapacityScheduler.ROOT + "." + A, 25);
+    setupQueueConfiguration(csConf);
+    
+    CapacitySchedulerContext csContext = mock(CapacitySchedulerContext.class);
+    when(csContext.getConfiguration()).thenReturn(csConf);
+    when(csContext.getMinimumResourceCapability()).
+        thenReturn(Resources.createResource(GB));
+    when(csContext.getMaximumResourceCapability()).
+        thenReturn(Resources.createResource(16*GB));
+    
+    // Say cluster has 100 nodes of 16G each
+    Resource clusterResource = Resources.createResource(100 * 16 * GB);
+    when(csContext.getClusterResources()).thenReturn(clusterResource);
+    
+    Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
+    CapacityScheduler.parseQueue(csContext, csConf, null, "root", 
+        queues, queues, 
+        CapacityScheduler.queueComparator, 
+        CapacityScheduler.applicationComparator, 
+        TestUtils.spyHook);
+
+    // Manipulate queue 'a'
+    LeafQueue queue = TestLeafQueue.stubLeafQueue((LeafQueue)queues.get(A));
+    
+    String host_0 = "host_0";
+    String rack_0 = "rack_0";
+    SchedulerNode node_0 = TestUtils.getMockNode(host_0, rack_0, 0, 16*GB);
+
+    final String user_0 = "user_0";
+    final String user_1 = "user_1";
+    
+    int APPLICATION_ID = 0;
+
+    // Submit first application from user_0, check headroom
+    SchedulerApp app_0_0 = getMockApplication(APPLICATION_ID++, user_0);
+    queue.submitApplication(app_0_0, user_0, A);
+    queue.assignContainers(clusterResource, node_0); // Schedule to compute
+    Resource expectedHeadroom = Resources.createResource(10*16*GB);
+    verify(app_0_0).setAvailableResourceLimit(eq(expectedHeadroom));
+
+    // Submit second application from user_0, check headroom
+    SchedulerApp app_0_1 = getMockApplication(APPLICATION_ID++, user_0);
+    queue.submitApplication(app_0_1, user_0, A);
+    queue.assignContainers(clusterResource, node_0); // Schedule to compute
+    verify(app_0_0, times(2)).setAvailableResourceLimit(eq(expectedHeadroom));
+    verify(app_0_1).setAvailableResourceLimit(eq(expectedHeadroom));// no change
+    
+    // Submit first application from user_1, check  for new headroom
+    SchedulerApp app_1_0 = getMockApplication(APPLICATION_ID++, user_1);
+    queue.submitApplication(app_1_0, user_1, A);
+    queue.assignContainers(clusterResource, node_0); // Schedule to compute
+    expectedHeadroom = Resources.createResource(10*16*GB / 2); // changes
+    verify(app_0_0).setAvailableResourceLimit(eq(expectedHeadroom));
+    verify(app_0_1).setAvailableResourceLimit(eq(expectedHeadroom));
+    verify(app_1_0).setAvailableResourceLimit(eq(expectedHeadroom));
+    
+    // Now reduce cluster size and check for the smaller headroom
+    clusterResource = Resources.createResource(90*16*GB);
+    queue.assignContainers(clusterResource, node_0); // Schedule to compute
+    expectedHeadroom = Resources.createResource(9*16*GB / 2); // changes
+    verify(app_0_0).setAvailableResourceLimit(eq(expectedHeadroom));
+    verify(app_0_1).setAvailableResourceLimit(eq(expectedHeadroom));
+    verify(app_1_0).setAvailableResourceLimit(eq(expectedHeadroom));
+  }
+  
+
   @After
   public void tearDown() {
   

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java Wed Nov  2 05:34:31 2011
@@ -117,7 +117,7 @@ public class TestLeafQueue {
     LOG.info("Setup top-level queues a and b");
   }
 
-  private LeafQueue stubLeafQueue(LeafQueue queue) {
+  static LeafQueue stubLeafQueue(LeafQueue queue) {
     
     // Mock some methods for ease in these unit tests
     
@@ -158,6 +158,52 @@ public class TestLeafQueue {
     return queue;
   }
   
+ 
+  @Test
+  public void testSingleQueueOneUserMetrics() throws Exception {
+
+    // Manipulate queue 'b' (held in local variable 'a')
+    LeafQueue a = stubLeafQueue((LeafQueue)queues.get(B));
+
+    // Users
+    final String user_0 = "user_0";
+
+    // Submit applications
+    final ApplicationAttemptId appAttemptId_0 = 
+        TestUtils.getMockApplicationAttemptId(0, 0); 
+    SchedulerApp app_0 = 
+        new SchedulerApp(appAttemptId_0, user_0, a, rmContext, null);
+    a.submitApplication(app_0, user_0, B);
+
+    final ApplicationAttemptId appAttemptId_1 = 
+        TestUtils.getMockApplicationAttemptId(1, 0); 
+    SchedulerApp app_1 = 
+        new SchedulerApp(appAttemptId_1, user_0, a, rmContext, null);
+    a.submitApplication(app_1, user_0, B);  // same user
+
+    
+    // Setup some nodes
+    String host_0 = "host_0";
+    SchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8*GB);
+    
+    final int numNodes = 1;
+    Resource clusterResource = Resources.createResource(numNodes * (8*GB));
+    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
+
+    // Setup resource-requests
+    Priority priority = TestUtils.createMockPriority(1);
+    app_0.updateResourceRequests(Collections.singletonList(
+            TestUtils.createResourceRequest(RMNodeImpl.ANY, 1*GB, 3, priority, 
+                recordFactory))); 
+
+    // Start testing...
+    
+    // Only 1 container
+    a.assignContainers(clusterResource, node_0);
+    assertEquals(7, a.getMetrics().getAvailableGB());
+  }
+
+
   @Test
   public void testSingleQueueWithOneUser() throws Exception {
 
@@ -180,6 +226,7 @@ public class TestLeafQueue {
         new SchedulerApp(appAttemptId_1, user_0, a, rmContext, null);
     a.submitApplication(app_1, user_0, A);  // same user
 
+    
     // Setup some nodes
     String host_0 = "host_0";
     SchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8*GB);
@@ -207,6 +254,7 @@ public class TestLeafQueue {
     assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
     assertEquals(0, a.getMetrics().getReservedGB());
     assertEquals(1, a.getMetrics().getAllocatedGB());
+    assertEquals(0, a.getMetrics().getAvailableGB());
 
     // Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also
     // you can get one container more than user-limit
@@ -273,6 +321,7 @@ public class TestLeafQueue {
     assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
     assertEquals(0, a.getMetrics().getReservedGB());
     assertEquals(0, a.getMetrics().getAllocatedGB());
+    assertEquals(1, a.getMetrics().getAvailableGB());
   }
   
   @Test
@@ -494,6 +543,7 @@ public class TestLeafQueue {
     assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
     assertEquals(0, a.getMetrics().getReservedGB());
     assertEquals(1, a.getMetrics().getAllocatedGB());
+    assertEquals(0, a.getMetrics().getAvailableGB());
 
     // Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also
     // you can get one container more than user-limit

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java Wed Nov  2 05:34:31 2011
@@ -29,6 +29,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 
 import org.junit.After;
@@ -81,6 +82,13 @@ public class TestParentQueue {
     LOG.info("Setup top-level queues a and b");
   }
 
+  private SchedulerApp getMockApplication(int appId, String user) {
+    SchedulerApp application = mock(SchedulerApp.class);
+    doReturn(user).when(application).getUser();
+    doReturn(null).when(application).getHeadroom();
+    return application;
+  }
+
   private void stubQueueAllocation(final CSQueue queue, 
       final Resource clusterResource, final SchedulerNode node, 
       final int allocation) {
@@ -100,7 +108,8 @@ public class TestParentQueue {
           ((ParentQueue)queue).allocateResource(clusterResource, 
               allocatedResource);
         } else {
-          ((LeafQueue)queue).allocateResource(clusterResource, "", 
+          SchedulerApp app1 = getMockApplication(0, "");
+          ((LeafQueue)queue).allocateResource(clusterResource, app1, 
               allocatedResource);
         }
         

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java Wed Nov  2 05:34:31 2011
@@ -75,7 +75,7 @@ public class TestUtils {
         new ContainerAllocationExpirer(nullDispatcher);
     
     RMContext rmContext = 
-        new RMContextImpl(null, nullDispatcher, cae, null);
+        new RMContextImpl(null, nullDispatcher, cae, null, null);
     
     return rmContext;
   }

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java Wed Nov  2 05:34:31 2011
@@ -43,7 +43,7 @@ public class TestNodesPage {
     final int numberOfNodesPerRack = 2;
     // Number of Actual Table Headers for NodesPage.NodesBlock might change in
     // future. In that case this value should be adjusted to the new value.
-    final int numberOfThInMetricsTable = 9;
+    final int numberOfThInMetricsTable = 10;
     final int numberOfActualTableHeaders = 10;
 
     Injector injector = WebAppTests.createMockInjector(RMContext.class,

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java Wed Nov  2 05:34:31 2011
@@ -28,6 +28,7 @@ import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.ConcurrentMap;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
@@ -42,6 +43,7 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
+import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.webapp.WebApps;
 import org.apache.hadoop.yarn.webapp.test.WebAppTests;
 import org.junit.Test;
@@ -54,8 +56,17 @@ import com.google.inject.Module;
 public class TestRMWebApp {
   static final int GiB = 1024; // MiB
 
-  @Test public void testControllerIndex() {
-    Injector injector = WebAppTests.createMockInjector(this);
+  @Test
+  public void testControllerIndex() {
+    Injector injector = WebAppTests.createMockInjector(TestRMWebApp.class,
+        this, new Module() {
+
+          @Override
+          public void configure(Binder binder) {
+            binder.bind(ApplicationACLsManager.class).toInstance(
+                new ApplicationACLsManager(new Configuration()));
+          }
+        });
     RmController c = injector.getInstance(RmController.class);
     c.index();
     assertEquals("Applications", c.get(TITLE, "unknown"));
@@ -109,7 +120,7 @@ public class TestRMWebApp {
     for (RMNode node : nodes) {
       nodesMap.put(node.getNodeID(), node);
     }
-   return new RMContextImpl(new MemStore(), null, null, null) {
+   return new RMContextImpl(new MemStore(), null, null, null, null) {
       @Override
       public ConcurrentMap<ApplicationId, RMApp> getRMApps() {
         return applicationsMaps;

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml Wed Nov  2 05:34:31 2011
@@ -16,14 +16,15 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>${yarn.version}</version>
+    <version>0.24.0-SNAPSHOT</version>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn-server-tests</artifactId>
+  <version>0.24.0-SNAPSHOT</version>
   <name>hadoop-yarn-server-tests</name>
 
   <properties>
-    <install.file>${project.artifact.file}</install.file>
+    <!-- Needed for generating FindBugs warnings using parent pom -->
     <yarn.basedir>${project.parent.parent.basedir}</yarn.basedir>
   </properties>
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java Wed Nov  2 05:34:31 2011
@@ -24,6 +24,7 @@ import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.NodeHealthCheckerService;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -50,6 +51,7 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
 import org.apache.hadoop.yarn.service.AbstractService;
 import org.apache.hadoop.yarn.service.CompositeService;
+import org.apache.hadoop.yarn.service.Service.STATE;
 
 public class MiniYARNCluster extends CompositeService {
 
@@ -60,15 +62,19 @@ public class MiniYARNCluster extends Com
     DefaultMetricsSystem.setMiniClusterMode(true);
   }
 
-  private NodeManager nodeManager;
+  private NodeManager[] nodeManagers;
   private ResourceManager resourceManager;
 
   private ResourceManagerWrapper resourceManagerWrapper;
-  private NodeManagerWrapper nodeManagerWrapper;
   
   private File testWorkDir;
 
   public MiniYARNCluster(String testName) {
+    //default number of nodeManagers = 1
+    this(testName, 1);
+  }
+
+  public MiniYARNCluster(String testName, int noOfNodeManagers) {
     super(testName);
     this.testWorkDir = new File("target", testName);
     try {
@@ -80,8 +86,11 @@ public class MiniYARNCluster extends Com
     } 
     resourceManagerWrapper = new ResourceManagerWrapper();
     addService(resourceManagerWrapper);
-    nodeManagerWrapper = new NodeManagerWrapper();
-    addService(nodeManagerWrapper);
+    nodeManagers = new CustomNodeManager[noOfNodeManagers];
+    for(int index = 0; index < noOfNodeManagers; index++) {
+      addService(new NodeManagerWrapper(index));
+      nodeManagers[index] = new CustomNodeManager();
+    }
   }
 
   public File getTestWorkDir() {
@@ -92,10 +101,10 @@ public class MiniYARNCluster extends Com
     return this.resourceManager;
   }
 
-  public NodeManager getNodeManager() {
-    return this.nodeManager;
+  public NodeManager getNodeManager(int i) {
+    return this.nodeManagers[i];
   }
-
+  
   private class ResourceManagerWrapper extends AbstractService {
     public ResourceManagerWrapper() {
       super(ResourceManagerWrapper.class.getName());
@@ -145,106 +154,60 @@ public class MiniYARNCluster extends Com
   }
 
   private class NodeManagerWrapper extends AbstractService {
-    public NodeManagerWrapper() {
-      super(NodeManagerWrapper.class.getName());
+    int index = 0;
+
+    public NodeManagerWrapper(int i) {
+      super(NodeManagerWrapper.class.getName() + "_" + i);
+      index = i;
     }
 
+    public synchronized void init(Configuration conf) {
+      Configuration config = new Configuration(conf);
+      super.init(config);
+    }
+
     public synchronized void start() {
       try {
-        File localDir =
-            new File(testWorkDir, MiniYARNCluster.this.getName() + "-localDir");
+        File localDir = new File(testWorkDir, MiniYARNCluster.this.getName()
+            + "-localDir-nm-" + index);
         localDir.mkdir();
         LOG.info("Created localDir in " + localDir.getAbsolutePath());
-        getConfig().set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath());
+        getConfig().set(YarnConfiguration.NM_LOCAL_DIRS,
+            localDir.getAbsolutePath());
         File logDir =
             new File(testWorkDir, MiniYARNCluster.this.getName()
-                + "-logDir");
+                + "-logDir-nm-" + index);
         File remoteLogDir =
-          new File(testWorkDir, MiniYARNCluster.this.getName()
-              + "-remoteLogDir");
+            new File(testWorkDir, MiniYARNCluster.this.getName()
+                + "-remoteLogDir-nm-" + index);
         logDir.mkdir();
         remoteLogDir.mkdir();
         LOG.info("Created logDir in " + logDir.getAbsolutePath());
-        getConfig().set(YarnConfiguration.NM_LOG_DIRS, logDir.getAbsolutePath());
+        getConfig().set(YarnConfiguration.NM_LOG_DIRS,
+            logDir.getAbsolutePath());
         getConfig().set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
-            remoteLogDir.getAbsolutePath());
-        getConfig().setInt(YarnConfiguration.NM_VMEM_GB, 4); // By default AM + 2 containers
-        nodeManager = new NodeManager() {
-
-          @Override
-          protected void doSecureLogin() throws IOException {
-            // Don't try to login using keytab in the testcase.
-          };
-
-          @Override
-          protected NodeStatusUpdater createNodeStatusUpdater(Context context,
-              Dispatcher dispatcher, NodeHealthCheckerService healthChecker,
-              ContainerTokenSecretManager containerTokenSecretManager) {
-            return new NodeStatusUpdaterImpl(context, dispatcher,
-                healthChecker, metrics, containerTokenSecretManager) {
-              @Override
-              protected ResourceTracker getRMClient() {
-                final ResourceTrackerService rt = resourceManager
-                    .getResourceTrackerService();
-                final RecordFactory recordFactory =
-                  RecordFactoryProvider.getRecordFactory(null);
-
-                // For in-process communication without RPC
-                return new ResourceTracker() {
-
-                  @Override
-                  public NodeHeartbeatResponse nodeHeartbeat(
-                      NodeHeartbeatRequest request) throws YarnRemoteException {
-                    NodeHeartbeatResponse response = recordFactory.newRecordInstance(
-                        NodeHeartbeatResponse.class);
-                    try {
-                      response.setHeartbeatResponse(rt.nodeHeartbeat(request)
-                          .getHeartbeatResponse());
-                    } catch (IOException ioe) {
-                      LOG.info("Exception in heartbeat from node " + 
-                          request.getNodeStatus().getNodeId(), ioe);
-                      throw RPCUtil.getRemoteException(ioe);
-                    }
-                    return response;
-                  }
-
-                  @Override
-                  public RegisterNodeManagerResponse registerNodeManager(
-                      RegisterNodeManagerRequest request)
-                      throws YarnRemoteException {
-                    RegisterNodeManagerResponse response = recordFactory.newRecordInstance(
-                        RegisterNodeManagerResponse.class);
-                    try {
-                      response.setRegistrationResponse(rt
-                          .registerNodeManager(request)
-                          .getRegistrationResponse());
-                    } catch (IOException ioe) {
-                      LOG.info("Exception in node registration from "
-                          + request.getNodeId().toString(), ioe);
-                      throw RPCUtil.getRemoteException(ioe);
-                    }
-                    return response;
-                  }
-                };
-              };
-            };
-          };
-        };
-        nodeManager.init(getConfig());
+            remoteLogDir.getAbsolutePath());
+        // By default AM + 2 containers
+        getConfig().setInt(YarnConfiguration.NM_PMEM_MB, 4*1024);
+        getConfig().set(YarnConfiguration.NM_ADDRESS, "0.0.0.0:0");
+        getConfig().set(YarnConfiguration.NM_LOCALIZER_ADDRESS, "0.0.0.0:0");
+        getConfig().set(YarnConfiguration.NM_WEBAPP_ADDRESS, "0.0.0.0:0");
+        LOG.info("Starting NM: " + index);
+        nodeManagers[index].init(getConfig());
         new Thread() {
           public void run() {
-            nodeManager.start();
+            nodeManagers[index].start();
           };
         }.start();
         int waitCount = 0;
-        while (nodeManager.getServiceState() == STATE.INITED
+        while (nodeManagers[index].getServiceState() == STATE.INITED
             && waitCount++ < 60) {
-          LOG.info("Waiting for NM to start...");
+          LOG.info("Waiting for NM " + index + " to start...");
           Thread.sleep(1000);
         }
-        if (nodeManager.getServiceState() != STATE.STARTED) {
+        if (nodeManagers[index].getServiceState() != STATE.STARTED) {
           // RM could have failed.
-          throw new IOException("NodeManager failed to start");
+          throw new IOException("NodeManager " + index + " failed to start");
         }
         super.start();
       } catch (Throwable t) {
@@ -254,10 +217,71 @@ public class MiniYARNCluster extends Com
 
     @Override
     public synchronized void stop() {
-      if (nodeManager != null) {
-        nodeManager.stop();
+      if (nodeManagers[index] != null) {
+        nodeManagers[index].stop();
       }
       super.stop();
     }
   }
+  
+  private class CustomNodeManager extends NodeManager {
+    @Override
+    protected void doSecureLogin() throws IOException {
+      // Don't try to login using keytab in the testcase.
+    };
+
+    @Override
+    protected NodeStatusUpdater createNodeStatusUpdater(Context context,
+        Dispatcher dispatcher, NodeHealthCheckerService healthChecker,
+        ContainerTokenSecretManager containerTokenSecretManager) {
+      return new NodeStatusUpdaterImpl(context, dispatcher,
+          healthChecker, metrics, containerTokenSecretManager) {
+        @Override
+        protected ResourceTracker getRMClient() {
+          final ResourceTrackerService rt = resourceManager
+              .getResourceTrackerService();
+          final RecordFactory recordFactory =
+            RecordFactoryProvider.getRecordFactory(null);
+
+          // For in-process communication without RPC
+          return new ResourceTracker() {
+
+            @Override
+            public NodeHeartbeatResponse nodeHeartbeat(
+                NodeHeartbeatRequest request) throws YarnRemoteException {
+              NodeHeartbeatResponse response = recordFactory.newRecordInstance(
+                  NodeHeartbeatResponse.class);
+              try {
+                response.setHeartbeatResponse(rt.nodeHeartbeat(request)
+                    .getHeartbeatResponse());
+              } catch (IOException ioe) {
+                LOG.info("Exception in heartbeat from node " + 
+                    request.getNodeStatus().getNodeId(), ioe);
+                throw RPCUtil.getRemoteException(ioe);
+              }
+              return response;
+            }
+
+            @Override
+            public RegisterNodeManagerResponse registerNodeManager(
+                RegisterNodeManagerRequest request)
+                throws YarnRemoteException {
+              RegisterNodeManagerResponse response = recordFactory.
+                  newRecordInstance(RegisterNodeManagerResponse.class);
+              try {
+                response.setRegistrationResponse(rt
+                    .registerNodeManager(request)
+                    .getRegistrationResponse());
+              } catch (IOException ioe) {
+                LOG.info("Exception in node registration from "
+                    + request.getNodeId().toString(), ioe);
+                throw RPCUtil.getRemoteException(ioe);
+              }
+              return response;
+            }
+          };
+        };
+      };
+    };
+  }
 }

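The MiniYARNCluster rewrite above turns the single embedded NodeManager into an array and adds a
node-count constructor plus getNodeManager(int). A minimal usage sketch, assuming only the API
visible in the diff (the two-argument constructor, getNodeManager, getResourceManager,
getServiceState) and the usual CompositeService init/start/stop lifecycle:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.server.MiniYARNCluster;
    import org.apache.hadoop.yarn.server.nodemanager.NodeManager;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        // Bring up a mini cluster with one ResourceManager and two NodeManagers.
        MiniYARNCluster cluster = new MiniYARNCluster("MiniClusterSketch", 2);
        cluster.init(new Configuration());
        cluster.start();
        try {
          NodeManager nm0 = cluster.getNodeManager(0);
          NodeManager nm1 = cluster.getNodeManager(1);
          System.out.println(nm0.getServiceState() + " / " + nm1.getServiceState());
          // Tests would drive work through cluster.getResourceManager() here.
        } finally {
          cluster.stop();
        }
      }
    }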
Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/
------------------------------------------------------------------------------
--- svn:ignore (added)
+++ svn:ignore Wed Nov  2 05:34:31 2011
@@ -0,0 +1,4 @@
+.classpath
+.project
+target
+.settings

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/pom.xml?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/pom.xml (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/pom.xml Wed Nov  2 05:34:31 2011
@@ -16,11 +16,12 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>${yarn.version}</version>
+    <version>0.24.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn-server</artifactId>
+  <version>0.24.0-SNAPSHOT</version>
   <name>hadoop-yarn-server</name>
   <packaging>pom</packaging>
 
@@ -36,6 +37,7 @@
   <modules>
     <module>hadoop-yarn-server-common</module>
     <module>hadoop-yarn-server-nodemanager</module>
+    <module>hadoop-yarn-server-web-proxy</module>
     <module>hadoop-yarn-server-resourcemanager</module>
     <module>hadoop-yarn-server-tests</module>
   </modules>

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/
------------------------------------------------------------------------------
--- svn:ignore (added)
+++ svn:ignore Wed Nov  2 05:34:31 2011
@@ -0,0 +1,4 @@
+target
+.classpath
+.project
+.settings

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/pom.xml?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/pom.xml (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/pom.xml Wed Nov  2 05:34:31 2011
@@ -16,15 +16,16 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>${yarn.version}</version>
+    <version>0.24.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn-site</artifactId>
+  <version>0.24.0-SNAPSHOT</version>
   <name>hadoop-yarn-site</name>
 
   <properties>
-    <install.file>${project.artifact.file}</install.file>
+    <!-- Needed for generating FindBugs warnings using parent pom -->
     <yarn.basedir>${project.parent.parent.basedir}</yarn.basedir>
   </properties>
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm Wed Nov  2 05:34:31 2011
@@ -22,7 +22,6 @@ Hadoop MapReduce Next Generation - Capac
 
 %{toc|section=1|fromDepth=0}
 
-
 * {Purpose} 
 
   This document describes the <<<CapacityScheduler>>>, a pluggable scheduler 
@@ -141,7 +140,7 @@ Hadoop MapReduce Next Generation - Capac
 *--------------------------------------+--------------------------------------+
 | <<<yarn.resourcemanager.scheduler.class>>> | |
 | | <<<org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler>>> |
-*--------------------------------------------+--------------------------------------------+
+*--------------------------------------+--------------------------------------+
 
   * Setting up <queues>
    

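For reference, the table row above selects the CapacityScheduler through the
yarn.resourcemanager.scheduler.class property. The snippet below is only an illustrative
programmatic equivalent (the key comes from the table, the class name from the
scheduler.capacity package used elsewhere in this commit); in a real deployment the value
belongs in yarn-site.xml.

    import org.apache.hadoop.conf.Configuration;

    public class SchedulerConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("yarn.resourcemanager.scheduler.class",
            "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler");
        System.out.println(conf.get("yarn.resourcemanager.scheduler.class"));
      }
    }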
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/SingleCluster.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/SingleCluster.apt.vm?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/SingleCluster.apt.vm (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/SingleCluster.apt.vm Wed Nov  2 05:34:31 2011
@@ -20,6 +20,8 @@ Hadoop MapReduce Next Generation - Setti
 
   \[ {{{./index.html}Go Back}} \]
 
+%{toc|section=1|fromDepth=0}
+
 * Mapreduce Tarball
 
   You should be able to obtain the MapReduce tarball from the release.
@@ -28,11 +30,11 @@ Hadoop MapReduce Next Generation - Setti
 +---+
 $ mvn clean install -DskipTests
 $ cd hadoop-mapreduce-project
-$ mvn clean install assembly:assembly 
+$ mvn clean install assembly:assembly -Pnative
 +---+
  <<NOTE:>> You will need protoc 2.4.1 or greater installed.
 
-  To ignore the native builds in mapreduce you can use <<<-P-cbuild>>> argument
+  To ignore the native builds in mapreduce you can omit the <<<-Pnative>>> argument
   for maven. The tarball should be available in <<<target/>>> directory. 
 
   
@@ -119,8 +121,8 @@ Add the following configs to your <<<yar
   </property>  
 
   <property>
-    <name>yarn.nodemanager.resource.memory-gb</name>
-    <value>10</value>
+    <name>yarn.nodemanager.resource.memory-mb</name>
+    <value>10240</value>
    <description>the amount of memory on the NodeManager in MB</description>
   </property>