Posted to mapreduce-commits@hadoop.apache.org by su...@apache.org on 2011/09/29 02:42:55 UTC

svn commit: r1177130 [5/7] - in /hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project: ./ conf/ hadoop-mapreduce-client/ hadoop-mapreduce-client/hadoop-mapreduce-client-app/ hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apa...

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml Thu Sep 29 00:42:47 2011
@@ -103,6 +103,39 @@
         <activeByDefault>true</activeByDefault>
       </activation>
     </profile>
+    <profile>
+      <id>visualize</id>
+      <activation>
+        <activeByDefault>false</activeByDefault>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>exec-maven-plugin</artifactId>
+            <version>1.2</version>
+            <executions>
+              <execution>
+                <phase>compile</phase>
+                <goals>
+                  <goal>java</goal>
+                </goals>
+                <configuration>
+                  <mainClass>org.apache.hadoop.yarn.util.VisualizeStateMachine</mainClass>
+                  <arguments>
+                    <argument>NodeManager</argument>
+                    <argument>org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationImpl,
+                       org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl,
+                       org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource</argument>
+                    <argument>NodeManager.gv</argument>
+                  </arguments>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
   </profiles>
 
   <build>
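
For context: the new visualize profile is disabled by default; activating it (presumably with "mvn -Pvisualize compile") runs VisualizeStateMachine during the compile phase and writes a Graphviz description of the NodeManager state machines to NodeManager.gv. A minimal standalone sketch of the same invocation, assuming VisualizeStateMachine.main(String[]) accepts exactly the three arguments the plugin configuration passes (graph name, comma-separated state-machine classes, output file):

    // Hypothetical standalone equivalent of the exec-maven-plugin execution above.
    public class GenerateNodeManagerGraph {
      public static void main(String[] args) throws Exception {
        org.apache.hadoop.yarn.util.VisualizeStateMachine.main(new String[] {
            "NodeManager",                     // graph name
            "org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationImpl,"
                + "org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl,"
                + "org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource",
            "NodeManager.gv"                   // Graphviz output file
        });
      }
    }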

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java Thu Sep 29 00:42:47 2011
@@ -133,8 +133,10 @@ public class DefaultContainerExecutor ex
       String[] command = 
           new String[] { "bash", "-c", launchDst.toUri().getPath().toString() };
       LOG.info("launchContainer: " + Arrays.toString(command));
-      shExec = new ShellCommandExecutor(command,
-          new File(containerWorkDir.toUri().getPath()));
+      shExec = new ShellCommandExecutor(
+          command,
+          new File(containerWorkDir.toUri().getPath()), 
+          container.getLaunchContext().getEnvironment());      // sanitized env
       launchCommandObjs.put(containerId, shExec);
       shExec.execute();
     } catch (IOException e) {

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java Thu Sep 29 00:42:47 2011
@@ -161,7 +161,11 @@ public class LinuxContainerExecutor exte
                     nmPrivateCotainerScriptPath.toUri().getPath().toString(),
                     nmPrivateTokensPath.toUri().getPath().toString()));
     String[] commandArray = command.toArray(new String[command.size()]);
-    ShellCommandExecutor shExec = new ShellCommandExecutor(commandArray);
+    ShellCommandExecutor shExec = 
+        new ShellCommandExecutor(
+            commandArray,
+            null,                                              // NM's cwd
+            container.getLaunchContext().getEnvironment());    // sanitized env
     launchCommandObjs.put(containerId, shExec);
     // DEBUG
     LOG.info("launchContainer: " + Arrays.toString(commandArray));
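
Both DefaultContainerExecutor and LinuxContainerExecutor now hand the container's launch-context environment to ShellCommandExecutor, so the child process sees the sanitized environment rather than inheriting the NodeManager's. A minimal sketch of that constructor form (the three-argument Shell.ShellCommandExecutor(String[], File, Map) used above); the paths and variable values are illustrative only:

    import java.io.File;
    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.util.Shell.ShellCommandExecutor;

    public class SanitizedEnvLaunchSketch {
      public static void main(String[] args) throws IOException {
        // In the NM this map comes from container.getLaunchContext().getEnvironment().
        Map<String, String> env = new HashMap<String, String>();
        env.put("JAVA_HOME", "/usr/lib/jvm/default-java");   // illustrative value

        ShellCommandExecutor shExec = new ShellCommandExecutor(
            new String[] { "bash", "-c", "echo $JAVA_HOME" },
            new File("/tmp"),   // working directory; null would mean the caller's cwd
            env);               // only these variables are visible to the child
        shExec.execute();
        System.out.println(shExec.getOutput());
      }
    }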

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java Thu Sep 29 00:42:47 2011
@@ -101,7 +101,7 @@ public class NodeStatusUpdaterImpl exten
   public synchronized void init(Configuration conf) {
     this.rmAddress =
         conf.get(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
-            YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS);
+            YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS);
     this.heartBeatInterval =
         conf.getLong(YarnConfiguration.NM_TO_RM_HEARTBEAT_INTERVAL_MS,
             YarnConfiguration.DEFAULT_NM_TO_RM_HEARTBEAT_INTERVAL_MS);

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java Thu Sep 29 00:42:47 2011
@@ -158,10 +158,12 @@ public class ContainerImpl implements Co
         ContainerEventType.CONTAINER_LAUNCHED, new LaunchTransition())
     .addTransition(ContainerState.LOCALIZED, ContainerState.EXITED_WITH_FAILURE,
         ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
-        new ExitedWithFailureTransition())
+        new ExitedWithFailureTransition(true))
     .addTransition(ContainerState.LOCALIZED, ContainerState.LOCALIZED,
        ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
        UPDATE_DIAGNOSTICS_TRANSITION)
+       // TODO race: Can lead to a CONTAINER_LAUNCHED event at state KILLING, 
+       // and a container which will never be killed by the NM.
     .addTransition(ContainerState.LOCALIZED, ContainerState.KILLING,
         ContainerEventType.KILL_CONTAINER, new KillTransition())
 
@@ -169,16 +171,19 @@ public class ContainerImpl implements Co
     .addTransition(ContainerState.RUNNING,
         ContainerState.EXITED_WITH_SUCCESS,
         ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS,
-        new ExitedWithSuccessTransition())
+        new ExitedWithSuccessTransition(true))
     .addTransition(ContainerState.RUNNING,
         ContainerState.EXITED_WITH_FAILURE,
         ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
-        new ExitedWithFailureTransition())
+        new ExitedWithFailureTransition(true))
     .addTransition(ContainerState.RUNNING, ContainerState.RUNNING,
        ContainerEventType.UPDATE_DIAGNOSTICS_MSG,
        UPDATE_DIAGNOSTICS_TRANSITION)
     .addTransition(ContainerState.RUNNING, ContainerState.KILLING,
         ContainerEventType.KILL_CONTAINER, new KillTransition())
+    .addTransition(ContainerState.RUNNING, ContainerState.EXITED_WITH_FAILURE,
+        ContainerEventType.CONTAINER_KILLED_ON_REQUEST,
+        new KilledExternallyTransition()) 
 
     // From CONTAINER_EXITED_WITH_SUCCESS State
     .addTransition(ContainerState.EXITED_WITH_SUCCESS, ContainerState.DONE,
@@ -220,10 +225,10 @@ public class ContainerImpl implements Co
         ContainerEventType.KILL_CONTAINER)
     .addTransition(ContainerState.KILLING, ContainerState.EXITED_WITH_SUCCESS,
         ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS,
-        new ExitedWithSuccessTransition())
+        new ExitedWithSuccessTransition(false))
     .addTransition(ContainerState.KILLING, ContainerState.EXITED_WITH_FAILURE,
         ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
-        new ExitedWithFailureTransition())
+        new ExitedWithFailureTransition(false))
     .addTransition(ContainerState.KILLING,
             ContainerState.DONE,
             ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP,
@@ -551,18 +556,41 @@ public class ContainerImpl implements Co
     }
   }
 
+  @SuppressWarnings("unchecked")  // dispatcher not typed
   static class ExitedWithSuccessTransition extends ContainerTransition {
+
+    boolean clCleanupRequired;
+
+    public ExitedWithSuccessTransition(boolean clCleanupRequired) {
+      this.clCleanupRequired = clCleanupRequired;
+    }
+
     @Override
     public void transition(ContainerImpl container, ContainerEvent event) {
+      // Set exit code to 0 on success    	
+      container.exitCode = 0;
+    	
       // TODO: Add containerWorkDir to the deletion service.
 
-      // Inform the localizer to decrement reference counts and cleanup
-      // resources.
+      if (clCleanupRequired) {
+        container.dispatcher.getEventHandler().handle(
+            new ContainersLauncherEvent(container,
+                ContainersLauncherEventType.CLEANUP_CONTAINER));
+      }
+
       container.cleanup();
     }
   }
 
+  @SuppressWarnings("unchecked")  // dispatcher not typed
   static class ExitedWithFailureTransition extends ContainerTransition {
+
+    boolean clCleanupRequired;
+
+    public ExitedWithFailureTransition(boolean clCleanupRequired) {
+      this.clCleanupRequired = clCleanupRequired;
+    }
+
     @Override
     public void transition(ContainerImpl container, ContainerEvent event) {
       ContainerExitEvent exitEvent = (ContainerExitEvent) event;
@@ -571,12 +599,28 @@ public class ContainerImpl implements Co
       // TODO: Add containerWorkDir to the deletion service.
       // TODO: Add containerOuputDir to the deletion service.
 
-      // Inform the localizer to decrement reference counts and cleanup
-      // resources.
+      if (clCleanupRequired) {
+        container.dispatcher.getEventHandler().handle(
+            new ContainersLauncherEvent(container,
+                ContainersLauncherEventType.CLEANUP_CONTAINER));
+      }
+
       container.cleanup();
     }
   }
 
+  static class KilledExternallyTransition extends ExitedWithFailureTransition {
+    KilledExternallyTransition() {
+      super(true);
+    }
+
+    @Override
+    public void transition(ContainerImpl container, ContainerEvent event) {
+      super.transition(container, event);
+      container.diagnostics.append("Killed by external signal\n");
+    }
+  }
+
   static class ResourceFailedTransition implements
       SingleArcTransition<ContainerImpl, ContainerEvent> {
     @Override

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java Thu Sep 29 00:42:47 2011
@@ -44,6 +44,7 @@ import org.apache.hadoop.security.Creden
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
@@ -89,7 +90,6 @@ public class ContainerLaunch implements 
     final Map<Path,String> localResources = container.getLocalizedResources();
     String containerIdStr = ConverterUtils.toString(container.getContainerID());
     final String user = launchContext.getUser();
-    final Map<String,String> env = launchContext.getEnvironment();
     final List<String> command = launchContext.getCommands();
     int ret = -1;
 
@@ -109,16 +109,16 @@ public class ContainerLaunch implements 
       }
       launchContext.setCommands(newCmds);
 
-      Map<String, String> envs = launchContext.getEnvironment();
-      Map<String, String> newEnvs = new HashMap<String, String>(envs.size());
-      for (Entry<String, String> entry : envs.entrySet()) {
-        newEnvs.put(
-            entry.getKey(),
-            entry.getValue().replace(
+      Map<String, String> environment = launchContext.getEnvironment();
+      // Expand the log dir variable in-place in the container's environment
+      for (Entry<String, String> entry : environment.entrySet()) {
+        String value = entry.getValue();
+        entry.setValue(
+            value.replace(
                 ApplicationConstants.LOG_DIR_EXPANSION_VAR,
-                containerLogDir.toUri().getPath()));
+                containerLogDir.toUri().getPath())
+            );
       }
-      launchContext.setEnvironment(newEnvs);
       // /////////////////////////// End of variable expansion
 
       FileContext lfs = FileContext.getLocalFSFileContext();
@@ -164,11 +164,18 @@ public class ContainerLaunch implements 
               EnumSet.of(CREATE, OVERWRITE));
 
         // Set the token location too.
-        env.put(ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME, new Path(
-            containerWorkDir, FINAL_CONTAINER_TOKENS_FILE).toUri().getPath());
-
-        writeLaunchEnv(containerScriptOutStream, env, localResources,
-            launchContext.getCommands(), appDirs);
+        environment.put(
+            ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME, 
+            new Path(containerWorkDir, 
+                FINAL_CONTAINER_TOKENS_FILE).toUri().getPath());
+
+        // Sanitize the container's environment
+        sanitizeEnv(environment, containerWorkDir, appDirs);
+        
+        // Write out the environment
+        writeLaunchEnv(containerScriptOutStream, environment, localResources,
+            launchContext.getCommands());
+        
         // /////////// End of writing out container-script
 
         // /////////// Write out the container-tokens in the nmPrivate space.
@@ -275,19 +282,71 @@ public class ContainerLaunch implements 
   
   }
 
+  private static void putEnvIfNotNull(
+      Map<String, String> environment, String variable, String value) {
+    if (value != null) {
+      environment.put(variable, value);
+    }
+  }
+  
+  private static void putEnvIfAbsent(
+      Map<String, String> environment, String variable) {
+    if (environment.get(variable) == null) {
+      putEnvIfNotNull(environment, variable, System.getenv(variable));
+    }
+  }
+  
+  public void sanitizeEnv(Map<String, String> environment, 
+      Path pwd, List<Path> appDirs) {
+    /**
+     * Non-modifiable environment variables
+     */
+    
+    putEnvIfNotNull(environment, Environment.USER.name(), container.getUser());
+    
+    putEnvIfNotNull(environment, 
+        Environment.LOGNAME.name(),container.getUser());
+    
+    putEnvIfNotNull(environment, 
+        Environment.HOME.name(),
+        conf.get(
+            YarnConfiguration.NM_USER_HOME_DIR, 
+            YarnConfiguration.DEFAULT_NM_USER_HOME_DIR
+            )
+        );
+    
+    putEnvIfNotNull(environment, Environment.PWD.name(), pwd.toString());
+    
+    putEnvIfNotNull(environment, 
+        Environment.HADOOP_CONF_DIR.name(), 
+        System.getenv(Environment.HADOOP_CONF_DIR.name())
+        );
+    
+    putEnvIfNotNull(environment, 
+        ApplicationConstants.LOCAL_DIR_ENV, 
+        StringUtils.join(",", appDirs)
+        );
+
+    if (!Shell.WINDOWS) {
+      environment.put("JVM_PID", "$$");
+    }
+
+    /**
+     * Modifiable environment variables
+     */
+    
+    putEnvIfAbsent(environment, Environment.JAVA_HOME.name());
+    putEnvIfAbsent(environment, Environment.HADOOP_COMMON_HOME.name());
+    putEnvIfAbsent(environment, Environment.HADOOP_HDFS_HOME.name());
+    putEnvIfAbsent(environment, Environment.YARN_HOME.name());
+
+  }
+  
   private static void writeLaunchEnv(OutputStream out,
       Map<String,String> environment, Map<Path,String> resources,
-      List<String> command, List<Path> appDirs)
+      List<String> command)
       throws IOException {
     ShellScriptBuilder sb = new ShellScriptBuilder();
-    if (System.getenv("YARN_HOME") != null) {
-      // TODO: Get from whitelist.
-      sb.env("YARN_HOME", System.getenv("YARN_HOME"));
-    }
-    sb.env(ApplicationConstants.LOCAL_DIR_ENV, StringUtils.join(",", appDirs));
-    if (!Shell.WINDOWS) {
-      sb.env("JVM_PID", "$$");
-    }
     if (environment != null) {
       for (Map.Entry<String,String> env : environment.entrySet()) {
         sb.env(env.getKey().toString(), env.getValue().toString());
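
The new sanitizeEnv splits the container environment into variables the NodeManager always controls (USER, LOGNAME, HOME, PWD, HADOOP_CONF_DIR, the local dirs, JVM_PID) and variables that are only inherited from the NodeManager's own process environment when the submitter has not set them (JAVA_HOME, HADOOP_COMMON_HOME, HADOOP_HDFS_HOME, YARN_HOME). A small sketch of the putEnvIfAbsent behaviour under that reading; the variable values are illustrative:

    import java.util.HashMap;
    import java.util.Map;

    public class PutEnvIfAbsentSketch {
      // Mirrors the helper above: fall back to the launching process's environment
      // only when the submitter did not provide a value.
      static void putEnvIfAbsent(Map<String, String> env, String variable) {
        if (env.get(variable) == null) {
          String inherited = System.getenv(variable);
          if (inherited != null) {
            env.put(variable, inherited);
          }
        }
      }

      public static void main(String[] args) {
        Map<String, String> env = new HashMap<String, String>();
        env.put("JAVA_HOME", "/opt/custom-jdk");   // set by the application submitter
        putEnvIfAbsent(env, "JAVA_HOME");          // kept: the submitter's value wins
        putEnvIfAbsent(env, "HADOOP_COMMON_HOME"); // filled from this process's env, if present
        System.out.println(env);
      }
    }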

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java Thu Sep 29 00:42:47 2011
@@ -31,8 +31,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.factories.RecordFactory;
-import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
@@ -56,22 +54,26 @@ public class ContainerLogsPage extends N
     private final Configuration conf;
     private final LocalDirAllocator logsSelector;
     private final Context nmContext;
-    private final RecordFactory recordFactory;
 
     @Inject
     public ContainersLogsBlock(Configuration conf, Context context) {
       this.conf = conf;
       this.logsSelector = new LocalDirAllocator(YarnConfiguration.NM_LOG_DIRS);
       this.nmContext = context;
-      this.recordFactory = RecordFactoryProvider.getRecordFactory(conf);
     }
 
     @Override
     protected void render(Block html) {
       DIV<Hamlet> div = html.div("#content");
 
-      ContainerId containerId =
-        ConverterUtils.toContainerId(this.recordFactory, $(CONTAINER_ID));
+      ContainerId containerId;
+      try {
+        containerId = ConverterUtils.toContainerId($(CONTAINER_ID));
+      } catch (IOException e) {
+        div.h1("Invalid containerId " + $(CONTAINER_ID))._();
+        return;
+      }
+
       Container container = this.nmContext.getContainers().get(containerId);
 
       if (container == null) {

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java Thu Sep 29 00:42:47 2011
@@ -18,20 +18,21 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.webapp;
 
+import static org.apache.hadoop.yarn.util.StringHelper.ujoin;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
-import static org.apache.hadoop.yarn.util.StringHelper.ujoin;
 
-import org.apache.hadoop.conf.Configuration;
+import java.io.IOException;
+
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.factories.RecordFactory;
-import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
 
@@ -53,22 +54,30 @@ public class ContainerPage extends NMVie
 
   public static class ContainerBlock extends HtmlBlock implements NMWebParams {
 
-    private final Configuration conf;
     private final Context nmContext;
-    private final RecordFactory recordFactory;
 
     @Inject
-    public ContainerBlock(Configuration conf, Context nmContext) {
-      this.conf = conf;
+    public ContainerBlock(Context nmContext) {
       this.nmContext = nmContext;
-      this.recordFactory = RecordFactoryProvider.getRecordFactory(this.conf);
     }
 
     @Override
     protected void render(Block html) {
-      ContainerId containerID =
-        ConverterUtils.toContainerId(this.recordFactory, $(CONTAINER_ID));
+      ContainerId containerID;
+      try {
+        containerID = ConverterUtils.toContainerId($(CONTAINER_ID));
+      } catch (IOException e) {
+        html.p()._("Invalid containerId " + $(CONTAINER_ID))._();
+        return;
+      }
+
+      DIV<Hamlet> div = html.div("#content");
       Container container = this.nmContext.getContainers().get(containerID);
+      if (container == null) {
+        div.h1("Unknown Container. Container might have completed, "
+                + "please go back to the previous page and retry.")._();
+        return;
+      }
       ContainerStatus containerData = container.cloneAndGetContainerStatus();
       int exitCode = containerData.getExitStatus();
       String exiStatus = 

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java Thu Sep 29 00:42:47 2011
@@ -57,7 +57,7 @@ public class WebServer extends AbstractS
     LOG.info("Instantiating NMWebApp at " + bindAddress);
     try {
       this.webApp =
-          WebApps.$for("yarn", Context.class, this.nmContext)
+          WebApps.$for("node", Context.class, this.nmContext)
               .at(bindAddress).with(getConfig())
               .start(new NMWebApp(this.resourceView));
     } catch (Exception e) {

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties Thu Sep 29 00:42:47 2011
@@ -12,12 +12,12 @@ log4j.threshold=ALL
 #
 
 #Default values
-hadoop.yarn.mr.containerLogDir=null
-hadoop.yarn.mr.totalLogFileSize=100
+yarn.app.mapreduce.container.log.dir=null
+yarn.app.mapreduce.container.log.filesize=100
 
 log4j.appender.CLA=org.apache.hadoop.yarn.ContainerLogAppender
-log4j.appender.CLA.containerLogDir=${hadoop.yarn.mr.containerLogDir}
-log4j.appender.CLA.totalLogFileSize=${hadoop.yarn.mr.totalLogFileSize}
+log4j.appender.CLA.containerLogDir=${yarn.app.mapreduce.container.log.dir}
+log4j.appender.CLA.totalLogFileSize=${yarn.app.mapreduce.container.log.filesize}
 
 log4j.appender.CLA.layout=org.apache.log4j.PatternLayout
 log4j.appender.CLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java Thu Sep 29 00:42:47 2011
@@ -105,8 +105,32 @@ public class TestContainerManagerWithLCE
     LOG.info("Running testContainerLaunchAndStop");
     super.testContainerLaunchAndStop();
   }
+  
+  @Override
+  public void testContainerLaunchAndExitSuccess() throws IOException,
+      InterruptedException {
+    // Don't run the test if the binary is not available.
+    if (!shouldRunTest()) {
+      LOG.info("LCE binary path is not passed. Not running the test");
+      return;
+    }
+    LOG.info("Running testContainerLaunchAndExitSuccess");
+    super.testContainerLaunchAndExitSuccess();
+  }
 
   @Override
+  public void testContainerLaunchAndExitFailure() throws IOException,
+      InterruptedException {
+    // Don't run the test if the binary is not available.
+    if (!shouldRunTest()) {
+      LOG.info("LCE binary path is not passed. Not running the test");
+      return;
+    }
+    LOG.info("Running testContainerLaunchAndExitFailure");
+    super.testContainerLaunchAndExitFailure();
+  }
+  
+  @Override
   public void testLocalFilesCleanup() throws InterruptedException,
       IOException {
     // Don't run the test if the binary is not available.

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java Thu Sep 29 00:42:47 2011
@@ -287,7 +287,95 @@ public class TestContainerManager extend
         exec.signalContainer(user,
             pid, Signal.NULL));
   }
+  
+  private void testContainerLaunchAndExit(int exitCode) throws IOException, InterruptedException {
 
+	  File scriptFile = new File(tmpDir, "scriptFile.sh");
+	  PrintWriter fileWriter = new PrintWriter(scriptFile);
+	  File processStartFile =
+			  new File(tmpDir, "start_file.txt").getAbsoluteFile();
+	  fileWriter.write("\numask 0"); // So that start file is readable by the test
+	  fileWriter.write("\necho Hello World! > " + processStartFile);
+	  fileWriter.write("\necho $$ >> " + processStartFile); 
+
+	  // Have script throw an exit code at the end
+	  if (exitCode != 0) {
+		  fileWriter.write("\nexit "+exitCode);
+	  }
+	  
+	  fileWriter.close();
+
+	  ContainerLaunchContext containerLaunchContext = 
+			  recordFactory.newRecordInstance(ContainerLaunchContext.class);
+
+	  // ////// Construct the Container-id
+	  ContainerId cId = createContainerId();
+	  containerLaunchContext.setContainerId(cId);
+
+	  containerLaunchContext.setUser(user);
+
+	  URL resource_alpha =
+			  ConverterUtils.getYarnUrlFromPath(localFS
+					  .makeQualified(new Path(scriptFile.getAbsolutePath())));
+	  LocalResource rsrc_alpha =
+			  recordFactory.newRecordInstance(LocalResource.class);
+	  rsrc_alpha.setResource(resource_alpha);
+	  rsrc_alpha.setSize(-1);
+	  rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
+	  rsrc_alpha.setType(LocalResourceType.FILE);
+	  rsrc_alpha.setTimestamp(scriptFile.lastModified());
+	  String destinationFile = "dest_file";
+	  Map<String, LocalResource> localResources = 
+			  new HashMap<String, LocalResource>();
+	  localResources.put(destinationFile, rsrc_alpha);
+	  containerLaunchContext.setLocalResources(localResources);
+	  containerLaunchContext.setUser(containerLaunchContext.getUser());
+	  List<String> commands = new ArrayList<String>();
+	  commands.add("/bin/bash");
+	  commands.add(scriptFile.getAbsolutePath());
+	  containerLaunchContext.setCommands(commands);
+	  containerLaunchContext.setResource(recordFactory
+			  .newRecordInstance(Resource.class));
+	  containerLaunchContext.getResource().setMemory(100 * 1024 * 1024);
+
+	  StartContainerRequest startRequest = recordFactory.newRecordInstance(StartContainerRequest.class);
+	  startRequest.setContainerLaunchContext(containerLaunchContext);
+	  containerManager.startContainer(startRequest);
+
+	  BaseContainerManagerTest.waitForContainerState(containerManager, cId,
+			  ContainerState.COMPLETE);
+
+	  GetContainerStatusRequest gcsRequest = 
+			  recordFactory.newRecordInstance(GetContainerStatusRequest.class);
+	  gcsRequest.setContainerId(cId);
+	  ContainerStatus containerStatus = 
+			  containerManager.getContainerStatus(gcsRequest).getStatus();
+
+	  // Verify exit status matches exit state of script
+	  Assert.assertEquals(exitCode,
+			  containerStatus.getExitStatus());	    
+  }
+  
+  @Test
+  public void testContainerLaunchAndExitSuccess() throws IOException, InterruptedException {
+	  containerManager.start();
+	  int exitCode = 0; 
+
+	  // launch context for a command that will return exit code 0 
+	  // and verify exit code returned 
+	  testContainerLaunchAndExit(exitCode);	  
+  }
+
+  @Test
+  public void testContainerLaunchAndExitFailure() throws IOException, InterruptedException {
+	  containerManager.start();
+	  int exitCode = 50; 
+
+	  // launch context for a command that will return a non-zero exit code (50)
+	  // and verify the exit code returned
+	  testContainerLaunchAndExit(exitCode);	  
+  }
+  
   @Test
   public void testLocalFilesCleanup() throws InterruptedException,
       IOException {

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java Thu Sep 29 00:42:47 2011
@@ -38,8 +38,6 @@ import java.util.Map.Entry;
 import java.util.Random;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.LocalResource;
@@ -137,6 +135,28 @@ public class TestContainer {
 
   @Test
   @SuppressWarnings("unchecked") // mocked generic
+  public void testExternalKill() throws Exception {
+    WrappedContainer wc = null;
+    try {
+      wc = new WrappedContainer(13, 314159265358979L, 4344, "yak");
+      wc.initContainer();
+      wc.localizeResources();
+      wc.launchContainer();
+      reset(wc.localizerBus);
+      wc.containerKilledOnRequest();
+      assertEquals(ContainerState.EXITED_WITH_FAILURE, 
+          wc.c.getContainerState());
+      verifyCleanupCall(wc);
+    }
+    finally {
+      if (wc != null) {
+        wc.finished();
+      }
+    }
+  }
+
+  @Test
+  @SuppressWarnings("unchecked") // mocked generic
   public void testCleanupOnFailure() throws Exception {
     WrappedContainer wc = null;
     try {

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml Thu Sep 29 00:42:47 2011
@@ -37,6 +37,20 @@
 
   <build>
     <plugins>
+
+      <!-- Publish tests jar -->
+      <plugin>
+        <artifactId>maven-jar-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+            <phase>test-compile</phase>
+          </execution>
+        </executions>
+      </plugin>
+
       <plugin>
         <artifactId>maven-antrun-plugin</artifactId>
         <executions>
@@ -98,4 +112,41 @@
       </plugin>
     </plugins>
   </build>
+
+  <profiles>
+    <profile>
+      <id>visualize</id>
+      <activation>
+        <activeByDefault>false</activeByDefault>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>exec-maven-plugin</artifactId>
+            <version>1.2</version>
+            <executions>
+              <execution>
+                <phase>compile</phase>
+                <goals>
+                  <goal>java</goal>
+                </goals>
+                <configuration>
+                  <mainClass>org.apache.hadoop.yarn.util.VisualizeStateMachine</mainClass>
+                  <arguments>
+                    <argument>ResourceManager</argument>
+                    <argument>org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl,
+                      org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl,
+                      org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl,
+                      org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl</argument>
+                    <argument>ResourceManager.gv</argument>
+                  </arguments>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
 </project>

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java Thu Sep 29 00:42:47 2011
@@ -84,7 +84,7 @@ public class AdminService extends Abstra
     super.init(conf);
     String bindAddress =
       conf.get(YarnConfiguration.RM_ADMIN_ADDRESS,
-          YarnConfiguration.RM_ADMIN_ADDRESS);
+          YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS);
     masterServiceAddress =  NetUtils.createSocketAddr(bindAddress);
     adminAcl = 
       new AccessControlList(

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java Thu Sep 29 00:42:47 2011
@@ -36,8 +36,8 @@ import org.apache.hadoop.security.Securi
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.yarn.api.ClientRMProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
@@ -46,8 +46,8 @@ import org.apache.hadoop.yarn.api.protoc
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationIdResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
@@ -165,11 +165,17 @@ public class ClientRMService extends Abs
   }
 
   @Override
-  public GetNewApplicationIdResponse getNewApplicationId(
-      GetNewApplicationIdRequest request) throws YarnRemoteException {
-    GetNewApplicationIdResponse response = recordFactory
-        .newRecordInstance(GetNewApplicationIdResponse.class);
+  public GetNewApplicationResponse getNewApplication(
+      GetNewApplicationRequest request) throws YarnRemoteException {
+    GetNewApplicationResponse response = recordFactory
+        .newRecordInstance(GetNewApplicationResponse.class);
     response.setApplicationId(getNewApplicationId());
+    // Pick up min/max resource from scheduler...
+    response.setMinimumResourceCapability(scheduler
+        .getMinimumResourceCapability());
+    response.setMaximumResourceCapability(scheduler
+        .getMaximumResourceCapability());       
+    
     return response;
   }
   
@@ -228,8 +234,8 @@ public class ClientRMService extends Abs
 
   @SuppressWarnings("unchecked")
   @Override
-  public FinishApplicationResponse finishApplication(
-      FinishApplicationRequest request) throws YarnRemoteException {
+  public KillApplicationResponse forceKillApplication(
+      KillApplicationRequest request) throws YarnRemoteException {
 
     ApplicationId applicationId = request.getApplicationId();
 
@@ -262,8 +268,8 @@ public class ClientRMService extends Abs
 
     RMAuditLogger.logSuccess(callerUGI.getShortUserName(), 
         AuditConstants.KILL_APP_REQUEST, "ClientRMService" , applicationId);
-    FinishApplicationResponse response = recordFactory
-        .newRecordInstance(FinishApplicationResponse.class);
+    KillApplicationResponse response = recordFactory
+        .newRecordInstance(KillApplicationResponse.class);
     return response;
   }
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java Thu Sep 29 00:42:47 2011
@@ -30,6 +30,7 @@ import org.apache.hadoop.yarn.api.record
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
 import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier;
 import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants;
@@ -250,13 +251,10 @@ public class RMAppManager implements Eve
 
       if (rmContext.getRMApps().putIfAbsent(applicationId, application) != 
           null) {
-        LOG.info("Application with id " + applicationId + 
-            " is already present! Cannot add a duplicate!");
-        // don't send event through dispatcher as it will be handled by app 
-        // already present with this id.
-        application.handle(new RMAppRejectedEvent(applicationId,
-            "Application with this id is already present! " +
-            "Cannot add a duplicate!"));
+        String message = "Application with id " + applicationId
+            + " is already present! Cannot add a duplicate!";
+        LOG.info(message);
+        throw RPCUtil.getRemoteException(message);
       } else {
         this.rmContext.getDispatcher().getEventHandler().handle(
             new RMAppEvent(applicationId, RMAppEventType.START));

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java Thu Sep 29 00:42:47 2011
@@ -22,7 +22,6 @@ import java.util.concurrent.ConcurrentHa
 import java.util.concurrent.ConcurrentMap;
 
 import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore;
@@ -31,7 +30,6 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
-import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 
 public class RMContextImpl implements RMContext {

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java Thu Sep 29 00:42:47 2011
@@ -41,6 +41,7 @@ import org.apache.hadoop.yarn.event.Disp
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager;
 import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store;
@@ -97,7 +98,7 @@ public class ResourceManager extends Com
   private ContainerAllocationExpirer containerAllocationExpirer;
   protected NMLivelinessMonitor nmLivelinessMonitor;
   protected NodesListManager nodesListManager;
-  private SchedulerEventDispatcher schedulerDispatcher;
+  private EventHandler<SchedulerEvent> schedulerDispatcher;
   protected RMAppManager rmAppManager;
 
   private WebApp webApp;
@@ -118,7 +119,7 @@ public class ResourceManager extends Com
   @Override
   public synchronized void init(Configuration conf) {
 
-    this.rmDispatcher = new AsyncDispatcher();
+    this.rmDispatcher = createDispatcher();
     addIfService(this.rmDispatcher);
 
     this.containerAllocationExpirer = new ContainerAllocationExpirer(
@@ -137,8 +138,8 @@ public class ResourceManager extends Com
     this.conf = new YarnConfiguration(conf);
     // Initialize the scheduler
     this.scheduler = createScheduler();
-    this.schedulerDispatcher = new SchedulerEventDispatcher(this.scheduler);
-    addService(this.schedulerDispatcher);
+    this.schedulerDispatcher = createSchedulerEventDispatcher();
+    addIfService(this.schedulerDispatcher);
     this.rmDispatcher.register(SchedulerEventType.class,
         this.schedulerDispatcher);
 
@@ -186,11 +187,22 @@ public class ResourceManager extends Com
     addService(adminService);
 
     this.applicationMasterLauncher = createAMLauncher();
+    this.rmDispatcher.register(AMLauncherEventType.class, 
+        this.applicationMasterLauncher);
+
     addService(applicationMasterLauncher);
 
     super.init(conf);
   }
 
+  protected EventHandler<SchedulerEvent> createSchedulerEventDispatcher() {
+    return new SchedulerEventDispatcher(this.scheduler);
+  }
+
+  protected Dispatcher createDispatcher() {
+    return new AsyncDispatcher();
+  }
+
   protected void addIfService(Object object) {
     if (object instanceof Service) {
       addService((Service) object);
@@ -381,7 +393,7 @@ public class ResourceManager extends Com
   }
 
   protected void startWepApp() {
-    webApp = WebApps.$for("yarn", masterService).at(
+    webApp = WebApps.$for("cluster", masterService).at(
         conf.get(YarnConfiguration.RM_WEBAPP_ADDRESS,
         YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS)).
       start(new RMWebApp(this));
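
Exposing createDispatcher() and createSchedulerEventDispatcher() as protected factory methods lets tests subclass ResourceManager and substitute synchronous dispatchers for deterministic event processing. A hedged sketch of that pattern, assuming the Store-based ResourceManager constructor on this branch and that a DrainDispatcher-style synchronous test dispatcher is available on the test classpath:

    import org.apache.hadoop.yarn.event.Dispatcher;
    import org.apache.hadoop.yarn.event.DrainDispatcher;
    import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
    import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store;

    public class TestableResourceManager extends ResourceManager {
      public TestableResourceManager(Store store) {
        super(store);
      }

      @Override
      protected Dispatcher createDispatcher() {
        // Events are dispatched on demand rather than asynchronously,
        // so tests can drain them and assert on intermediate state.
        return new DrainDispatcher();
      }
    }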

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java Thu Sep 29 00:42:47 2011
@@ -136,7 +136,7 @@ public class AMLauncher implements Runna
     containerMgrProxy.stopContainer(stopRequest);
   }
 
-  private ContainerManager getContainerMgrProxy(
+  protected ContainerManager getContainerMgrProxy(
       final ApplicationId applicationID) throws IOException {
 
     Container container = application.getMasterContainer();
@@ -173,23 +173,11 @@ public class AMLauncher implements Runna
     // Construct the actual Container
     ContainerLaunchContext container = 
         applicationMasterContext.getAMContainerSpec();
-    StringBuilder mergedCommand = new StringBuilder();
-    String failCount = Integer.toString(application.getAppAttemptId()
-        .getAttemptId());
-    List<String> commandList = new ArrayList<String>();
-    for (String str : container.getCommands()) {
-      // This is out-right wrong. AM FAIL count should be passed via env.
-      String result =
-          str.replaceFirst(ApplicationConstants.AM_FAIL_COUNT_STRING,
-              failCount);
-      mergedCommand.append(result).append(" ");
-      commandList.add(result);
-    }
-    container.setCommands(commandList);
-    /** add the failed count to the app master command line */
-   
-    LOG.info("Command to launch container " + 
-        containerID + " : " + mergedCommand);
+    LOG.info("Command to launch container "
+        + containerID
+        + " : "
+        + StringUtils.arrayToString(container.getCommands().toArray(
+            new String[0])));
     
     // Finalize the container
     container.setContainerId(containerID);
@@ -203,6 +191,11 @@ public class AMLauncher implements Runna
       ContainerLaunchContext container)
       throws IOException {
     Map<String, String> environment = container.getEnvironment();
+
+    // Set the AppAttemptId to be consumable by the AM.
+    environment.put(ApplicationConstants.APPLICATION_ATTEMPT_ID_ENV,
+        application.getAppAttemptId().toString());
+
     if (UserGroupInformation.isSecurityEnabled()) {
       // TODO: Security enabled/disabled info should come from RM.
 

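With the AMLauncher change, the attempt id is handed to the ApplicationMaster through ApplicationConstants.APPLICATION_ATTEMPT_ID_ENV instead of splicing the failure count into the launch command. A sketch of the AM side, assuming ConverterUtils.toApplicationAttemptId is the inverse of ApplicationAttemptId.toString():

    import java.util.Map;

    import org.apache.hadoop.yarn.api.ApplicationConstants;
    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.util.ConverterUtils;

    // Sketch of the AM bootstrap path; ConverterUtils.toApplicationAttemptId
    // is assumed to parse ApplicationAttemptId.toString() back into an id.
    public class AppMasterBootstrap {

      public static ApplicationAttemptId readAttemptId() {
        Map<String, String> env = System.getenv();
        String attemptIdStr =
            env.get(ApplicationConstants.APPLICATION_ATTEMPT_ID_ENV);
        // The attempt number carried here replaces the old
        // AM_FAIL_COUNT_STRING substitution in the launch command.
        return ConverterUtils.toApplicationAttemptId(attemptIdStr);
      }
    }
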
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java Thu Sep 29 00:42:47 2011
@@ -42,17 +42,16 @@ public class ApplicationMasterLauncher e
   private final BlockingQueue<Runnable> masterEvents
     = new LinkedBlockingQueue<Runnable>();
   
-  private ApplicationTokenSecretManager applicationTokenSecretManager;
+  protected ApplicationTokenSecretManager applicationTokenSecretManager;
   private ClientToAMSecretManager clientToAMSecretManager;
-  private final RMContext context;
+  protected final RMContext context;
   
-  public ApplicationMasterLauncher(ApplicationTokenSecretManager 
-      applicationTokenSecretManager, ClientToAMSecretManager clientToAMSecretManager,
+  public ApplicationMasterLauncher(
+      ApplicationTokenSecretManager applicationTokenSecretManager, 
+      ClientToAMSecretManager clientToAMSecretManager,
       RMContext context) {
     super(ApplicationMasterLauncher.class.getName());
     this.context = context;
-    /* register to dispatcher */
-    this.context.getDispatcher().register(AMLauncherEventType.class, this);
     this.launcherPool = new ThreadPoolExecutor(1, 10, 1, 
         TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>());
     this.launcherHandlingThread = new LauncherThread();

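Because registration for AMLauncherEventType now happens in ResourceManager.init() rather than in this constructor, and the secret-manager and context fields are protected, a test can substitute the launcher without touching the dispatcher wiring. A hypothetical stub, assuming ApplicationMasterLauncher implements EventHandler<AMLauncherEvent>:

    import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager;
    import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
    import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
    import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent;
    import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;

    // Hypothetical stub launcher for tests, not part of this patch.
    public class NoopAMLauncher extends ApplicationMasterLauncher {

      public NoopAMLauncher(ApplicationTokenSecretManager appTokenSecretManager,
          ClientToAMSecretManager clientToAMSecretManager, RMContext context) {
        super(appTokenSecretManager, clientToAMSecretManager, context);
      }

      @Override
      public void handle(AMLauncherEvent event) {
        // Swallow launch/cleanup requests: no AM container is started.
        // The ResourceManager still registers this instance for
        // AMLauncherEventType because registration moved out of the
        // constructor in this patch.
      }
    }
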
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java Thu Sep 29 00:42:47 2011
@@ -24,7 +24,6 @@ import org.apache.hadoop.yarn.api.record
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.event.EventHandler;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 
@@ -33,7 +32,7 @@ import org.apache.hadoop.yarn.server.res
  * look at {@link RMAppImpl} for its implementation. This interface 
  * exposes methods to access various updates in application status/report.
  */
-public interface RMApp extends EventHandler<RMAppEvent>{
+public interface RMApp extends EventHandler<RMAppEvent> {
 
   /**
    * The application id for this {@link RMApp}.

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java Thu Sep 29 00:42:47 2011
@@ -39,6 +39,7 @@ import org.apache.hadoop.yarn.api.record
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService;
 import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEventType;
@@ -86,7 +87,8 @@ public class RMAppImpl implements RMApp 
   private long startTime;
   private long finishTime;
   private RMAppAttempt currentAttempt;
-
+  @SuppressWarnings("rawtypes")
+  private EventHandler handler;
   private static final FinalTransition FINAL_TRANSITION = new FinalTransition();
 
   private static final StateMachineFactory<RMAppImpl,
@@ -99,9 +101,6 @@ public class RMAppImpl implements RMApp 
                                            RMAppEvent>(RMAppState.NEW)
 
 
-     // TODO - ATTEMPT_KILLED not sent right now but should handle if 
-     // attempt starts sending
-
      // Transitions from NEW state
     .addTransition(RMAppState.NEW, RMAppState.SUBMITTED,
         RMAppEventType.START, new StartAppAttemptTransition())
@@ -116,7 +115,7 @@ public class RMAppImpl implements RMApp 
     .addTransition(RMAppState.SUBMITTED, RMAppState.ACCEPTED,
         RMAppEventType.APP_ACCEPTED)
     .addTransition(RMAppState.SUBMITTED, RMAppState.KILLED,
-        RMAppEventType.KILL, new AppKilledTransition())
+        RMAppEventType.KILL, new KillAppAndAttemptTransition())
 
      // Transitions from ACCEPTED state
     .addTransition(RMAppState.ACCEPTED, RMAppState.RUNNING,
@@ -126,7 +125,7 @@ public class RMAppImpl implements RMApp 
         RMAppEventType.ATTEMPT_FAILED,
         new AttemptFailedTransition(RMAppState.SUBMITTED))
     .addTransition(RMAppState.ACCEPTED, RMAppState.KILLED,
-        RMAppEventType.KILL, new AppKilledTransition())
+        RMAppEventType.KILL, new KillAppAndAttemptTransition())
 
      // Transitions from RUNNING state
     .addTransition(RMAppState.RUNNING, RMAppState.FINISHED,
@@ -136,7 +135,7 @@ public class RMAppImpl implements RMApp 
         RMAppEventType.ATTEMPT_FAILED,
         new AttemptFailedTransition(RMAppState.SUBMITTED))
     .addTransition(RMAppState.RUNNING, RMAppState.KILLED,
-        RMAppEventType.KILL, new AppKilledTransition())
+        RMAppEventType.KILL, new KillAppAndAttemptTransition())
 
      // Transitions from FINISHED state
     .addTransition(RMAppState.FINISHED, RMAppState.FINISHED,
@@ -168,6 +167,7 @@ public class RMAppImpl implements RMApp 
     this.name = name;
     this.rmContext = rmContext;
     this.dispatcher = rmContext.getDispatcher();
+    this.handler = dispatcher.getEventHandler();
     this.conf = config;
     this.user = user;
     this.queue = queue;
@@ -310,7 +310,8 @@ public class RMAppImpl implements RMApp 
       return BuilderUtils.newApplicationReport(this.applicationId, this.user,
           this.queue, this.name, host, rpcPort, clientToken,
           createApplicationState(this.stateMachine.getCurrentState()),
-          this.diagnostics.toString(), trackingUrl, this.startTime);
+          this.diagnostics.toString(), trackingUrl, 
+          this.startTime, this.finishTime);
     } finally {
       this.readLock.unlock();
     }
@@ -402,7 +403,7 @@ public class RMAppImpl implements RMApp 
         submissionContext);
     attempts.put(appAttemptId, attempt);
     currentAttempt = attempt;
-    dispatcher.getEventHandler().handle(
+    handler.handle(
         new RMAppAttemptEvent(appAttemptId, RMAppAttemptEventType.START));
   }
 
@@ -419,13 +420,23 @@ public class RMAppImpl implements RMApp 
     };
   }
 
-  private static final class AppKilledTransition extends FinalTransition {
+  private static class AppKilledTransition extends FinalTransition {
+    @Override
     public void transition(RMAppImpl app, RMAppEvent event) {
       app.diagnostics.append("Application killed by user.");
       super.transition(app, event);
     };
   }
 
+  private static class KillAppAndAttemptTransition extends AppKilledTransition {
+    @SuppressWarnings("unchecked")
+    @Override
+    public void transition(RMAppImpl app, RMAppEvent event) {
+      app.handler.handle(new RMAppAttemptEvent(app.currentAttempt.getAppAttemptId(), 
+          RMAppAttemptEventType.KILL));
+      super.transition(app, event);
+    }
+  }
   private static final class AppRejectedTransition extends
       FinalTransition{
     public void transition(RMAppImpl app, RMAppEvent event) {
@@ -449,11 +460,11 @@ public class RMAppImpl implements RMApp 
     public void transition(RMAppImpl app, RMAppEvent event) {
       Set<NodeId> nodes = getNodesOnWhichAttemptRan(app);
       for (NodeId nodeId : nodes) {
-        app.dispatcher.getEventHandler().handle(
+        app.handler.handle(
             new RMNodeCleanAppEvent(nodeId, app.applicationId));
       }
       app.finishTime = System.currentTimeMillis();
-      app.dispatcher.getEventHandler().handle(
+      app.handler.handle(
           new RMAppManagerEvent(app.applicationId, 
           RMAppManagerEventType.APP_COMPLETED));
     };
@@ -470,11 +481,13 @@ public class RMAppImpl implements RMApp 
 
     @Override
     public RMAppState transition(RMAppImpl app, RMAppEvent event) {
-
+      
+      RMAppFailedAttemptEvent failedEvent = ((RMAppFailedAttemptEvent)event);
       if (app.attempts.size() == app.maxRetries) {
         String msg = "Application " + app.getApplicationId()
         + " failed " + app.maxRetries
-        + " times. Failing the application.";
+        + " times due to " + failedEvent.getDiagnostics()
+        + ". Failing the application.";
         LOG.info(msg);
         app.diagnostics.append(msg);
         // Inform the node for app-finish

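The RMAppImpl hunks cache the dispatcher's EventHandler in a field, route KILL through KillAppAndAttemptTransition so the current attempt receives RMAppAttemptEventType.KILL as well, and fold the failed attempt's diagnostics into the app-level failure message. A hedged sketch of how a test dispatcher could observe the kill fan-out; the recording handler is a made-up helper and getType() is assumed to come from AbstractEvent:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.yarn.event.EventHandler;
    import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
    import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;

    // Made-up helper: register it for RMAppAttemptEventType on a test
    // dispatcher to verify that a KILL on the app reaches its current attempt.
    public class RecordingAttemptHandler
        implements EventHandler<RMAppAttemptEvent> {

      private final List<RMAppAttemptEvent> events =
          new ArrayList<RMAppAttemptEvent>();

      @Override
      public void handle(RMAppAttemptEvent event) {
        events.add(event);
      }

      public boolean sawKill() {
        for (RMAppAttemptEvent event : events) {
          // getType() is assumed to be inherited from AbstractEvent.
          if (event.getType() == RMAppAttemptEventType.KILL) {
            return true;
          }
        }
        return false;
      }
    }
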
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java Thu Sep 29 00:42:47 2011
@@ -36,7 +36,7 @@ import org.apache.hadoop.yarn.server.res
  * {@link YarnConfiguration#RM_AM_MAX_RETRIES}. For specific 
  * implementation take a look at {@link RMAppAttemptImpl}.
  */
-public interface RMAppAttempt extends EventHandler<RMAppAttemptEvent>{
+public interface RMAppAttempt extends EventHandler<RMAppAttemptEvent> {
 
   /**
    * Get the application attempt id for this {@link RMAppAttempt}.
@@ -79,7 +79,7 @@ public interface RMAppAttempt extends Ev
    * Diagnostics information for the application attempt.
    * @return diagnostics information for the application attempt.
    */
-  StringBuilder getDiagnostics();
+  String getDiagnostics();
 
   /**
    * Progress for the application attempt.

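Returning String instead of StringBuilder from getDiagnostics() means callers get an immutable snapshot taken under the attempt's read lock rather than a handle to shared mutable state. A generic illustration of that snapshot-under-lock pattern; DiagnosticsHolder is a stand-in class, not a YARN type:

    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Generic stand-in, not a YARN class.
    public class DiagnosticsHolder {

      private final StringBuilder diagnostics = new StringBuilder();
      private final ReadWriteLock lock = new ReentrantReadWriteLock();

      public void append(String message) {
        lock.writeLock().lock();
        try {
          diagnostics.append(message);
        } finally {
          lock.writeLock().unlock();
        }
      }

      public String snapshot() {
        lock.readLock().lock();
        try {
          // A String snapshot cannot be mutated by the caller and cannot
          // change under the caller's feet, unlike the StringBuilder the
          // old getDiagnostics() handed out.
          return diagnostics.toString();
        } finally {
          lock.readLock().unlock();
        }
      }
    }
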
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java Thu Sep 29 00:42:47 2011
@@ -31,6 +31,7 @@ import java.util.concurrent.locks.Reentr
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -47,6 +48,7 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppFailedAttemptEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRejectedEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerAcquiredEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent;
@@ -104,10 +106,10 @@ public class RMAppAttemptImpl implements
   private Container masterContainer;
 
   private float progress = 0;
-  private String host;
+  private String host = "N/A";
   private int rpcPort;
-  private String trackingUrl;
-  private String finalState;
+  private String trackingUrl = "N/A";
+  private String finalState = "N/A";
   private final StringBuilder diagnostics = new StringBuilder();
 
   private static final StateMachineFactory<RMAppAttemptImpl,
@@ -123,7 +125,8 @@ public class RMAppAttemptImpl implements
       .addTransition(RMAppAttemptState.NEW, RMAppAttemptState.SUBMITTED,
           RMAppAttemptEventType.START, new AttemptStartedTransition())
       .addTransition(RMAppAttemptState.NEW, RMAppAttemptState.KILLED,
-          RMAppAttemptEventType.KILL)
+          RMAppAttemptEventType.KILL,
+          new BaseFinalTransition(RMAppAttemptState.KILLED))
 
       // Transitions from SUBMITTED state
       .addTransition(RMAppAttemptState.SUBMITTED, RMAppAttemptState.FAILED,
@@ -323,16 +326,26 @@ public class RMAppAttemptImpl implements
   }
 
   @Override
-  public StringBuilder getDiagnostics() {
+  public String getDiagnostics() {
     this.readLock.lock();
 
     try {
-      return this.diagnostics;
+      return this.diagnostics.toString();
     } finally {
       this.readLock.unlock();
     }
   }
 
+  public void setDiagnostics(String message) {
+    this.writeLock.lock();
+
+    try {
+      this.diagnostics.append(message);
+    } finally {
+      this.writeLock.unlock();
+    }
+  }
+
   @Override
   public float getProgress() {
     this.readLock.lock();
@@ -446,10 +459,17 @@ public class RMAppAttemptImpl implements
         RMAppAttemptEvent event) {
 
       RMAppAttemptRejectedEvent rejectedEvent = (RMAppAttemptRejectedEvent) event;
+      
+      // Save the diagnostic message
+      String message = rejectedEvent.getMessage();
+      appAttempt.setDiagnostics(message);
+      
       // Send the rejection event to app
-      appAttempt.eventHandler.handle(new RMAppRejectedEvent(rejectedEvent
-          .getApplicationAttemptId().getApplicationId(), rejectedEvent
-          .getMessage()));
+      appAttempt.eventHandler.handle(
+          new RMAppRejectedEvent(
+              rejectedEvent.getApplicationAttemptId().getApplicationId(), 
+              message)
+          );
     }
   }
 
@@ -472,8 +492,6 @@ public class RMAppAttemptImpl implements
       ResourceRequest request = BuilderUtils.newResourceRequest(
           AM_CONTAINER_PRIORITY, "*", appAttempt.submissionContext
               .getAMContainerSpec().getResource(), 1);
-      LOG.debug("About to request resources for AM of "
-          + appAttempt.applicationAttemptId + " required " + request);
 
       appAttempt.scheduler.allocate(appAttempt.applicationAttemptId,
           Collections.singletonList(request), EMPTY_CONTAINER_RELEASE_LIST);
@@ -517,23 +535,39 @@ public class RMAppAttemptImpl implements
           .unregisterAttempt(appAttempt.applicationAttemptId);
 
       // Tell the application and the scheduler
-      RMAppEventType eventToApp = null;
+      ApplicationId applicationId = appAttempt.getAppAttemptId().getApplicationId();
+      RMAppEvent appEvent = null;
       switch (finalAttemptState) {
-      case FINISHED:
-        eventToApp = RMAppEventType.ATTEMPT_FINISHED;
+        case FINISHED:
+        {
+          appEvent = 
+              new RMAppEvent(applicationId, RMAppEventType.ATTEMPT_FINISHED);
+        }
         break;
-      case KILLED:
-        eventToApp = RMAppEventType.ATTEMPT_KILLED;
+        case KILLED:
+        {
+          appEvent = 
+              new RMAppFailedAttemptEvent(applicationId, 
+                  RMAppEventType.ATTEMPT_KILLED, 
+                  "Application killed by user.");
+        }
         break;
-      case FAILED:
-        eventToApp = RMAppEventType.ATTEMPT_FAILED;
+        case FAILED:
+        {
+          appEvent = 
+              new RMAppFailedAttemptEvent(applicationId, 
+                  RMAppEventType.ATTEMPT_FAILED, 
+                  appAttempt.getDiagnostics());
+        }
         break;
-      default:
-        LOG.info("Cannot get this state!! Error!!");
+        default:
+        {
+          LOG.error("Cannot get this state!! Error!!");
+        }
         break;
       }
-      appAttempt.eventHandler.handle(new RMAppEvent(
-          appAttempt.applicationAttemptId.getApplicationId(), eventToApp));
+      
+      appAttempt.eventHandler.handle(appEvent);
       appAttempt.eventHandler.handle(new AppRemovedSchedulerEvent(appAttempt
           .getAppAttemptId(), finalAttemptState));
     }
@@ -621,16 +655,23 @@ public class RMAppAttemptImpl implements
     public void transition(RMAppAttemptImpl appAttempt,
         RMAppAttemptEvent event) {
 
+      RMAppAttemptContainerFinishedEvent finishEvent =
+          ((RMAppAttemptContainerFinishedEvent)event);
+      
       // UnRegister from AMLivelinessMonitor
       appAttempt.rmContext.getAMLivelinessMonitor().unregister(
           appAttempt.getAppAttemptId());
 
-      // Tell the app, scheduler
-      super.transition(appAttempt, event);
+      // Setup diagnostic message
+      ContainerStatus status = finishEvent.getContainerStatus();
+      appAttempt.diagnostics.append("AM Container for " +
+          appAttempt.getAppAttemptId() + " exited with" +
+          " exitCode: " + status.getExitStatus() +
+          " due to: " + status.getDiagnostics() + "." +
+          " Failing this attempt.");
 
-      // Use diagnostic saying crashed.
-      appAttempt.diagnostics.append("AM Container for "
-          + appAttempt.getAppAttemptId() + " exited. Failing this attempt.");
+      // Tell the app, scheduler
+      super.transition(appAttempt, finishEvent);
     }
   }
 
@@ -644,6 +685,8 @@ public class RMAppAttemptImpl implements
     public void transition(RMAppAttemptImpl appAttempt,
         RMAppAttemptEvent event) {
 
+      appAttempt.progress = 1.0f;
+
       // Tell the app and the scheduler
       super.transition(appAttempt, event);
 
@@ -725,6 +768,13 @@ public class RMAppAttemptImpl implements
       // the AMContainer, AppAttempt fails
       if (appAttempt.masterContainer.getId().equals(
           containerStatus.getContainerId())) {
+        // Setup diagnostic message
+        appAttempt.diagnostics.append("AM Container for " +
+            appAttempt.getAppAttemptId() + " exited with" +
+            " exitCode: " + containerStatus.getExitStatus() +
+            " due to: " + containerStatus.getDiagnostics() + "." +
+            " Failing this attempt.");
+
         new FinalTransition(RMAppAttemptState.FAILED).transition(
             appAttempt, containerFinishedEvent);
         return RMAppAttemptState.FAILED;

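RMAppAttemptImpl now seeds host, trackingUrl and finalState with "N/A", appends a diagnostic describing the AM container's exit code and reason, and reports attempt failure or kill to the app through RMAppFailedAttemptEvent. That event class is only imported here; a sketch of the shape this patch appears to rely on, inferred from the constructor and getDiagnostics() calls above (the real class may differ):

    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
    import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;

    // Inferred shape only; the actual RMAppFailedAttemptEvent in the source
    // tree may differ.
    public class RMAppFailedAttemptEvent extends RMAppEvent {

      private final String diagnostics;

      public RMAppFailedAttemptEvent(ApplicationId appId, RMAppEventType event,
          String diagnostics) {
        super(appId, event);   // RMAppEvent(ApplicationId, RMAppEventType) assumed
        this.diagnostics = diagnostics;
      }

      public String getDiagnostics() {
        return this.diagnostics;
      }
    }
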
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java Thu Sep 29 00:42:47 2011
@@ -144,9 +144,10 @@ public class RMNodeImpl implements RMNod
     this.httpPort = httpPort;
     this.totalCapability = capability; 
     this.nodeAddress = hostName + ":" + cmPort;
-    this.httpAddress = hostName + ":" + httpPort;;
+    this.httpAddress = hostName + ":" + httpPort;
     this.node = node;
     this.nodeHealthStatus.setIsNodeHealthy(true);
+    this.nodeHealthStatus.setHealthReport("Healthy");
     this.nodeHealthStatus.setLastHealthReportTime(System.currentTimeMillis());
 
     this.latestHeartBeatResponse.setResponseId(0);
@@ -222,6 +223,18 @@ public class RMNodeImpl implements RMNod
     }
   }
 
+  private void setNodeHealthStatus(NodeHealthStatus status)
+  {
+    this.writeLock.lock();
+    try {
+      this.nodeHealthStatus.setHealthReport(status.getHealthReport());
+      this.nodeHealthStatus.setIsNodeHealthy(status.getIsNodeHealthy());
+      this.nodeHealthStatus.setLastHealthReportTime(status.getLastHealthReportTime());
+    } finally {
+      this.writeLock.unlock();
+    }
+  }
+
   @Override
   public RMNodeState getState() {
     this.readLock.lock();
@@ -345,7 +358,10 @@ public class RMNodeImpl implements RMNod
       // Switch the last heartbeatresponse.
       rmNode.latestHeartBeatResponse = statusEvent.getLatestResponse();
 
-      if (!statusEvent.getNodeHealthStatus().getIsNodeHealthy()) {
+      NodeHealthStatus remoteNodeHealthStatus = 
+          statusEvent.getNodeHealthStatus();
+      rmNode.setNodeHealthStatus(remoteNodeHealthStatus);
+      if (!remoteNodeHealthStatus.getIsNodeHealthy()) {
         // Inform the scheduler
         rmNode.context.getDispatcher().getEventHandler().handle(
             new NodeRemovedSchedulerEvent(rmNode));
@@ -392,8 +408,9 @@ public class RMNodeImpl implements RMNod
 
       // Switch the last heartbeatresponse.
       rmNode.latestHeartBeatResponse = statusEvent.getLatestResponse();
-
-      if (statusEvent.getNodeHealthStatus().getIsNodeHealthy()) {
+      NodeHealthStatus remoteNodeHealthStatus = statusEvent.getNodeHealthStatus();
+      rmNode.setNodeHealthStatus(remoteNodeHealthStatus);
+      if (remoteNodeHealthStatus.getIsNodeHealthy()) {
         rmNode.context.getDispatcher().getEventHandler().handle(
             new NodeAddedSchedulerEvent(rmNode));
         return RMNodeState.RUNNING;

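RMNodeImpl now initialises the health report to "Healthy" and, on every status update, copies the remote NodeHealthStatus field-by-field under the write lock, so readers never see a half-updated report. A small consumer sketch; the RMNode accessors and the NodeHealthStatus import path are assumptions based on the surrounding code:

    import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
    import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;

    // Consumer sketch; getNodeID()/getNodeHealthStatus() on RMNode and the
    // NodeHealthStatus import path are assumptions.
    public class NodeHealthLogger {

      public static String describe(RMNode node) {
        NodeHealthStatus status = node.getNodeHealthStatus();
        // Field-by-field copying under the write lock in RMNodeImpl means
        // this read sees a consistent (healthy, report, timestamp) triple.
        return node.getNodeID() + ": "
            + (status.getIsNodeHealthy() ? "HEALTHY" : "UNHEALTHY")
            + " (" + status.getHealthReport() + ")";
      }
    }
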
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java Thu Sep 29 00:42:47 2011
@@ -32,10 +32,8 @@ import static org.apache.hadoop.metrics2
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableCounterInt;
 import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
-import org.apache.hadoop.yarn.api.records.ApplicationState;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
-import org.apache.hadoop.yarn.util.Self;
 import static org.apache.hadoop.yarn.server.resourcemanager.resource.Resources.*;
 
 import org.slf4j.LoggerFactory;
@@ -282,4 +280,56 @@ public class QueueMetrics {
       parent.unreserveResource(user, res);
     }
   }
+  
+  public int getAppsSubmitted() {
+    return appsSubmitted.value();
+  }
+  
+  public int getAppsRunning() {
+    return appsRunning.value();
+  }
+  
+  public int getAppsPending() {
+    return appsPending.value();
+  }
+  
+  public int getAppsCompleted() {
+    return appsCompleted.value();
+  }
+  
+  public int getAppsKilled() {
+    return appsKilled.value();
+  }
+  
+  public int getAppsFailed() {
+    return appsFailed.value();
+  }
+
+  public int getAllocatedGB() {
+    return allocatedGB.value();
+  }
+
+  public int getAllocatedContainers() {
+    return allocatedContainers.value();
+  }
+  
+  public int getAvailableGB() {
+    return availableGB.value();
+  }  
+
+  public int getPendingGB() {
+    return pendingGB.value();
+  }
+
+  public int getPendingContainers() {
+    return pendingContainers.value();
+  }
+  
+  public int getReservedGB() {
+    return reservedGB.value();
+  }
+
+  public int getReservedContainers() {
+    return reservedContainers.value();
+  }
 }

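The new QueueMetrics getters expose the gauges and counters as plain ints so web views and tests can read them without going through the metrics system. A sketch of a consumer; how the QueueMetrics instance is obtained (scheduler root queue, test factory, ...) is deliberately left out:

    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;

    // Consumer sketch of the new accessors; obtaining the QueueMetrics
    // instance is out of scope here.
    public class ClusterMetricsSnapshot {

      public static String summarize(QueueMetrics metrics) {
        int submitted = metrics.getAppsSubmitted();
        int active = metrics.getAppsRunning() + metrics.getAppsPending();
        int finished = metrics.getAppsCompleted() + metrics.getAppsKilled()
            + metrics.getAppsFailed();
        return "apps: submitted=" + submitted + ", active=" + active
            + ", finished=" + finished
            + "; allocatedGB=" + metrics.getAllocatedGB()
            + ", availableGB=" + metrics.getAvailableGB()
            + ", pendingContainers=" + metrics.getPendingContainers();
      }
    }
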
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java?rev=1177130&r1=1177129&r2=1177130&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java Thu Sep 29 00:42:47 2011
@@ -207,13 +207,18 @@ public class SchedulerApp {
         .getDispatcher().getEventHandler(), this.rmContext
         .getContainerAllocationExpirer());
 
+    // Add it to allContainers list.
+    newlyAllocatedContainers.add(rmContainer);
+    liveContainers.put(container.getId(), rmContainer);    
+
     // Update consumption and track allocations
-    
+    appSchedulingInfo.allocate(type, node, priority, request, container);
+    Resources.addTo(currentConsumption, container.getResource());
+
     // Inform the container
     rmContainer.handle(
         new RMContainerEvent(container.getId(), RMContainerEventType.START));
 
-    Resources.addTo(currentConsumption, container.getResource());
     if (LOG.isDebugEnabled()) {
       LOG.debug("allocate: applicationAttemptId=" 
           + container.getId().getApplicationAttemptId() 
@@ -223,12 +228,6 @@ public class SchedulerApp {
     RMAuditLogger.logSuccess(getUser(), 
         AuditConstants.ALLOC_CONTAINER, "SchedulerApp", 
         getApplicationId(), container.getId());
-
-    // Add it to allContainers list.
-    newlyAllocatedContainers.add(rmContainer);
-    liveContainers.put(container.getId(), rmContainer);
-    
-    appSchedulingInfo.allocate(type, node, priority, request, container);
     
     return rmContainer;
   }
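
The SchedulerApp.allocate() reordering does the bookkeeping (newlyAllocatedContainers, liveContainers, appSchedulingInfo.allocate, currentConsumption) before firing RMContainerEventType.START, so anything reacting to the START event already sees the container recorded against the application. A generic illustration of that publish-before-notify ordering with made-up types:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Made-up types illustrating the ordering only.
    public class AllocationBook {

      public interface Listener {
        void onStart(String containerId);
      }

      private final Map<String, String> liveContainers =
          new HashMap<String, String>();
      private final List<Listener> listeners = new ArrayList<Listener>();

      public void addListener(Listener listener) {
        listeners.add(listener);
      }

      public void allocate(String containerId, String node) {
        // 1. Bookkeeping first, mirroring the moved lines in allocate().
        liveContainers.put(containerId, node);
        // 2. Only then notify, so listeners reacting to START already see
        //    the container as live.
        for (Listener listener : listeners) {
          listener.onStart(containerId);
        }
      }

      public boolean isLive(String containerId) {
        return liveContainers.containsKey(containerId);
      }
    }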