Posted to mapreduce-commits@hadoop.apache.org by ac...@apache.org on 2011/09/11 19:28:09 UTC

svn commit: r1169485 [2/2] - in /hadoop/common/branches/branch-0.23/hadoop-mapreduce-project: ./ hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ hadoop-yarn/hadoop-yarn-api/src/main/java/org/apac...

Modified: hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java?rev=1169485&r1=1169484&r2=1169485&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java Sun Sep 11 17:28:08 2011
@@ -36,10 +36,9 @@ import org.apache.hadoop.security.Access
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.Lock;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
@@ -59,11 +58,11 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerExpiredSchedulerEvent;
@@ -127,6 +126,8 @@ implements ResourceScheduler, CapacitySc
 
   private boolean initialized = false;
 
+  public CapacityScheduler() {}
+
   public CSQueue getRootQueue() {
     return root;
   }
@@ -392,12 +393,20 @@ implements ResourceScheduler, CapacitySc
     
     // Release all the running containers 
     for (RMContainer rmContainer : application.getLiveContainers()) {
-      completedContainer(rmContainer, RMContainerEventType.KILL);
+      completedContainer(rmContainer, 
+          SchedulerUtils.createAbnormalContainerStatus(
+              rmContainer.getContainerId(), 
+              SchedulerUtils.COMPLETED_APPLICATION), 
+          RMContainerEventType.KILL);
     }
     
      // Release all reserved containers
     for (RMContainer rmContainer : application.getAllReservedContainers()) {
-      completedContainer(rmContainer, RMContainerEventType.KILL);
+      completedContainer(rmContainer, 
+          SchedulerUtils.createAbnormalContainerStatus(
+              rmContainer.getContainerId(), 
+              "Application Complete"), 
+          RMContainerEventType.KILL);
     }
     
     // Clean up pending requests, metrics etc.
@@ -445,7 +454,11 @@ implements ResourceScheduler, CapacitySc
              "Trying to release container not owned by app or with invalid id",
              application.getApplicationId(), releasedContainerId);
       }
-      completedContainer(rmContainer, RMContainerEventType.RELEASED);
+      completedContainer(rmContainer,
+          SchedulerUtils.createAbnormalContainerStatus(
+              releasedContainerId, 
+              SchedulerUtils.RELEASED_CONTAINER),
+          RMContainerEventType.RELEASED);
     }
 
     synchronized (application) {
@@ -521,22 +534,23 @@ implements ResourceScheduler, CapacitySc
   }
 
   private synchronized void nodeUpdate(RMNode nm, 
-      Map<ApplicationId, List<Container>> containers ) {
+      List<ContainerStatus> newlyLaunchedContainers,
+      List<ContainerStatus> completedContainers) {
     LOG.info("nodeUpdate: " + nm + " clusterResources: " + clusterResource);
     
     SchedulerNode node = getNode(nm.getNodeID());
 
-    // Processing the current containers running/finished on node
-    for (List<Container> appContainers : containers.values()) {
-      for (Container container : appContainers) {
-        if (container.getState() == ContainerState.RUNNING) {
-          containerLaunchedOnNode(container, node);
-        } else { // has to be 'COMPLETE'
-          LOG.info("DEBUG --- Container FINISHED: " + container.getId());
-          completedContainer(getRMContainer(container.getId()), 
-              RMContainerEventType.FINISHED);
-        }
-      }
+    // Processing the newly launched containers
+    for (ContainerStatus launchedContainer : newlyLaunchedContainers) {
+      containerLaunchedOnNode(launchedContainer.getContainerId(), node);
+    }
+
+    // Process completed containers
+    for (ContainerStatus completedContainer : completedContainers) {
+      ContainerId containerId = completedContainer.getContainerId();
+      LOG.info("DEBUG --- Container FINISHED: " + containerId);
+      completedContainer(getRMContainer(containerId), 
+          completedContainer, RMContainerEventType.FINISHED);
     }
 
     // Now node data structures are upto date and ready for scheduling.
@@ -571,18 +585,18 @@ implements ResourceScheduler, CapacitySc
 
   }
 
-  private void containerLaunchedOnNode(Container container, SchedulerNode node) {
+  private void containerLaunchedOnNode(ContainerId containerId, SchedulerNode node) {
     // Get the application for the finished container
-    ApplicationAttemptId applicationAttemptId = container.getId().getAppAttemptId();
+    ApplicationAttemptId applicationAttemptId = containerId.getAppAttemptId();
     SchedulerApp application = getApplication(applicationAttemptId);
     if (application == null) {
       LOG.info("Unknown application: " + applicationAttemptId + 
-          " launched container " + container.getId() +
+          " launched container " + containerId +
           " on node: " + node);
       return;
     }
     
-    application.containerLaunchedOnNode(container.getId());
+    application.containerLaunchedOnNode(containerId);
   }
 
   @Override
@@ -604,7 +618,8 @@ implements ResourceScheduler, CapacitySc
     {
       NodeUpdateSchedulerEvent nodeUpdatedEvent = (NodeUpdateSchedulerEvent)event;
       nodeUpdate(nodeUpdatedEvent.getRMNode(), 
-          nodeUpdatedEvent.getContainers());
+          nodeUpdatedEvent.getNewlyLaunchedContainers(),
+          nodeUpdatedEvent.getCompletedContainers());
     }
     break;
     case APP_ADDED:
@@ -625,7 +640,11 @@ implements ResourceScheduler, CapacitySc
     {
       ContainerExpiredSchedulerEvent containerExpiredEvent = 
           (ContainerExpiredSchedulerEvent) event;
-      completedContainer(getRMContainer(containerExpiredEvent.getContainerId()), 
+      ContainerId containerId = containerExpiredEvent.getContainerId();
+      completedContainer(getRMContainer(containerId), 
+          SchedulerUtils.createAbnormalContainerStatus(
+              containerId, 
+              SchedulerUtils.EXPIRED_CONTAINER), 
           RMContainerEventType.EXPIRE);
     }
     break;
@@ -652,13 +671,21 @@ implements ResourceScheduler, CapacitySc
     // Remove running containers
     List<RMContainer> runningContainers = node.getRunningContainers();
     for (RMContainer container : runningContainers) {
-      completedContainer(container, RMContainerEventType.KILL);
+      completedContainer(container, 
+          SchedulerUtils.createAbnormalContainerStatus(
+              container.getContainerId(), 
+              SchedulerUtils.LOST_CONTAINER), 
+          RMContainerEventType.KILL);
     }
     
     // Remove reservations, if any
     RMContainer reservedContainer = node.getReservedContainer();
     if (reservedContainer != null) {
-      completedContainer(reservedContainer, RMContainerEventType.KILL);
+      completedContainer(reservedContainer, 
+          SchedulerUtils.createAbnormalContainerStatus(
+              reservedContainer.getContainerId(), 
+              SchedulerUtils.LOST_CONTAINER), 
+          RMContainerEventType.KILL);
     }
 
     this.nodes.remove(nodeInfo.getNodeID());
@@ -667,8 +694,8 @@ implements ResourceScheduler, CapacitySc
   }
   
   @Lock(CapacityScheduler.class)
-  private synchronized void completedContainer(RMContainer rmContainer, 
-      RMContainerEventType event) {
+  private synchronized void completedContainer(RMContainer rmContainer,
+      ContainerStatus containerStatus, RMContainerEventType event) {
     if (rmContainer == null) {
       LOG.info("Null container completed...");
       return;
@@ -692,7 +719,7 @@ implements ResourceScheduler, CapacitySc
     // Inform the queue
     LeafQueue queue = (LeafQueue)application.getQueue();
     queue.completedContainer(clusterResource, application, node, 
-        rmContainer, event);
+        rmContainer, containerStatus, event);
 
     LOG.info("Application " + applicationAttemptId + 
         " released container " + container.getId() +

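Note: the CapacityScheduler hunks above route every abnormal completion (kill, release, expiry, node loss) through SchedulerUtils.createAbnormalContainerStatus(containerId, diagnostic). The helper itself is added elsewhere in this commit (part [1/2] of r1169485), so it is not visible in this diff; the following is only a minimal sketch of its presumed shape, built from the record-factory pattern already used in these files. The package placement, exit-status sentinel and class layout are assumptions, not the committed code.

package org.apache.hadoop.yarn.server.resourcemanager.scheduler;

import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

public class SchedulerUtils {

  private static final RecordFactory recordFactory =
      RecordFactoryProvider.getRecordFactory(null);

  // Assumed sentinel for containers that did not exit on their own.
  private static final int ABORTED_EXIT_STATUS = -100;

  // Builds a COMPLETE ContainerStatus for a container that ended abnormally
  // (killed, released, expired, lost). The diagnostics argument is one of the
  // constants referenced in the hunks above (COMPLETED_APPLICATION,
  // RELEASED_CONTAINER, EXPIRED_CONTAINER, LOST_CONTAINER, UNRESERVED_CONTAINER),
  // which are not reproduced in this sketch.
  public static ContainerStatus createAbnormalContainerStatus(
      ContainerId containerId, String diagnostics) {
    ContainerStatus status =
        recordFactory.newRecordInstance(ContainerStatus.class);
    status.setContainerId(containerId);
    status.setDiagnostics(diagnostics);
    status.setExitStatus(ABORTED_EXIT_STATUS);
    status.setState(ContainerState.COMPLETE);
    return status;
  }
}
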
Modified: hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java?rev=1169485&r1=1169484&r2=1169485&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java Sun Sep 11 17:28:08 2011
@@ -39,6 +39,7 @@ import org.apache.hadoop.security.UserGr
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.ContainerToken;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueACL;
@@ -59,6 +60,7 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
 import org.apache.hadoop.yarn.util.BuilderUtils;
 
@@ -762,7 +764,11 @@ public class LeafQueue implements CSQueu
       // Release
       Container container = rmContainer.getContainer();
       completedContainer(clusterResource, application, node, 
-          rmContainer, RMContainerEventType.RELEASED);
+          rmContainer, 
+          SchedulerUtils.createAbnormalContainerStatus(
+              container.getId(), 
+              SchedulerUtils.UNRESERVED_CONTAINER), 
+          RMContainerEventType.RELEASED);
       return container.getResource();
     }
 
@@ -1175,7 +1181,7 @@ public class LeafQueue implements CSQueu
   @Override
   public void completedContainer(Resource clusterResource, 
       SchedulerApp application, SchedulerNode node, RMContainer rmContainer, 
-      RMContainerEventType event) {
+      ContainerStatus containerStatus, RMContainerEventType event) {
     if (application != null) {
       // Careful! Locking order is important!
       synchronized (this) {
@@ -1190,7 +1196,7 @@ public class LeafQueue implements CSQueu
           application.unreserve(node, rmContainer.getReservedPriority());
           node.unreserveResource(application);
         } else {
-          application.containerCompleted(rmContainer, event);
+          application.containerCompleted(rmContainer, containerStatus, event);
           node.releaseContainer(container);
         }
 
@@ -1210,7 +1216,7 @@ public class LeafQueue implements CSQueu
 
       // Inform the parent queue
       parent.completedContainer(clusterResource, application, 
-          node, rmContainer, event);
+          node, rmContainer, null, event);
     }
   }
 

Modified: hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java?rev=1169485&r1=1169484&r2=1169485&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java Sun Sep 11 17:28:08 2011
@@ -37,6 +37,7 @@ import org.apache.hadoop.security.Access
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueState;
@@ -608,7 +609,7 @@ public class ParentQueue implements CSQu
   @Override
   public void completedContainer(Resource clusterResource,
       SchedulerApp application, SchedulerNode node, 
-      RMContainer rmContainer, RMContainerEventType event) {
+      RMContainer rmContainer, ContainerStatus containerStatus, RMContainerEventType event) {
     if (application != null) {
       // Careful! Locking order is important!
       // Book keeping
@@ -626,7 +627,7 @@ public class ParentQueue implements CSQu
       // Inform the parent
       if (parent != null) {
         parent.completedContainer(clusterResource, application, 
-            node, rmContainer, event);
+            node, rmContainer, null, event);
       }    
     }
   }

Modified: hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeUpdateSchedulerEvent.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeUpdateSchedulerEvent.java?rev=1169485&r1=1169484&r2=1169485&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeUpdateSchedulerEvent.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeUpdateSchedulerEvent.java Sun Sep 11 17:28:08 2011
@@ -23,26 +23,33 @@ import java.util.Map;
 
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 
 public class NodeUpdateSchedulerEvent extends SchedulerEvent {
 
   private final RMNode rmNode;
-  private final Map<ApplicationId, List<Container>> containers;
+  private final List<ContainerStatus> newlyLaunchedContainers;
+  private final List<ContainerStatus> completedContainersStatuses;
 
   public NodeUpdateSchedulerEvent(RMNode rmNode,
-      Map<ApplicationId, List<Container>> containers) {
+      List<ContainerStatus> newlyLaunchedContainers,
+      List<ContainerStatus> completedContainers) {
     super(SchedulerEventType.NODE_UPDATE);
     this.rmNode = rmNode;
-    this.containers = containers;
+    this.newlyLaunchedContainers = newlyLaunchedContainers;
+    this.completedContainersStatuses = completedContainers;
   }
 
   public RMNode getRMNode() {
     return rmNode;
   }
 
-  public Map<ApplicationId, List<Container>> getContainers() {
-    return containers;
+  public List<ContainerStatus> getNewlyLaunchedContainers() {
+    return newlyLaunchedContainers;
   }
 
+  public List<ContainerStatus> getCompletedContainers() {
+    return completedContainersStatuses;
+  }
 }

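Note: NodeUpdateSchedulerEvent now carries the raw ContainerStatus lists instead of the per-application Container map. The producing side (the RMNode status-update handling) is changed in part [1/2] of this commit and is not shown here; the fragment below is a hedged sketch of how such a handler presumably splits a heartbeat before firing the reshaped event. The dispatcher field and the status list parameter are assumptions drawn from the surrounding context, not committed code.

  // Hypothetical helper: split a node heartbeat's container statuses by state
  // and hand the scheduler the two lists it now expects. dispatcher and rmNode
  // are assumed to come from the enclosing RMNode handler.
  private void updateScheduler(RMNode rmNode, List<ContainerStatus> statusList) {
    List<ContainerStatus> newlyLaunchedContainers = new ArrayList<ContainerStatus>();
    List<ContainerStatus> completedContainers = new ArrayList<ContainerStatus>();
    for (ContainerStatus remoteContainer : statusList) {
      if (remoteContainer.getState() == ContainerState.RUNNING) {
        newlyLaunchedContainers.add(remoteContainer);
      } else { // has to be COMPLETE
        completedContainers.add(remoteContainer);
      }
    }
    dispatcher.handle(new NodeUpdateSchedulerEvent(
        rmNode, newlyLaunchedContainers, completedContainers));
  }
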
Modified: hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java?rev=1169485&r1=1169484&r2=1169485&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java Sun Sep 11 17:28:08 2011
@@ -39,10 +39,9 @@ import org.apache.hadoop.security.UserGr
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.yarn.Lock;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.ContainerToken;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
@@ -73,6 +72,7 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerExpiredSchedulerEvent;
@@ -90,7 +90,7 @@ public class FifoScheduler implements Re
 
   private static final Log LOG = LogFactory.getLog(FifoScheduler.class);
 
-  private final RecordFactory recordFactory = 
+  private static final RecordFactory recordFactory = 
     RecordFactoryProvider.getRecordFactory(null);
 
   Configuration conf;
@@ -234,7 +234,11 @@ public class FifoScheduler implements Re
              "Trying to release container not owned by app or with invalid id",
              application.getApplicationId(), releasedContainer);
       }
-      containerCompleted(rmContainer, RMContainerEventType.RELEASED);
+      containerCompleted(rmContainer,
+          SchedulerUtils.createAbnormalContainerStatus(
+              releasedContainer, 
+              SchedulerUtils.RELEASED_CONTAINER),
+          RMContainerEventType.RELEASED);
     }
 
     if (!ask.isEmpty()) {
@@ -312,7 +316,11 @@ public class FifoScheduler implements Re
 
     // Kill all 'live' containers
     for (RMContainer container : application.getLiveContainers()) {
-      containerCompleted(container, RMContainerEventType.KILL);
+      containerCompleted(container, 
+          SchedulerUtils.createAbnormalContainerStatus(
+              container.getContainerId(), 
+              SchedulerUtils.COMPLETED_APPLICATION),
+          RMContainerEventType.KILL);
     }
 
     // Clean up pending requests, metrics etc.
@@ -542,25 +550,22 @@ public class FifoScheduler implements Re
     return assignedContainers;
   }
 
-  private synchronized void nodeUpdate(RMNode rmNode,
-      Map<ApplicationId, List<Container>> remoteContainers) {
+  private synchronized void nodeUpdate(RMNode rmNode, 
+      List<ContainerStatus> newlyLaunchedContainers,
+      List<ContainerStatus> completedContainers) {
     SchedulerNode node = getNode(rmNode.getNodeID());
     
-    for (List<Container> appContainers : remoteContainers.values()) {
-      for (Container container : appContainers) {
-        /* make sure the scheduler hasnt already removed the applications */
-        if (getApplication(container.getId().getAppAttemptId()) != null) {
-          if (container.getState() == ContainerState.RUNNING) {
-            containerLaunchedOnNode(container, node);
-          } else { // has to COMPLETE
-            containerCompleted(getRMContainer(container.getId()), 
-                RMContainerEventType.FINISHED);
-          }
-        }
-        else {
-          LOG.warn("Scheduler not tracking application " + container.getId().getAppAttemptId());
-        }
-      }
+    // Processing the newly launched containers
+    for (ContainerStatus launchedContainer : newlyLaunchedContainers) {
+      containerLaunchedOnNode(launchedContainer.getContainerId(), node);
+    }
+
+    // Process completed containers
+    for (ContainerStatus completedContainer : completedContainers) {
+      ContainerId containerId = completedContainer.getContainerId();
+      LOG.info("DEBUG --- Container FINISHED: " + containerId);
+      containerCompleted(getRMContainer(containerId), 
+          completedContainer, RMContainerEventType.FINISHED);
     }
 
     if (Resources.greaterThanOrEqual(node.getAvailableResource(),
@@ -598,7 +603,8 @@ public class FifoScheduler implements Re
       NodeUpdateSchedulerEvent nodeUpdatedEvent = 
       (NodeUpdateSchedulerEvent)event;
       nodeUpdate(nodeUpdatedEvent.getRMNode(), 
-          nodeUpdatedEvent.getContainers());
+          nodeUpdatedEvent.getNewlyLaunchedContainers(),
+          nodeUpdatedEvent.getCompletedContainers());
     }
     break;
     case APP_ADDED:
@@ -624,7 +630,11 @@ public class FifoScheduler implements Re
     {
       ContainerExpiredSchedulerEvent containerExpiredEvent = 
           (ContainerExpiredSchedulerEvent) event;
-      containerCompleted(getRMContainer(containerExpiredEvent.getContainerId()), 
+      ContainerId containerid = containerExpiredEvent.getContainerId();
+      containerCompleted(getRMContainer(containerid), 
+          SchedulerUtils.createAbnormalContainerStatus(
+              containerid, 
+              SchedulerUtils.EXPIRED_CONTAINER),
           RMContainerEventType.EXPIRE);
     }
     break;
@@ -633,23 +643,23 @@ public class FifoScheduler implements Re
     }
   }
 
-  private void containerLaunchedOnNode(Container container, SchedulerNode node) {
+  private void containerLaunchedOnNode(ContainerId containerId, SchedulerNode node) {
     // Get the application for the finished container
-    ApplicationAttemptId applicationAttemptId = container.getId().getAppAttemptId();
+    ApplicationAttemptId applicationAttemptId = containerId.getAppAttemptId();
     SchedulerApp application = getApplication(applicationAttemptId);
     if (application == null) {
       LOG.info("Unknown application: " + applicationAttemptId + 
-          " launched container " + container.getId() +
+          " launched container " + containerId +
           " on node: " + node);
       return;
     }
     
-    application.containerLaunchedOnNode(container.getId());
+    application.containerLaunchedOnNode(containerId);
   }
 
   @Lock(FifoScheduler.class)
   private synchronized void containerCompleted(RMContainer rmContainer,
-      RMContainerEventType event) {
+      ContainerStatus containerStatus, RMContainerEventType event) {
     if (rmContainer == null) {
       LOG.info("Null container completed...");
       return;
@@ -672,7 +682,7 @@ public class FifoScheduler implements Re
     }
 
     // Inform the application
-    application.containerCompleted(rmContainer, event);
+    application.containerCompleted(rmContainer, containerStatus, event);
 
     // Inform the node
     node.releaseContainer(container);
@@ -691,7 +701,11 @@ public class FifoScheduler implements Re
     SchedulerNode node = getNode(nodeInfo.getNodeID());
     // Kill running containers
     for(RMContainer container : node.getRunningContainers()) {
-      containerCompleted(container, RMContainerEventType.KILL);
+      containerCompleted(container, 
+          SchedulerUtils.createAbnormalContainerStatus(
+              container.getContainerId(), 
+              SchedulerUtils.LOST_CONTAINER),
+              RMContainerEventType.KILL);
     }
     
     //Remove the node

Modified: hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java?rev=1169485&r1=1169484&r2=1169485&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java Sun Sep 11 17:28:08 2011
@@ -25,6 +25,7 @@ import java.util.Map;
 
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -53,9 +54,10 @@ public class MockNM {
   }
 
   public void containerStatus(Container container) throws Exception {
-    Map<ApplicationId, List<Container>> conts = new HashMap<ApplicationId, List<Container>>();
-    conts.put(container.getId().getAppId(), Arrays
-        .asList(new Container[] { container }));
+    Map<ApplicationId, List<ContainerStatus>> conts = 
+        new HashMap<ApplicationId, List<ContainerStatus>>();
+    conts.put(container.getId().getAppId(), 
+        Arrays.asList(new ContainerStatus[] { container.getContainerStatus() }));
     nodeHeartbeat(conts, true);
   }
 
@@ -76,16 +78,16 @@ public class MockNM {
   }
 
   public HeartbeatResponse nodeHeartbeat(boolean b) throws Exception {
-    return nodeHeartbeat(new HashMap<ApplicationId, List<Container>>(), b);
+    return nodeHeartbeat(new HashMap<ApplicationId, List<ContainerStatus>>(), b);
   }
 
   public HeartbeatResponse nodeHeartbeat(Map<ApplicationId, 
-      List<Container>> conts, boolean isHealthy) throws Exception {
+      List<ContainerStatus>> conts, boolean isHealthy) throws Exception {
     NodeHeartbeatRequest req = Records.newRecord(NodeHeartbeatRequest.class);
     NodeStatus status = Records.newRecord(NodeStatus.class);
     status.setNodeId(nodeId);
-    for (Map.Entry<ApplicationId, List<Container>> entry : conts.entrySet()) {
-      status.setContainers(entry.getKey(), entry.getValue());
+    for (Map.Entry<ApplicationId, List<ContainerStatus>> entry : conts.entrySet()) {
+      status.setContainersStatuses(entry.getValue());
     }
     NodeHealthStatus healthStatus = Records.newRecord(NodeHealthStatus.class);
     healthStatus.setHealthReport("");

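Note: with MockNM heartbeating ContainerStatus records instead of Containers, a test that wants to report a finished container now builds the status record itself. A minimal sketch, assuming a containerId obtained from an earlier allocation and the nm1 mock used in the tests below; the exit status and diagnostics values are illustrative only.

    // Hypothetical test fragment: report one COMPLETE container through the
    // new nodeHeartbeat(Map<ApplicationId, List<ContainerStatus>>, boolean) signature.
    ContainerStatus status = Records.newRecord(ContainerStatus.class);
    status.setContainerId(containerId);
    status.setState(ContainerState.COMPLETE);
    status.setExitStatus(0);
    status.setDiagnostics("");

    Map<ApplicationId, List<ContainerStatus>> conts =
        new HashMap<ApplicationId, List<ContainerStatus>>();
    conts.put(containerId.getAppId(), Collections.singletonList(status));
    nm1.nodeHeartbeat(conts, true);
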
Modified: hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java?rev=1169485&r1=1169484&r2=1169485&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java Sun Sep 11 17:28:08 2011
@@ -42,6 +42,7 @@ import org.apache.hadoop.yarn.api.record
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -53,9 +54,7 @@ import org.apache.hadoop.yarn.server.api
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.records.NodeStatus;
-import org.apache.hadoop.yarn.server.api.records.RegistrationResponse;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.apache.hadoop.yarn.util.BuilderUtils;
 
@@ -133,10 +132,19 @@ public class NodeManager implements Cont
   
   int responseID = 0;
   
+  private List<ContainerStatus> getContainerStatuses(Map<ApplicationId, List<Container>> containers) {
+    List<ContainerStatus> containerStatuses = new ArrayList<ContainerStatus>();
+    for (List<Container> appContainers : containers.values()) {
+      for (Container container : appContainers) {
+        containerStatuses.add(container.getContainerStatus());
+      }
+    }
+    return containerStatuses;
+  }
   public void heartbeat() throws IOException {
     NodeStatus nodeStatus = 
       org.apache.hadoop.yarn.server.resourcemanager.NodeManager.createNodeStatus(
-          nodeId, containers);
+          nodeId, getContainerStatuses(containers));
     nodeStatus.setResponseId(responseID);
     NodeHeartbeatRequest request = recordFactory
         .newRecordInstance(NodeHeartbeatRequest.class);
@@ -250,17 +258,29 @@ public class NodeManager implements Cont
 
   @Override
   synchronized public GetContainerStatusResponse getContainerStatus(GetContainerStatusRequest request) throws YarnRemoteException {
-    ContainerId containerID = request.getContainerId();
-    GetContainerStatusResponse response = recordFactory.newRecordInstance(GetContainerStatusResponse.class);
+    ContainerId containerId = request.getContainerId();
+    List<Container> appContainers = containers.get(containerId.getAppId());
+    Container container = null;
+    for (Container c : appContainers) {
+      if (c.getId().equals(containerId)) {
+        container = c;
+      }
+    }
+    GetContainerStatusResponse response = 
+        recordFactory.newRecordInstance(GetContainerStatusResponse.class);
+    if (container != null && container.getContainerStatus() != null) {
+      response.setStatus(container.getContainerStatus());
+    }
     return response;
   }
 
-  public static org.apache.hadoop.yarn.server.api.records.NodeStatus createNodeStatus(
-      NodeId nodeId, Map<ApplicationId, List<Container>> containers) {
+  public static org.apache.hadoop.yarn.server.api.records.NodeStatus 
+  createNodeStatus(NodeId nodeId, List<ContainerStatus> containers) {
     RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
-    org.apache.hadoop.yarn.server.api.records.NodeStatus nodeStatus = recordFactory.newRecordInstance(org.apache.hadoop.yarn.server.api.records.NodeStatus.class);
+    org.apache.hadoop.yarn.server.api.records.NodeStatus nodeStatus = 
+        recordFactory.newRecordInstance(org.apache.hadoop.yarn.server.api.records.NodeStatus.class);
     nodeStatus.setNodeId(nodeId);
-    nodeStatus.addAllContainers(containers);
+    nodeStatus.setContainersStatuses(containers);
     NodeHealthStatus nodeHealthStatus = 
       recordFactory.newRecordInstance(NodeHealthStatus.class);
     nodeHealthStatus.setIsNodeHealthy(true);

Modified: hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java?rev=1169485&r1=1169484&r2=1169485&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java Sun Sep 11 17:28:08 2011
@@ -66,11 +66,11 @@ public class TestApplicationCleanup {
     //kick the scheduler
     nm1.nodeHeartbeat(true);
     List<Container> conts = am.allocate(new ArrayList<ResourceRequest>(),
-        new ArrayList<ContainerId>()).getNewContainerList();
+        new ArrayList<ContainerId>()).getAllocatedContainers();
     int contReceived = conts.size();
     while (contReceived < request) {
       conts = am.allocate(new ArrayList<ResourceRequest>(),
-          new ArrayList<ContainerId>()).getNewContainerList();
+          new ArrayList<ContainerId>()).getAllocatedContainers();
       contReceived += conts.size();
       Log.info("Got " + contReceived + " containers. Waiting to get " + request);
       Thread.sleep(2000);

Modified: hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java?rev=1169485&r1=1169484&r2=1169485&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java Sun Sep 11 17:28:08 2011
@@ -92,12 +92,12 @@ public class TestFifoScheduler {
 
     // kick the scheduler, 1 GB and 3 GB given to AM1 and AM2, remaining 0
     nm1.nodeHeartbeat(true);
-    while (am1Response.getNewContainerCount() < 1) {
+    while (am1Response.getAllocatedContainers().size() < 1) {
       LOG.info("Waiting for containers to be created for app 1...");
       Thread.sleep(1000);
       am1Response = am1.schedule();
     }
-    while (am2Response.getNewContainerCount() < 1) {
+    while (am2Response.getAllocatedContainers().size() < 1) {
       LOG.info("Waiting for containers to be created for app 2...");
       Thread.sleep(1000);
       am2Response = am2.schedule();
@@ -105,12 +105,12 @@ public class TestFifoScheduler {
     // kick the scheduler, nothing given remaining 2 GB.
     nm2.nodeHeartbeat(true);
 
-    List<Container> allocated1 = am1Response.getNewContainerList();
+    List<Container> allocated1 = am1Response.getAllocatedContainers();
     Assert.assertEquals(1, allocated1.size());
     Assert.assertEquals(1 * GB, allocated1.get(0).getResource().getMemory());
     Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());
 
-    List<Container> allocated2 = am2Response.getNewContainerList();
+    List<Container> allocated2 = am2Response.getAllocatedContainers();
     Assert.assertEquals(1, allocated2.size());
     Assert.assertEquals(3 * GB, allocated2.get(0).getResource().getMemory());
     Assert.assertEquals(nm1.getNodeId(), allocated2.get(0).getNodeId());
@@ -137,7 +137,7 @@ public class TestFifoScheduler {
       Thread.sleep(1000);
     }
     Assert.assertEquals(1, attempt1.getJustFinishedContainers().size());
-    Assert.assertEquals(1, am1.schedule().getFinishedContainerList().size());
+    Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses().size());
     Assert.assertEquals(5 * GB, rm.getResourceScheduler().getUsedResource(
         nm1.getNodeId()).getMemory());
 

Modified: hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java?rev=1169485&r1=1169484&r2=1169485&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java Sun Sep 11 17:28:08 2011
@@ -86,11 +86,11 @@ public class TestRM {
     //kick the scheduler
     nm1.nodeHeartbeat(true);
     List<Container> conts = am.allocate(new ArrayList<ResourceRequest>(),
-        new ArrayList<ContainerId>()).getNewContainerList();
+        new ArrayList<ContainerId>()).getAllocatedContainers();
     int contReceived = conts.size();
     while (contReceived < 3) {//only 3 containers are available on node1
       conts.addAll(am.allocate(new ArrayList<ResourceRequest>(),
-          new ArrayList<ContainerId>()).getNewContainerList());
+          new ArrayList<ContainerId>()).getAllocatedContainers());
       contReceived = conts.size();
       LOG.info("Got " + contReceived + " containers. Waiting to get " + 3);
       Thread.sleep(2000);
@@ -100,11 +100,11 @@ public class TestRM {
     //send node2 heartbeat
     nm2.nodeHeartbeat(true);
     conts = am.allocate(new ArrayList<ResourceRequest>(),
-        new ArrayList<ContainerId>()).getNewContainerList();
+        new ArrayList<ContainerId>()).getAllocatedContainers();
     contReceived = conts.size();
     while (contReceived < 10) {
       conts.addAll(am.allocate(new ArrayList<ResourceRequest>(),
-          new ArrayList<ContainerId>()).getNewContainerList());
+          new ArrayList<ContainerId>()).getAllocatedContainers());
       contReceived = conts.size();
       LOG.info("Got " + contReceived + " containers. Waiting to get " + 10);
       Thread.sleep(2000);

Modified: hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java?rev=1169485&r1=1169484&r2=1169485&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java Sun Sep 11 17:28:08 2011
@@ -33,6 +33,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
@@ -148,7 +149,8 @@ public class TestLeafQueue {
     CSQueue parent = queue.getParent();
     doNothing().when(parent).completedContainer(
         any(Resource.class), any(SchedulerApp.class), any(SchedulerNode.class), 
-        any(RMContainer.class), any(RMContainerEventType.class));
+        any(RMContainer.class), any(ContainerStatus.class), 
+        any(RMContainerEventType.class));
     
     return queue;
   }
@@ -238,7 +240,7 @@ public class TestLeafQueue {
     // Release each container from app_0
     for (RMContainer rmContainer : app_0.getLiveContainers()) {
       a.completedContainer(clusterResource, app_0, node_0, rmContainer, 
-          RMContainerEventType.KILL);
+          null, RMContainerEventType.KILL);
     }
     assertEquals(1*GB, a.getUsedResources().getMemory());
     assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
@@ -247,7 +249,7 @@ public class TestLeafQueue {
     // Release each container from app_1
     for (RMContainer rmContainer : app_1.getLiveContainers()) {
       a.completedContainer(clusterResource, app_1, node_0, rmContainer, 
-          RMContainerEventType.KILL);
+          null, RMContainerEventType.KILL);
     }
     assertEquals(0*GB, a.getUsedResources().getMemory());
     assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
@@ -392,7 +394,7 @@ public class TestLeafQueue {
     // 8. Release each container from app_0
     for (RMContainer rmContainer : app_0.getLiveContainers()) {
       a.completedContainer(clusterResource, app_0, node_0, rmContainer, 
-          RMContainerEventType.KILL);
+          null, RMContainerEventType.KILL);
     }
     assertEquals(5*GB, a.getUsedResources().getMemory());
     assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
@@ -403,7 +405,7 @@ public class TestLeafQueue {
     // 9. Release each container from app_2
     for (RMContainer rmContainer : app_2.getLiveContainers()) {
       a.completedContainer(clusterResource, app_2, node_0, rmContainer, 
-          RMContainerEventType.KILL);
+          null, RMContainerEventType.KILL);
     }
     assertEquals(2*GB, a.getUsedResources().getMemory());
     assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
@@ -414,7 +416,7 @@ public class TestLeafQueue {
     // 10. Release each container from app_3
     for (RMContainer rmContainer : app_3.getLiveContainers()) {
       a.completedContainer(clusterResource, app_3, node_0, rmContainer, 
-          RMContainerEventType.KILL);
+          null, RMContainerEventType.KILL);
     }
     assertEquals(0*GB, a.getUsedResources().getMemory());
     assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
@@ -489,7 +491,7 @@ public class TestLeafQueue {
     
     // Now free 1 container from app_0 i.e. 1G
     a.completedContainer(clusterResource, app_0, node_0, 
-        app_0.getLiveContainers().iterator().next(), RMContainerEventType.KILL);
+        app_0.getLiveContainers().iterator().next(), null, RMContainerEventType.KILL);
     a.assignContainers(clusterResource, node_0);
     assertEquals(5*GB, a.getUsedResources().getMemory()); 
     assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
@@ -499,7 +501,7 @@ public class TestLeafQueue {
 
     // Now finish another container from app_0 and fulfill the reservation
     a.completedContainer(clusterResource, app_0, node_0, 
-        app_0.getLiveContainers().iterator().next(), RMContainerEventType.KILL);
+        app_0.getLiveContainers().iterator().next(), null, RMContainerEventType.KILL);
     a.assignContainers(clusterResource, node_0);
     assertEquals(4*GB, a.getUsedResources().getMemory());
     assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
@@ -582,7 +584,7 @@ public class TestLeafQueue {
     
     // Now free 1 container from app_0 i.e. 1G, and re-reserve it
     a.completedContainer(clusterResource, app_0, node_0, 
-        app_0.getLiveContainers().iterator().next(), RMContainerEventType.KILL);
+        app_0.getLiveContainers().iterator().next(), null, RMContainerEventType.KILL);
     a.assignContainers(clusterResource, node_0);
     assertEquals(5*GB, a.getUsedResources().getMemory()); 
     assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
@@ -613,7 +615,7 @@ public class TestLeafQueue {
     
     // Now finish another container from app_0 and see the reservation cancelled
     a.completedContainer(clusterResource, app_0, node_0, 
-        app_0.getLiveContainers().iterator().next(), RMContainerEventType.KILL);
+        app_0.getLiveContainers().iterator().next(), null, RMContainerEventType.KILL);
     a.assignContainers(clusterResource, node_0);
     assertEquals(4*GB, a.getUsedResources().getMemory());
     assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());

Modified: hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerTokenSecretManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerTokenSecretManager.java?rev=1169485&r1=1169484&r2=1169485&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerTokenSecretManager.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerTokenSecretManager.java Sun Sep 11 17:28:08 2011
@@ -241,7 +241,7 @@ public class TestContainerTokenSecretMan
     allocateRequest.addAllAsks(ask);
     allocateRequest.addAllReleases(release);
     List<Container> allocatedContainers = scheduler.allocate(allocateRequest)
-        .getAMResponse().getNewContainerList();
+        .getAMResponse().getAllocatedContainers();
 
     waitCounter = 0;
     while ((allocatedContainers == null || allocatedContainers.size() == 0)
@@ -251,7 +251,7 @@ public class TestContainerTokenSecretMan
       allocateRequest.setResponseId(allocateRequest.getResponseId() + 1);
       allocatedContainers =
           scheduler.allocate(allocateRequest).getAMResponse()
-              .getNewContainerList();
+              .getAllocatedContainers();
     }
 
     Assert.assertNotNull("Container is not allocted!", allocatedContainers);