Posted to mapreduce-commits@hadoop.apache.org by at...@apache.org on 2011/09/14 00:49:38 UTC

svn commit: r1170378 [10/12] - in /hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project: ./ conf/ dev-support/ hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/ hadoop-mapreduce-client/hadoop-mapreduce-cli...

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java Tue Sep 13 22:49:27 2011
@@ -37,6 +37,7 @@ import org.apache.hadoop.security.Access
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueState;
@@ -53,11 +54,11 @@ import org.apache.hadoop.yarn.server.res
 
 @Private
 @Evolving
-public class ParentQueue implements Queue {
+public class ParentQueue implements CSQueue {
 
   private static final Log LOG = LogFactory.getLog(ParentQueue.class);
 
-  private final Queue parent;
+  private final CSQueue parent;
   private final String queueName;
   
   private float capacity;
@@ -68,8 +69,8 @@ public class ParentQueue implements Queu
   private float usedCapacity = 0.0f;
   private float utilization = 0.0f;
 
-  private final Set<Queue> childQueues;
-  private final Comparator<Queue> queueComparator;
+  private final Set<CSQueue> childQueues;
+  private final Comparator<CSQueue> queueComparator;
   
   private Resource usedResources = 
     Resources.createResource(0);
@@ -94,7 +95,7 @@ public class ParentQueue implements Queu
     RecordFactoryProvider.getRecordFactory(null);
 
   public ParentQueue(CapacitySchedulerContext cs, 
-      String queueName, Comparator<Queue> comparator, Queue parent, Queue old) {
+      String queueName, Comparator<CSQueue> comparator, CSQueue parent, CSQueue old) {
     minimumAllocation = cs.getMinimumResourceCapability();
     
     this.parent = parent;
@@ -140,7 +141,7 @@ public class ParentQueue implements Queu
         maximumCapacity, absoluteMaxCapacity, state, acls);
     
     this.queueComparator = comparator;
-    this.childQueues = new TreeSet<Queue>(comparator);
+    this.childQueues = new TreeSet<CSQueue>(comparator);
 
     LOG.info("Initialized parent-queue " + queueName + 
         " name=" + queueName + 
@@ -180,11 +181,11 @@ public class ParentQueue implements Queu
   }
 
   private static float PRECISION = 0.005f; // 0.05% precision
-  void setChildQueues(Collection<Queue> childQueues) {
+  void setChildQueues(Collection<CSQueue> childQueues) {
     
     // Validate
     float childCapacities = 0;
-    for (Queue queue : childQueues) {
+    for (CSQueue queue : childQueues) {
       childCapacities += queue.getCapacity();
     }
     float delta = Math.abs(1.0f - childCapacities);  // crude way to check
@@ -200,7 +201,7 @@ public class ParentQueue implements Queu
   }
   
   @Override
-  public Queue getParent() {
+  public CSQueue getParent() {
     return parent;
   }
 
@@ -251,8 +252,8 @@ public class ParentQueue implements Queu
   }
 
   @Override
-  public synchronized List<Queue> getChildQueues() {
-    return new ArrayList<Queue>(childQueues);
+  public synchronized List<CSQueue> getChildQueues() {
+    return new ArrayList<CSQueue>(childQueues);
   }
 
   public synchronized int getNumContainers() {
@@ -280,7 +281,7 @@ public class ParentQueue implements Queu
 
     List<QueueInfo> childQueuesInfo = new ArrayList<QueueInfo>();
     if (includeChildQueues) {
-      for (Queue child : childQueues) {
+      for (CSQueue child : childQueues) {
         // Get queue information recursively?
         childQueuesInfo.add(
             child.getQueueInfo(recursive, recursive));
@@ -319,7 +320,7 @@ public class ParentQueue implements Queu
     userAcls.add(getUserAclInfo(user));
     
     // Add children queue acls
-    for (Queue child : childQueues) {
+    for (CSQueue child : childQueues) {
       userAcls.addAll(child.getQueueUserAclInfo(user));
     }
     return userAcls;
@@ -333,7 +334,7 @@ public class ParentQueue implements Queu
   }
   
   @Override
-  public synchronized void reinitialize(Queue queue, Resource clusterResource)
+  public synchronized void reinitialize(CSQueue queue, Resource clusterResource)
   throws IOException {
     // Sanity check
     if (!(queue instanceof ParentQueue) ||
@@ -346,13 +347,13 @@ public class ParentQueue implements Queu
 
     // Re-configure existing child queues and add new ones
     // The CS has already checked to ensure all existing child queues are present!
-    Map<String, Queue> currentChildQueues = getQueues(childQueues);
-    Map<String, Queue> newChildQueues = getQueues(parentQueue.childQueues);
-    for (Map.Entry<String, Queue> e : newChildQueues.entrySet()) {
+    Map<String, CSQueue> currentChildQueues = getQueues(childQueues);
+    Map<String, CSQueue> newChildQueues = getQueues(parentQueue.childQueues);
+    for (Map.Entry<String, CSQueue> e : newChildQueues.entrySet()) {
       String newChildQueueName = e.getKey();
-      Queue newChildQueue = e.getValue();
+      CSQueue newChildQueue = e.getValue();
 
-      Queue childQueue = currentChildQueues.get(newChildQueueName);
+      CSQueue childQueue = currentChildQueues.get(newChildQueueName);
       if (childQueue != null){
         childQueue.reinitialize(newChildQueue, clusterResource);
         LOG.info(getQueueName() + ": re-configured queue: " + childQueue);
@@ -375,9 +376,9 @@ public class ParentQueue implements Queu
     updateResource(clusterResource);
   }
 
-  Map<String, Queue> getQueues(Set<Queue> queues) {
-    Map<String, Queue> queuesMap = new HashMap<String, Queue>();
-    for (Queue queue : queues) {
+  Map<String, CSQueue> getQueues(Set<CSQueue> queues) {
+    Map<String, CSQueue> queuesMap = new HashMap<String, CSQueue>();
+    for (CSQueue queue : queues) {
       queuesMap.put(queue.getQueueName(), queue);
     }
     return queuesMap;
@@ -568,8 +569,8 @@ public class ParentQueue implements Queu
     printChildQueues();
 
     // Try to assign to most 'under-served' sub-queue
-    for (Iterator<Queue> iter=childQueues.iterator(); iter.hasNext();) {
-      Queue childQueue = iter.next();
+    for (Iterator<CSQueue> iter=childQueues.iterator(); iter.hasNext();) {
+      CSQueue childQueue = iter.next();
       LOG.info("DEBUG --- Trying to assign to" +
       		" queue: " + childQueue.getQueuePath() + 
       		" stats: " + childQueue);
@@ -595,7 +596,7 @@ public class ParentQueue implements Queu
 
   String getChildQueuesToPrint() {
     StringBuilder sb = new StringBuilder();
-    for (Queue q : childQueues) {
+    for (CSQueue q : childQueues) {
       sb.append(q.getQueuePath() + "(" + q.getUtilization() + "), ");
     }
     return sb.toString();
@@ -608,7 +609,7 @@ public class ParentQueue implements Queu
   @Override
   public void completedContainer(Resource clusterResource,
       SchedulerApp application, SchedulerNode node, 
-      RMContainer rmContainer, RMContainerEventType event) {
+      RMContainer rmContainer, ContainerStatus containerStatus, RMContainerEventType event) {
     if (application != null) {
       // Careful! Locking order is important!
       // Book keeping
@@ -626,7 +627,7 @@ public class ParentQueue implements Queu
       // Inform the parent
       if (parent != null) {
         parent.completedContainer(clusterResource, application, 
-            node, rmContainer, event);
+            node, rmContainer, null, event);
       }    
     }
   }
@@ -648,7 +649,7 @@ public class ParentQueue implements Queu
   @Override
   public synchronized void updateClusterResource(Resource clusterResource) {
     // Update all children
-    for (Queue childQueue : childQueues) {
+    for (CSQueue childQueue : childQueues) {
       childQueue.updateClusterResource(clusterResource);
     }
   }
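
For readers following the Queue -> CSQueue rename above: childQueues stays a
TreeSet so that the container-assignment loop shown above (the "most
'under-served' sub-queue" iteration) visits children in the order imposed by
the injected comparator. A minimal standalone sketch of that ordering idea,
using a hypothetical comparator on utilization (the real CapacityScheduler
supplies its own CSQueue comparator):

    import java.util.Comparator;
    import java.util.TreeSet;

    public class QueueOrderingSketch {
      // Hypothetical stand-in for the two CSQueue methods this sketch needs.
      interface CSQueue {
        String getQueuePath();
        float getUtilization();
      }

      public static void main(String[] args) {
        // Least-utilized (most under-served) queues sort first.
        Comparator<CSQueue> byUtilization =
            (a, b) -> Float.compare(a.getUtilization(), b.getUtilization());

        TreeSet<CSQueue> childQueues = new TreeSet<CSQueue>(byUtilization);
        childQueues.add(queue("root.a", 0.9f));
        childQueues.add(queue("root.b", 0.2f));

        // Iteration visits root.b first, mirroring the assignment loop above.
        for (CSQueue q : childQueues) {
          System.out.println(q.getQueuePath() + " util=" + q.getUtilization());
        }
      }

      static CSQueue queue(final String path, final float util) {
        return new CSQueue() {
          public String getQueuePath() { return path; }
          public float getUtilization() { return util; }
        };
      }
    }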

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeUpdateSchedulerEvent.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeUpdateSchedulerEvent.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeUpdateSchedulerEvent.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/NodeUpdateSchedulerEvent.java Tue Sep 13 22:49:27 2011
@@ -23,26 +23,33 @@ import java.util.Map;
 
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 
 public class NodeUpdateSchedulerEvent extends SchedulerEvent {
 
   private final RMNode rmNode;
-  private final Map<ApplicationId, List<Container>> containers;
+  private final List<ContainerStatus> newlyLaunchedContainers;
+  private final List<ContainerStatus> completedContainersStatuses;
 
   public NodeUpdateSchedulerEvent(RMNode rmNode,
-      Map<ApplicationId, List<Container>> containers) {
+      List<ContainerStatus> newlyLaunchedContainers,
+      List<ContainerStatus> completedContainers) {
     super(SchedulerEventType.NODE_UPDATE);
     this.rmNode = rmNode;
-    this.containers = containers;
+    this.newlyLaunchedContainers = newlyLaunchedContainers;
+    this.completedContainersStatuses = completedContainers;
   }
 
   public RMNode getRMNode() {
     return rmNode;
   }
 
-  public Map<ApplicationId, List<Container>> getContainers() {
-    return containers;
+  public List<ContainerStatus> getNewlyLaunchedContainers() {
+    return newlyLaunchedContainers;
   }
 
+  public List<ContainerStatus> getCompletedContainers() {
+    return completedContainersStatuses;
+  }
 }
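
The event now carries two flat ContainerStatus lists in place of the old
per-application Container map. A minimal sketch (hypothetical driver code, not
part of this commit) of how a node-heartbeat path would hand those lists to a
scheduler:

    import java.util.List;

    import org.apache.hadoop.yarn.api.records.ContainerStatus;
    import org.apache.hadoop.yarn.event.EventHandler;
    import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;

    class NodeUpdateSketch {
      // Forwards one heartbeat's worth of container statuses to the scheduler,
      // which dispatches on SchedulerEventType.NODE_UPDATE (see the
      // FifoScheduler changes below). The scheduler is typed as a plain
      // EventHandler here only to keep the sketch self-contained.
      static void forwardHeartbeat(EventHandler<SchedulerEvent> scheduler,
          RMNode node, List<ContainerStatus> launched,
          List<ContainerStatus> completed) {
        scheduler.handle(new NodeUpdateSchedulerEvent(node, launched, completed));
      }
    }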

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java Tue Sep 13 22:49:27 2011
@@ -39,10 +39,9 @@ import org.apache.hadoop.security.UserGr
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.yarn.Lock;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.ContainerToken;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
@@ -51,7 +50,6 @@ import org.apache.hadoop.yarn.api.record
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
@@ -67,7 +65,6 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
@@ -75,6 +72,7 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerExpiredSchedulerEvent;
@@ -84,6 +82,7 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
 import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.apache.hadoop.yarn.api.records.QueueState;
 
 @LimitedPrivate("yarn")
 @Evolving
@@ -91,7 +90,7 @@ public class FifoScheduler implements Re
 
   private static final Log LOG = LogFactory.getLog(FifoScheduler.class);
 
-  private final RecordFactory recordFactory = 
+  private static final RecordFactory recordFactory = 
     RecordFactoryProvider.getRecordFactory(null);
 
   Configuration conf;
@@ -105,8 +104,7 @@ public class FifoScheduler implements Re
 
   private static final int MINIMUM_MEMORY = 1024;
 
-  private static final String FIFO_PREFIX = 
-    YarnConfiguration.RM_PREFIX + "fifo.";
+  private static final String FIFO_PREFIX =  "yarn.scheduler.fifo.";
   @Private
   public static final String MINIMUM_ALLOCATION = 
     FIFO_PREFIX + "minimum-allocation-mb";
@@ -147,6 +145,7 @@ public class FifoScheduler implements Re
       queueInfo.setCapacity(100.0f);
       queueInfo.setMaximumCapacity(100.0f);
       queueInfo.setChildQueues(new ArrayList<QueueInfo>());
+      queueInfo.setQueueState(QueueState.RUNNING);
       return queueInfo;
     }
 
@@ -235,7 +234,11 @@ public class FifoScheduler implements Re
              "Trying to release container not owned by app or with invalid id",
              application.getApplicationId(), releasedContainer);
       }
-      containerCompleted(rmContainer, RMContainerEventType.RELEASED);
+      containerCompleted(rmContainer,
+          SchedulerUtils.createAbnormalContainerStatus(
+              releasedContainer, 
+              SchedulerUtils.RELEASED_CONTAINER),
+          RMContainerEventType.RELEASED);
     }
 
     if (!ask.isEmpty()) {
@@ -313,7 +316,11 @@ public class FifoScheduler implements Re
 
     // Kill all 'live' containers
     for (RMContainer container : application.getLiveContainers()) {
-      containerCompleted(container, RMContainerEventType.KILL);
+      containerCompleted(container, 
+          SchedulerUtils.createAbnormalContainerStatus(
+              container.getContainerId(), 
+              SchedulerUtils.COMPLETED_APPLICATION),
+          RMContainerEventType.KILL);
     }
 
     // Clean up pending requests, metrics etc.
@@ -543,25 +550,22 @@ public class FifoScheduler implements Re
     return assignedContainers;
   }
 
-  private synchronized void nodeUpdate(RMNode rmNode,
-      Map<ApplicationId, List<Container>> remoteContainers) {
+  private synchronized void nodeUpdate(RMNode rmNode, 
+      List<ContainerStatus> newlyLaunchedContainers,
+      List<ContainerStatus> completedContainers) {
     SchedulerNode node = getNode(rmNode.getNodeID());
     
-    for (List<Container> appContainers : remoteContainers.values()) {
-      for (Container container : appContainers) {
-        /* make sure the scheduler hasnt already removed the applications */
-        if (getApplication(container.getId().getAppAttemptId()) != null) {
-          if (container.getState() == ContainerState.RUNNING) {
-            containerLaunchedOnNode(container, node);
-          } else { // has to COMPLETE
-            containerCompleted(getRMContainer(container.getId()), 
-                RMContainerEventType.FINISHED);
-          }
-        }
-        else {
-          LOG.warn("Scheduler not tracking application " + container.getId().getAppAttemptId());
-        }
-      }
+    // Processing the newly launched containers
+    for (ContainerStatus launchedContainer : newlyLaunchedContainers) {
+      containerLaunchedOnNode(launchedContainer.getContainerId(), node);
+    }
+
+    // Process completed containers
+    for (ContainerStatus completedContainer : completedContainers) {
+      ContainerId containerId = completedContainer.getContainerId();
+      LOG.info("DEBUG --- Container FINISHED: " + containerId);
+      containerCompleted(getRMContainer(containerId), 
+          completedContainer, RMContainerEventType.FINISHED);
     }
 
     if (Resources.greaterThanOrEqual(node.getAvailableResource(),
@@ -599,7 +603,8 @@ public class FifoScheduler implements Re
       NodeUpdateSchedulerEvent nodeUpdatedEvent = 
       (NodeUpdateSchedulerEvent)event;
       nodeUpdate(nodeUpdatedEvent.getRMNode(), 
-          nodeUpdatedEvent.getContainers());
+          nodeUpdatedEvent.getNewlyLaunchedContainers(),
+          nodeUpdatedEvent.getCompletedContainers());
     }
     break;
     case APP_ADDED:
@@ -625,7 +630,11 @@ public class FifoScheduler implements Re
     {
       ContainerExpiredSchedulerEvent containerExpiredEvent = 
           (ContainerExpiredSchedulerEvent) event;
-      containerCompleted(getRMContainer(containerExpiredEvent.getContainerId()), 
+      ContainerId containerid = containerExpiredEvent.getContainerId();
+      containerCompleted(getRMContainer(containerid), 
+          SchedulerUtils.createAbnormalContainerStatus(
+              containerid, 
+              SchedulerUtils.EXPIRED_CONTAINER),
           RMContainerEventType.EXPIRE);
     }
     break;
@@ -634,23 +643,23 @@ public class FifoScheduler implements Re
     }
   }
 
-  private void containerLaunchedOnNode(Container container, SchedulerNode node) {
+  private void containerLaunchedOnNode(ContainerId containerId, SchedulerNode node) {
     // Get the application for the finished container
-    ApplicationAttemptId applicationAttemptId = container.getId().getAppAttemptId();
+    ApplicationAttemptId applicationAttemptId = containerId.getApplicationAttemptId();
     SchedulerApp application = getApplication(applicationAttemptId);
     if (application == null) {
       LOG.info("Unknown application: " + applicationAttemptId + 
-          " launched container " + container.getId() +
+          " launched container " + containerId +
           " on node: " + node);
       return;
     }
     
-    application.containerLaunchedOnNode(container.getId());
+    application.containerLaunchedOnNode(containerId);
   }
 
   @Lock(FifoScheduler.class)
   private synchronized void containerCompleted(RMContainer rmContainer,
-      RMContainerEventType event) {
+      ContainerStatus containerStatus, RMContainerEventType event) {
     if (rmContainer == null) {
       LOG.info("Null container completed...");
       return;
@@ -658,7 +667,7 @@ public class FifoScheduler implements Re
 
     // Get the application for the finished container
     Container container = rmContainer.getContainer();
-    ApplicationAttemptId applicationAttemptId = container.getId().getAppAttemptId();
+    ApplicationAttemptId applicationAttemptId = container.getId().getApplicationAttemptId();
     SchedulerApp application = getApplication(applicationAttemptId);
     
     // Get the node on which the container was allocated
@@ -673,7 +682,7 @@ public class FifoScheduler implements Re
     }
 
     // Inform the application
-    application.containerCompleted(rmContainer, event);
+    application.containerCompleted(rmContainer, containerStatus, event);
 
     // Inform the node
     node.releaseContainer(container);
@@ -692,7 +701,11 @@ public class FifoScheduler implements Re
     SchedulerNode node = getNode(nodeInfo.getNodeID());
     // Kill running containers
     for(RMContainer container : node.getRunningContainers()) {
-      containerCompleted(container, RMContainerEventType.KILL);
+      containerCompleted(container, 
+          SchedulerUtils.createAbnormalContainerStatus(
+              container.getContainerId(), 
+              SchedulerUtils.LOST_CONTAINER),
+              RMContainerEventType.KILL);
     }
     
     //Remove the node
@@ -738,7 +751,7 @@ public class FifoScheduler implements Re
   
   private RMContainer getRMContainer(ContainerId containerId) {
     SchedulerApp application = 
-        getApplication(containerId.getAppAttemptId());
+        getApplication(containerId.getApplicationAttemptId());
     return (application == null) ? null : application.getRMContainer(containerId);
   }
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/tools/RMAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/tools/RMAdmin.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/tools/RMAdmin.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/tools/RMAdmin.java Tue Sep 13 22:49:27 2011
@@ -35,7 +35,6 @@ import org.apache.hadoop.yarn.factories.
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.security.admin.AdminSecurityInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.RMConfig;
 import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocol;
 import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesRequest;
@@ -146,8 +145,8 @@ public class RMAdmin extends Configured 
 
     // Create the client
     final String adminAddress =
-      conf.get(RMConfig.ADMIN_ADDRESS,
-          RMConfig.DEFAULT_ADMIN_BIND_ADDRESS);
+      conf.get(YarnConfiguration.RM_ADMIN_ADDRESS,
+          YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS);
     final YarnRPC rpc = YarnRPC.create(conf);
     
     if (UserGroupInformation.isSecurityEnabled()) {

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java Tue Sep 13 22:49:27 2011
@@ -56,7 +56,7 @@ class AppsBlock extends HtmlBlock {
         tbody();
     int i = 0;
     for (RMApp app : list.apps.values()) {
-      String appId = Apps.toString(app.getApplicationId());
+      String appId = app.getApplicationId().toString();
       String trackingUrl = app.getTrackingUrl();
       String ui = trackingUrl == null || trackingUrl.isEmpty() ? "UNASSIGNED" :
           (app.getFinishTime() == 0 ? "ApplicationMaster" : "JobHistory");

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java Tue Sep 13 22:49:27 2011
@@ -60,7 +60,7 @@ class AppsList implements ToJSON {
       } else {
         out.append(",\n");
       }
-      String appID = Apps.toString(app.getApplicationId());
+      String appID = app.getApplicationId().toString();
       String trackingUrl = app.getTrackingUrl();
       String ui = trackingUrl == null ? "UNASSIGNED" :
           (app.getFinishTime() == 0 ? "ApplicationMaster" : "JobHistory");

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java Tue Sep 13 22:49:27 2011
@@ -24,7 +24,7 @@ import com.google.inject.servlet.Request
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Queue;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*;
@@ -43,7 +43,7 @@ class CapacitySchedulerPage extends RmVi
 
   @RequestScoped
   static class Parent {
-    Queue queue;
+    CSQueue queue;
   }
 
   public static class QueueBlock extends HtmlBlock {
@@ -56,8 +56,8 @@ class CapacitySchedulerPage extends RmVi
     @Override
     public void render(Block html) {
       UL<Hamlet> ul = html.ul();
-      Queue parentQueue = parent.queue;
-      for (Queue queue : parentQueue.getChildQueues()) {
+      CSQueue parentQueue = parent.queue;
+      for (CSQueue queue : parentQueue.getChildQueues()) {
         float used = queue.getUsedCapacity();
         float set = queue.getCapacity();
         float delta = Math.abs(set - used) + 0.001f;
@@ -109,7 +109,7 @@ class CapacitySchedulerPage extends RmVi
               span().$style(Q_END)._("100% ")._().
               span(".q", "default")._()._();
       } else {
-        Queue root = cs.getRootQueue();
+        CSQueue root = cs.getRootQueue();
         parent.queue = root;
         float used = root.getUsedCapacity();
         float set = root.getCapacity();

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java Tue Sep 13 22:49:27 2011
@@ -18,20 +18,177 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
+import com.google.inject.Inject;
+import com.google.inject.servlet.RequestScoped;
+
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.api.records.QueueState;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.webapp.view.InfoBlock;
 
+import static org.apache.hadoop.yarn.util.StringHelper.*;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
 
 class DefaultSchedulerPage extends RmView {
+  static final String _Q = ".ui-state-default.ui-corner-all";
+  static final float WIDTH_F = 0.8f;
+  static final String Q_END = "left:101%";
+  static final String OVER = "font-size:1px;background:rgba(255, 140, 0, 0.8)";
+  static final String UNDER = "font-size:1px;background:rgba(50, 205, 50, 0.8)";
+  static final float EPSILON = 1e-8f;
+
+  static class QueueInfoBlock extends HtmlBlock {
+    final RMContext rmContext;
+    final FifoScheduler fs;
+    final String qName;
+    final QueueInfo qInfo;
+
+    @Inject QueueInfoBlock(RMContext context, ViewContext ctx, ResourceManager rm) {
+      super(ctx);
+      this.rmContext = context;
+
+      fs = (FifoScheduler) rm.getResourceScheduler();
+      qName = fs.getQueueInfo("",false,false).getQueueName();
+      qInfo = fs.getQueueInfo(qName,true,true);
+    }
 
-  static class QueueBlock extends HtmlBlock {
     @Override public void render(Block html) {
-      html.h2("Under construction");
+      String minmemoryresource = 
+                Integer.toString(fs.getMinimumResourceCapability().getMemory());
+      String maxmemoryresource = 
+                Integer.toString(fs.getMaximumResourceCapability().getMemory());
+      String qstate = (qInfo.getQueueState() == QueueState.RUNNING) ?
+                       "Running" :
+                           (qInfo.getQueueState() == QueueState.STOPPED) ?
+                                  "Stopped" : "Unknown";
+
+      int usedNodeMem      = 0;
+      int availNodeMem     = 0;
+      int totNodeMem       = 0;
+      int nodeContainers   = 0;
+
+      for (RMNode ni : this.rmContext.getRMNodes().values()) {
+        usedNodeMem += fs.getUsedResource(ni.getNodeID()).getMemory();
+        availNodeMem += fs.getAvailableResource(ni.getNodeID()).getMemory();
+        totNodeMem += ni.getTotalCapability().getMemory();
+        nodeContainers += fs.getNodeReport(ni.getNodeID()).getNumContainers();
+      }
+
+      info("\'" + qName + "\' Queue Status").
+        _("Queue State:" , qstate).
+        _("Minimum Queue Memory Capacity:" , minmemoryresource).
+        _("Maximum Queue Memory Capacity:" , maxmemoryresource).
+        _("Number of Nodes:" , Integer.toString(this.rmContext.getRMNodes().size())).
+        _("Used Node Capacity:" , Integer.toString(usedNodeMem)).
+        _("Available Node Capacity:" , Integer.toString(availNodeMem)).
+        _("Total Node Capacity:" , Integer.toString(totNodeMem)).
+        _("Number of Node Containers:" , Integer.toString(nodeContainers));
+
+      html._(InfoBlock.class);
     }
   }
 
+  static class QueuesBlock extends HtmlBlock {
+    final FifoScheduler fs;
+    final String qName;
+    final QueueInfo qInfo;
+
+    @Inject QueuesBlock(ResourceManager rm) {
+      fs = (FifoScheduler) rm.getResourceScheduler();
+      qName = fs.getQueueInfo("",false,false).getQueueName();
+      qInfo = fs.getQueueInfo(qName,false,false);
+    }
+
+    @Override
+    public void render(Block html) {
+      UL<DIV<DIV<Hamlet>>> ul = html.
+        div("#cs-wrapper.ui-widget").
+          div(".ui-widget-header.ui-corner-top").
+            _("FifoScheduler Queue")._().
+          div("#cs.ui-widget-content.ui-corner-bottom").
+            ul();
+
+      if (fs == null) {
+        ul.
+          li().
+            a(_Q).$style(width(WIDTH_F)).
+              span().$style(Q_END)._("100% ")._().
+              span(".q", "default")._()._();
+      } else {
+        float used = qInfo.getCurrentCapacity() / 100.0f;
+        float set = qInfo.getCapacity() / 100.0f;
+        float delta = Math.abs(set - used) + 0.001f;
+        ul.
+          li().
+            a(_Q).$style(width(WIDTH_F)).
+              $title(join("used:", percent(used))).
+              span().$style(Q_END)._("100%")._().
+              span().$style(join(width(delta), ';', used > set ? OVER : UNDER,
+                ';', used > set ? left(set) : left(used)))._(".")._().
+              span(".q", qName)._().
+            _(QueueInfoBlock.class)._();
+      }
+
+      ul._()._().
+      script().$type("text/javascript").
+          _("$('#cs').hide();")._()._().
+      _(AppsBlock.class);
+    }
+  }
+
+
+  @Override protected void postHead(Page.HTML<_> html) {
+    html.
+      style().$type("text/css").
+        _("#cs { padding: 0.5em 0 1em 0; margin-bottom: 1em; position: relative }",
+          "#cs ul { list-style: none }",
+          "#cs a { font-weight: normal; margin: 2px; position: relative }",
+          "#cs a span { font-weight: normal; font-size: 80% }",
+          "#cs-wrapper .ui-widget-header { padding: 0.2em 0.5em }",
+          "table.info tr th {width: 50%}")._(). // to center info table
+      script("/static/jt/jquery.jstree.js").
+      script().$type("text/javascript").
+        _("$(function() {",
+          "  $('#cs a span').addClass('ui-corner-all').css('position', 'absolute');",
+          "  $('#cs').bind('loaded.jstree', function (e, data) {",
+          "    data.inst.open_all(); }).",
+          "    jstree({",
+          "    core: { animation: 188, html_titles: true },",
+          "    plugins: ['themeroller', 'html_data', 'ui'],",
+          "    themeroller: { item_open: 'ui-icon-minus',",
+          "      item_clsd: 'ui-icon-plus', item_leaf: 'ui-icon-gear'",
+          "    }",
+          "  });",
+          "  $('#cs').bind('select_node.jstree', function(e, data) {",
+          "    var q = $('.q', data.rslt.obj).first().text();",
+            "    if (q == 'root') q = '';",
+          "    $('#apps').dataTable().fnFilter(q, 3);",
+          "  });",
+          "  $('#cs').show();",
+          "});")._();
+  }
+
   @Override protected Class<? extends SubView> content() {
-    return QueueBlock.class;
+    return QueuesBlock.class;
+  }
+
+  static String percent(float f) {
+    return String.format("%.1f%%", f * 100);
+  }
+
+  static String width(float f) {
+    return String.format("width:%.1f%%", f * 100);
+  }
+
+  static String left(float f) {
+    return String.format("left:%.1f%%", f * 100);
   }
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java Tue Sep 13 22:49:27 2011
@@ -52,8 +52,8 @@ class NodesPage extends RmView {
           thead().
           tr().
           th(".rack", "Rack").
-          th(".nodeid", "Node ID").
-          th(".host", "Host").
+          th(".nodeaddress", "Node Address").
+          th(".nodehttpaddress", "Node HTTP Address").
           th(".healthStatus", "Health-status").
           th(".lastHealthUpdate", "Last health-update").
           th(".healthReport", "Health-report").

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml Tue Sep 13 22:49:27 2011
@@ -1,58 +1,79 @@
 <configuration>
 
   <property>
-    <name>yarn.capacity-scheduler.maximum-applications</name>
+    <name>yarn.scheduler.capacity.maximum-applications</name>
     <value>10000</value>
+    <description>Maximum number of applications that can be running.
+    </description>
   </property>
 
   <property>
-    <name>yarn.capacity-scheduler.maximum-am-resource-percent</name>
+    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
     <value>0.1</value>
   </property>
 
   <property>
-    <name>yarn.capacity-scheduler.root.queues</name>
+    <name>yarn.scheduler.capacity.root.queues</name>
     <value>default</value>
+    <description>The queues at this level (root is the root queue).
+    </description>
   </property>
 
   <property>
-    <name>yarn.capacity-scheduler.root.capacity</name>
+    <name>yarn.scheduler.capacity.root.capacity</name>
     <value>100</value>
+    <description>The total capacity as a percentage out of 100 for this queue.
+    If it has child queues then this includes their capacity as well.
+    The child queues' capacities should add up to their parent queue's capacity
+    or less.</description>
   </property>
 
   <property>
-    <name>yarn.capacity-scheduler.root.acl_administer_queues</name>
+    <name>yarn.scheduler.capacity.root.acl_administer_queues</name>
     <value>*</value>
+    <description>The ACL of who can administer this queue, i.e.
+    change sub-queue allocations.</description>
   </property>
 
   <property>
-    <name>yarn.capacity-scheduler.root.default.capacity</name>
+    <name>yarn.scheduler.capacity.root.default.capacity</name>
     <value>100</value>
+    <description>default queue target capacity.</description>
   </property>
 
   <property>
-    <name>yarn.capacity-scheduler.root.default.user-limit-factor</name>
+    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
     <value>1</value>
+    <description>Default queue user limit, a percentage from 0.0 to 1.0.
+    </description>
   </property>
 
   <property>
-    <name>yarn.capacity-scheduler.root.default.maximum-capacity</name>
+    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
     <value>-1</value>
+    <description>The maximum capacity of the default queue; -1 disables it.
+    </description>
   </property>
 
   <property>
-    <name>yarn.capacity-scheduler.root.default.state</name>
+    <name>yarn.scheduler.capacity.root.default.state</name>
     <value>RUNNING</value>
+    <description>The state of the default queue. Can be RUNNING or STOPPED.
+    </description>
   </property>
 
   <property>
-    <name>yarn.capacity-scheduler.root.default.acl_submit_jobs</name>
+    <name>yarn.scheduler.capacity.root.default.acl_submit_jobs</name>
     <value>*</value>
+    <description>The ACL of who can submit jobs to the default queue.
+    </description>
   </property>
 
   <property>
-    <name>yarn.capacity-scheduler.root.default.acl_administer_jobs</name>
+    <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
     <value>*</value>
+    <description>The ACL of who can administer jobs on the default queue.
+    </description>
   </property>
 
 </configuration>
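
Since every key in this file moves from the "yarn.capacity-scheduler." prefix
to "yarn.scheduler.capacity.", existing deployments need to rename their
overrides to match. A brief sketch (assuming the stock Hadoop Configuration
loader; not part of this commit) of reading the renamed keys:

    import org.apache.hadoop.conf.Configuration;

    class CapacityConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.addResource("capacity-scheduler.xml");  // must be on the classpath

        // Renamed keys, read back with the defaults shipped in this file.
        int maxApps = conf.getInt(
            "yarn.scheduler.capacity.maximum-applications", 10000);
        float rootCapacity = conf.getFloat(
            "yarn.scheduler.capacity.root.capacity", 100.0f);
        System.out.println("maxApps=" + maxApps
            + " rootCapacity=" + rootCapacity);
      }
    }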

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java Tue Sep 13 22:49:27 2011
@@ -25,6 +25,7 @@ import java.util.Map;
 
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -53,9 +54,10 @@ public class MockNM {
   }
 
   public void containerStatus(Container container) throws Exception {
-    Map<ApplicationId, List<Container>> conts = new HashMap<ApplicationId, List<Container>>();
-    conts.put(container.getId().getAppId(), Arrays
-        .asList(new Container[] { container }));
+    Map<ApplicationId, List<ContainerStatus>> conts = 
+        new HashMap<ApplicationId, List<ContainerStatus>>();
+    conts.put(container.getId().getApplicationAttemptId().getApplicationId(), 
+        Arrays.asList(new ContainerStatus[] { container.getContainerStatus() }));
     nodeHeartbeat(conts, true);
   }
 
@@ -76,16 +78,16 @@ public class MockNM {
   }
 
   public HeartbeatResponse nodeHeartbeat(boolean b) throws Exception {
-    return nodeHeartbeat(new HashMap<ApplicationId, List<Container>>(), b);
+    return nodeHeartbeat(new HashMap<ApplicationId, List<ContainerStatus>>(), b);
   }
 
   public HeartbeatResponse nodeHeartbeat(Map<ApplicationId, 
-      List<Container>> conts, boolean isHealthy) throws Exception {
+      List<ContainerStatus>> conts, boolean isHealthy) throws Exception {
     NodeHeartbeatRequest req = Records.newRecord(NodeHeartbeatRequest.class);
     NodeStatus status = Records.newRecord(NodeStatus.class);
     status.setNodeId(nodeId);
-    for (Map.Entry<ApplicationId, List<Container>> entry : conts.entrySet()) {
-      status.setContainers(entry.getKey(), entry.getValue());
+    for (Map.Entry<ApplicationId, List<ContainerStatus>> entry : conts.entrySet()) {
+      status.setContainersStatuses(entry.getValue());
     }
     NodeHealthStatus healthStatus = Records.newRecord(NodeHealthStatus.class);
     healthStatus.setHealthReport("");

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java Tue Sep 13 22:49:27 2011
@@ -60,13 +60,9 @@ public class MockRM extends ResourceMana
 
   public void waitForState(ApplicationId appId, RMAppState finalState) 
       throws Exception {
+    RMApp app = getRMContext().getRMApps().get(appId);
+    Assert.assertNotNull("app shouldn't be null", app);
     int timeoutSecs = 0;
-    RMApp app = null;
-    while ((app == null) && timeoutSecs++ < 20) {
-      app = getRMContext().getRMApps().get(appId);
-      Thread.sleep(500);
-    }
-    timeoutSecs = 0;
     while (!finalState.equals(app.getState()) &&
         timeoutSecs++ < 20) {
       System.out.println("App State is : " + app.getState() +
@@ -95,6 +91,7 @@ public class MockRM extends ResourceMana
     req.setApplicationSubmissionContext(sub);
     
     client.submitApplication(req);
+    // make sure app is immediately available after submit
     waitForState(appId, RMAppState.ACCEPTED);
     return getRMContext().getRMApps().get(appId);
   }
@@ -131,7 +128,7 @@ public class MockRM extends ResourceMana
 
   @Override
   protected ClientRMService createClientRMService() {
-    return new ClientRMService(getRMContext(), getResourceScheduler()) {
+    return new ClientRMService(getRMContext(), getResourceScheduler(), rmAppManager) {
       @Override
       public void start() {
         //override to not start rpc handler

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java Tue Sep 13 22:49:27 2011
@@ -42,6 +42,7 @@ import org.apache.hadoop.yarn.api.record
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -53,9 +54,7 @@ import org.apache.hadoop.yarn.server.api
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.records.NodeStatus;
-import org.apache.hadoop.yarn.server.api.records.RegistrationResponse;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.apache.hadoop.yarn.util.BuilderUtils;
 
@@ -133,10 +132,19 @@ public class NodeManager implements Cont
   
   int responseID = 0;
   
+  private List<ContainerStatus> getContainerStatuses(Map<ApplicationId, List<Container>> containers) {
+    List<ContainerStatus> containerStatuses = new ArrayList<ContainerStatus>();
+    for (List<Container> appContainers : containers.values()) {
+      for (Container container : appContainers) {
+        containerStatuses.add(container.getContainerStatus());
+      }
+    }
+    return containerStatuses;
+  }
   public void heartbeat() throws IOException {
     NodeStatus nodeStatus = 
       org.apache.hadoop.yarn.server.resourcemanager.NodeManager.createNodeStatus(
-          nodeId, containers);
+          nodeId, getContainerStatuses(containers));
     nodeStatus.setResponseId(responseID);
     NodeHeartbeatRequest request = recordFactory
         .newRecordInstance(NodeHeartbeatRequest.class);
@@ -147,11 +155,15 @@ public class NodeManager implements Cont
   }
 
   @Override
-  synchronized public StartContainerResponse startContainer(StartContainerRequest request) throws YarnRemoteException {
-    ContainerLaunchContext containerLaunchContext = request.getContainerLaunchContext();
+  synchronized public StartContainerResponse startContainer(
+      StartContainerRequest request) 
+  throws YarnRemoteException {
+    ContainerLaunchContext containerLaunchContext = 
+        request.getContainerLaunchContext();
     
-    ApplicationId applicationId = containerLaunchContext.getContainerId()
-        .getAppId();
+    ApplicationId applicationId = 
+        containerLaunchContext.getContainerId().getApplicationAttemptId().
+        getApplicationId();
 
     List<Container> applicationContainers = containers.get(applicationId);
     if (applicationContainers == null) {
@@ -161,7 +173,8 @@ public class NodeManager implements Cont
     
     // Sanity check
     for (Container container : applicationContainers) {
-      if (container.getId().compareTo(containerLaunchContext.getContainerId()) == 0) {
+      if (container.getId().compareTo(containerLaunchContext.getContainerId()) 
+          == 0) {
         throw new IllegalStateException(
             "Container " + containerLaunchContext.getContainerId() + 
             " already setup on node " + containerManagerAddress);
@@ -201,7 +214,8 @@ public class NodeManager implements Cont
   synchronized public StopContainerResponse stopContainer(StopContainerRequest request) 
   throws YarnRemoteException {
     ContainerId containerID = request.getContainerId();
-    String applicationId = String.valueOf(containerID.getAppId().getId());
+    String applicationId = String.valueOf(
+        containerID.getApplicationAttemptId().getApplicationId().getId());
     
     // Mark the container as COMPLETE
     List<Container> applicationContainers = containers.get(applicationId);
@@ -250,17 +264,31 @@ public class NodeManager implements Cont
 
   @Override
   synchronized public GetContainerStatusResponse getContainerStatus(GetContainerStatusRequest request) throws YarnRemoteException {
-    ContainerId containerID = request.getContainerId();
-    GetContainerStatusResponse response = recordFactory.newRecordInstance(GetContainerStatusResponse.class);
+    ContainerId containerId = request.getContainerId();
+    List<Container> appContainers = 
+        containers.get(
+            containerId.getApplicationAttemptId().getApplicationId());
+    Container container = null;
+    for (Container c : appContainers) {
+      if (c.getId().equals(containerId)) {
+        container = c;
+      }
+    }
+    GetContainerStatusResponse response = 
+        recordFactory.newRecordInstance(GetContainerStatusResponse.class);
+    if (container != null && container.getContainerStatus() != null) {
+      response.setStatus(container.getContainerStatus());
+    }
     return response;
   }
 
-  public static org.apache.hadoop.yarn.server.api.records.NodeStatus createNodeStatus(
-      NodeId nodeId, Map<ApplicationId, List<Container>> containers) {
+  public static org.apache.hadoop.yarn.server.api.records.NodeStatus 
+  createNodeStatus(NodeId nodeId, List<ContainerStatus> containers) {
     RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
-    org.apache.hadoop.yarn.server.api.records.NodeStatus nodeStatus = recordFactory.newRecordInstance(org.apache.hadoop.yarn.server.api.records.NodeStatus.class);
+    org.apache.hadoop.yarn.server.api.records.NodeStatus nodeStatus = 
+        recordFactory.newRecordInstance(org.apache.hadoop.yarn.server.api.records.NodeStatus.class);
     nodeStatus.setNodeId(nodeId);
-    nodeStatus.addAllContainers(containers);
+    nodeStatus.setContainersStatuses(containers);
     NodeHealthStatus nodeHealthStatus = 
       recordFactory.newRecordInstance(NodeHealthStatus.class);
     nodeHealthStatus.setIsNodeHealthy(true);
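
The hunks above follow two API moves: a ContainerId no longer exposes the application id directly (it is reached through the ApplicationAttemptId), and the mock now answers status requests with the ContainerStatus recorded on the Container. A minimal sketch of the same lookup, assuming it sits in a class with these imports; findStatus is a hypothetical helper, not part of the patch:

    import java.util.List;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.Container;
    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.api.records.ContainerStatus;

    // Hypothetical helper mirroring getContainerStatus(...) above: find the
    // container with the given id and hand back whatever status it carries.
    static ContainerStatus findStatus(ContainerId containerId,
        List<Container> applicationContainers) {
      for (Container c : applicationContainers) {
        if (c.getId().equals(containerId)) {
          return c.getContainerStatus();  // may be null if nothing reported yet
        }
      }
      return null;
    }

The container list itself is keyed by the owning application, which is now reached as containerId.getApplicationAttemptId().getApplicationId().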

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java Tue Sep 13 22:49:27 2011
@@ -32,6 +32,7 @@ import junit.framework.Assert;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -47,7 +48,6 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.RMAppManager;
-import org.apache.hadoop.yarn.server.resourcemanager.RMConfig;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.MockRMApp;
@@ -153,14 +153,14 @@ public class TestAppManager{
 
     public TestRMAppManager(RMContext context, Configuration conf) {
       super(context, null, null, null, conf);
-      setCompletedAppsMax(RMConfig.DEFAULT_EXPIRE_APPLICATIONS_COMPLETED_MAX);
+      setCompletedAppsMax(YarnConfiguration.DEFAULT_RM_MAX_COMPLETED_APPLICATIONS);
     }
 
     public TestRMAppManager(RMContext context, ClientToAMSecretManager
         clientToAMSecretManager, YarnScheduler scheduler,
         ApplicationMasterService masterService, Configuration conf) {
       super(context, clientToAMSecretManager, scheduler, masterService, conf);
-      setCompletedAppsMax(RMConfig.DEFAULT_EXPIRE_APPLICATIONS_COMPLETED_MAX);
+      setCompletedAppsMax(YarnConfiguration.DEFAULT_RM_MAX_COMPLETED_APPLICATIONS);
     }
 
     public void checkAppNumCompletedLimit() {
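
The test now seeds its completed-application cap from YarnConfiguration rather than the removed RMConfig constant. A short sketch of driving the same cap through a configuration object; the key name RM_MAX_COMPLETED_APPLICATIONS is assumed to be the constant paired with the default used above, not something this hunk shows:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    // Assumed pairing: key constant plus the default referenced in the hunk.
    Configuration conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS,
        YarnConfiguration.DEFAULT_RM_MAX_COMPLETED_APPLICATIONS);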

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java Tue Sep 13 22:49:27 2011
@@ -66,11 +66,11 @@ public class TestApplicationCleanup {
     //kick the scheduler
     nm1.nodeHeartbeat(true);
     List<Container> conts = am.allocate(new ArrayList<ResourceRequest>(),
-        new ArrayList<ContainerId>()).getNewContainerList();
+        new ArrayList<ContainerId>()).getAllocatedContainers();
     int contReceived = conts.size();
     while (contReceived < request) {
       conts = am.allocate(new ArrayList<ResourceRequest>(),
-          new ArrayList<ContainerId>()).getNewContainerList();
+          new ArrayList<ContainerId>()).getAllocatedContainers();
       contReceived += conts.size();
       Log.info("Got " + contReceived + " containers. Waiting to get " + request);
       Thread.sleep(2000);
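
The allocate-response accessor is renamed from getNewContainerList() to getAllocatedContainers(). A sketch of the polling loop in its new form, assuming the same test fixtures (am, nm1, request) and a test method that declares throws Exception:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.yarn.api.records.Container;
    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.api.records.ResourceRequest;

    // Kick the scheduler once, then poll the AM allocate call (empty ask and
    // release lists effectively act as a heartbeat) until enough containers
    // have been handed out.
    nm1.nodeHeartbeat(true);
    List<Container> conts = am.allocate(new ArrayList<ResourceRequest>(),
        new ArrayList<ContainerId>()).getAllocatedContainers();
    int contReceived = conts.size();
    while (contReceived < request) {
      conts = am.allocate(new ArrayList<ResourceRequest>(),
          new ArrayList<ContainerId>()).getAllocatedContainers();
      contReceived += conts.size();
      Thread.sleep(2000);
    }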

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java Tue Sep 13 22:49:27 2011
@@ -92,12 +92,12 @@ public class TestFifoScheduler {
 
     // kick the scheduler, 1 GB and 3 GB given to AM1 and AM2, remaining 0
     nm1.nodeHeartbeat(true);
-    while (am1Response.getNewContainerCount() < 1) {
+    while (am1Response.getAllocatedContainers().size() < 1) {
       LOG.info("Waiting for containers to be created for app 1...");
       Thread.sleep(1000);
       am1Response = am1.schedule();
     }
-    while (am2Response.getNewContainerCount() < 1) {
+    while (am2Response.getAllocatedContainers().size() < 1) {
       LOG.info("Waiting for containers to be created for app 2...");
       Thread.sleep(1000);
       am2Response = am2.schedule();
@@ -105,12 +105,12 @@ public class TestFifoScheduler {
     // kick the scheduler, nothing given remaining 2 GB.
     nm2.nodeHeartbeat(true);
 
-    List<Container> allocated1 = am1Response.getNewContainerList();
+    List<Container> allocated1 = am1Response.getAllocatedContainers();
     Assert.assertEquals(1, allocated1.size());
     Assert.assertEquals(1 * GB, allocated1.get(0).getResource().getMemory());
     Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());
 
-    List<Container> allocated2 = am2Response.getNewContainerList();
+    List<Container> allocated2 = am2Response.getAllocatedContainers();
     Assert.assertEquals(1, allocated2.size());
     Assert.assertEquals(3 * GB, allocated2.get(0).getResource().getMemory());
     Assert.assertEquals(nm1.getNodeId(), allocated2.get(0).getNodeId());
@@ -137,7 +137,7 @@ public class TestFifoScheduler {
       Thread.sleep(1000);
     }
     Assert.assertEquals(1, attempt1.getJustFinishedContainers().size());
-    Assert.assertEquals(1, am1.schedule().getFinishedContainerList().size());
+    Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses().size());
     Assert.assertEquals(5 * GB, rm.getResourceScheduler().getUsedResource(
         nm1.getNodeId()).getMemory());
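
Besides the allocation rename, the response now reports completions as ContainerStatus records via getCompletedContainersStatuses() instead of a list of finished Containers. A small sketch against the same response objects used above; the List<ContainerStatus> return type is inferred from the method name rather than shown in the hunk:

    // Allocations and completions come back on the same schedule() response.
    List<Container> allocated = am1Response.getAllocatedContainers();
    Assert.assertEquals(1, allocated.size());
    Assert.assertEquals(1 * GB, allocated.get(0).getResource().getMemory());

    List<ContainerStatus> completed =
        am1.schedule().getCompletedContainersStatuses();
    Assert.assertEquals(1, completed.size());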
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java Tue Sep 13 22:49:27 2011
@@ -86,11 +86,11 @@ public class TestRM {
     //kick the scheduler
     nm1.nodeHeartbeat(true);
     List<Container> conts = am.allocate(new ArrayList<ResourceRequest>(),
-        new ArrayList<ContainerId>()).getNewContainerList();
+        new ArrayList<ContainerId>()).getAllocatedContainers();
     int contReceived = conts.size();
     while (contReceived < 3) {//only 3 containers are available on node1
       conts.addAll(am.allocate(new ArrayList<ResourceRequest>(),
-          new ArrayList<ContainerId>()).getNewContainerList());
+          new ArrayList<ContainerId>()).getAllocatedContainers());
       contReceived = conts.size();
       LOG.info("Got " + contReceived + " containers. Waiting to get " + 3);
       Thread.sleep(2000);
@@ -100,11 +100,11 @@ public class TestRM {
     //send node2 heartbeat
     nm2.nodeHeartbeat(true);
     conts = am.allocate(new ArrayList<ResourceRequest>(),
-        new ArrayList<ContainerId>()).getNewContainerList();
+        new ArrayList<ContainerId>()).getAllocatedContainers();
     contReceived = conts.size();
     while (contReceived < 10) {
       conts.addAll(am.allocate(new ArrayList<ResourceRequest>(),
-          new ArrayList<ContainerId>()).getNewContainerList());
+          new ArrayList<ContainerId>()).getAllocatedContainers());
       contReceived = conts.size();
       LOG.info("Got " + contReceived + " containers. Waiting to get " + 10);
       Thread.sleep(2000);

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java Tue Sep 13 22:49:27 2011
@@ -33,6 +33,7 @@ import org.apache.hadoop.yarn.api.record
 import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger;
 import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants;
 import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.Keys;
+import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
 
 import org.apache.hadoop.net.NetUtils;
 
@@ -228,7 +229,8 @@ public class TestRMAuditLogger {
   public void testRMAuditLoggerWithIP() throws Exception {
     Configuration conf = new Configuration();
     // start the IPC server
-    Server server = RPC.getServer(new MyTestRPCServer(), "0.0.0.0", 0, conf);
+    Server server = RPC.getServer(TestProtocol.class,
+        new MyTestRPCServer(), "0.0.0.0", 0, 5, true, conf, null);
     server.start();
 
     InetSocketAddress addr = NetUtils.getConnectAddress(server);
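
The audited RPC server is now built with the protocol-aware overload of RPC.getServer. An annotated repeat of the call above; the readings of the extra positional arguments are an interpretation of the Hadoop RPC API, not something the patch states:

    Server server = RPC.getServer(
        TestProtocol.class,       // protocol interface being served
        new MyTestRPCServer(),    // implementation instance
        "0.0.0.0",                // bind address
        0,                        // port (0 picks a free one)
        5,                        // handler count
        true,                     // verbose flag
        conf,                     // configuration
        null);                    // no secret manager
    server.start();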

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java Tue Sep 13 22:49:27 2011
@@ -27,19 +27,13 @@ import org.apache.hadoop.yarn.api.record
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationState;
 import org.apache.hadoop.yarn.api.records.ApplicationStatus;
-import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store.RMState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
-import org.apache.hadoop.yarn.service.AbstractService;
 import org.apache.hadoop.yarn.util.Records;
 
 import com.google.common.collect.Lists;
@@ -218,10 +212,10 @@ public abstract class MockAsm extends Mo
   }
 
   public static RMApp newApplication(int i) {
-    final ApplicationId id = newAppID(i);
+    final ApplicationAttemptId appAttemptId = newAppAttemptID(newAppID(i), 0);
     final Container masterContainer = Records.newRecord(Container.class);
     ContainerId containerId = Records.newRecord(ContainerId.class);
-    containerId.setAppId(id);
+    containerId.setApplicationAttemptId(appAttemptId);
     masterContainer.setId(containerId);
     masterContainer.setNodeHttpAddress("node:port");
     final String user = newUserName();
@@ -233,7 +227,7 @@ public abstract class MockAsm extends Mo
     return new ApplicationBase() {
       @Override
       public ApplicationId getApplicationId() {
-        return id;
+        return appAttemptId.getApplicationId();
       }
       @Override
       public String getUser() {
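
The mock master container is now keyed by an ApplicationAttemptId rather than an ApplicationId. A sketch of the new wiring, reusing the record calls from the hunk; newAppAttemptID and newAppID are the MockAsm helpers referenced above:

    ApplicationAttemptId appAttemptId = newAppAttemptID(newAppID(i), 0);

    ContainerId containerId = Records.newRecord(ContainerId.class);
    containerId.setApplicationAttemptId(appAttemptId);

    Container masterContainer = Records.newRecord(Container.class);
    masterContainer.setId(containerId);
    masterContainer.setNodeHttpAddress("node:port");

    // The application id is still reachable when the report needs it:
    ApplicationId appId = appAttemptId.getApplicationId();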

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMLaunchFailure.java Tue Sep 13 22:49:27 2011
@@ -46,7 +46,6 @@ import org.apache.hadoop.yarn.factories.
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService;
-import org.apache.hadoop.yarn.server.resourcemanager.RMConfig;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java Tue Sep 13 22:49:27 2011
@@ -50,7 +50,6 @@ import org.apache.hadoop.yarn.factories.
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.security.ApplicationTokenSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService;
-import org.apache.hadoop.yarn.server.resourcemanager.RMConfig;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java Tue Sep 13 22:49:27 2011
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
@@ -36,7 +37,6 @@ import org.apache.hadoop.yarn.server.api
 import org.apache.hadoop.yarn.server.api.records.RegistrationResponse;
 import org.apache.hadoop.yarn.server.resourcemanager.NMLivelinessMonitor;
 import org.apache.hadoop.yarn.server.resourcemanager.NodesListManager;
-import org.apache.hadoop.yarn.server.resourcemanager.RMConfig;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService;
@@ -65,7 +65,7 @@ public class TestNMExpiry {
 
     @Override
     public void init(Configuration conf) {
-      conf.setLong(RMConfig.NM_EXPIRY_INTERVAL, 1000);
+      conf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 1000);
       super.init(conf);
     }
     @Override
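
The NM expiry knob moves from RMConfig.NM_EXPIRY_INTERVAL to YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, with the _MS suffix making the unit explicit. A one-line sketch matching the test's 1-second setting, assuming the same conf object passed to init():

    // Shorten the NM liveness window so the expiry path fires quickly in tests.
    conf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 1000);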

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java Tue Sep 13 22:49:27 2011
@@ -117,7 +117,7 @@ public class TestRMAppTransitions {
     String queue = MockApps.newQueue();
     Configuration conf = new YarnConfiguration();
     // ensure max retries set to known value
-    conf.setInt("yarn.server.resourcemanager.application.max.retries", maxRetries);
+    conf.setInt(YarnConfiguration.RM_AM_MAX_RETRIES, maxRetries);
     ApplicationSubmissionContext submissionContext = null; 
     String clientTokenStr = "bogusstring";
     ApplicationStore appStore = mock(ApplicationStore.class);

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java Tue Sep 13 22:49:27 2011
@@ -38,8 +38,8 @@ public class TestApplicationLimits {
     when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16*GB));
     when(csContext.getClusterResources()).thenReturn(Resources.createResource(10 * 16 * GB));
     
-    Map<String, Queue> queues = new HashMap<String, Queue>();
-    Queue root = 
+    Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
+    CSQueue root = 
         CapacityScheduler.parseQueue(csContext, csConf, null, "root", 
             queues, queues, 
             CapacityScheduler.queueComparator, 
@@ -108,8 +108,8 @@ public class TestApplicationLimits {
     Resource clusterResource = Resources.createResource(100 * 16 * GB);
     when(csContext.getClusterResources()).thenReturn(clusterResource);
     
-    Map<String, Queue> queues = new HashMap<String, Queue>();
-    Queue root = 
+    Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
+    CSQueue root = 
         CapacityScheduler.parseQueue(csContext, csConf, null, "root", 
             queues, queues, 
             CapacityScheduler.queueComparator, 
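
The capacity-scheduler tests are retyped against CSQueue: both the lookup map and the root handed back by CapacityScheduler.parseQueue are now CSQueue rather than Queue. A minimal sketch of the retyped map; the remaining parseQueue arguments are not shown in the hunk, so they are omitted here:

    import java.util.HashMap;
    import java.util.Map;

    // Queue lookup used by the test, now parameterized on CSQueue.
    Map<String, CSQueue> queues = new HashMap<String, CSQueue>();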

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java?rev=1170378&r1=1170377&r2=1170378&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java Tue Sep 13 22:49:27 2011
@@ -30,9 +30,9 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.server.resourcemanager.Application;
-import org.apache.hadoop.yarn.server.resourcemanager.RMConfig;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.Task;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store;
@@ -55,7 +55,7 @@ public class TestCapacityScheduler {
     resourceManager = new ResourceManager(store);
     CapacitySchedulerConfiguration csConf = 
       new CapacitySchedulerConfiguration();
-    csConf.setClass(RMConfig.RESOURCE_SCHEDULER, 
+    csConf.setClass(YarnConfiguration.RM_SCHEDULER, 
         CapacityScheduler.class, ResourceScheduler.class);
     setupQueueConfiguration(csConf);
     resourceManager.init(csConf);
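
The scheduler-class selector likewise moves to YarnConfiguration.RM_SCHEDULER. A short sketch of the same wiring, reusing only the classes named in the hunk:

    // Bind the CapacityScheduler implementation to the ResourceScheduler role.
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    csConf.setClass(YarnConfiguration.RM_SCHEDULER,
        CapacityScheduler.class, ResourceScheduler.class);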