Posted to yarn-commits@hadoop.apache.org by su...@apache.org on 2013/01/22 20:33:13 UTC

svn commit: r1437113 [5/5] - in /hadoop/common/branches/branch-trunk-win/hadoop-yarn-project: ./ hadoop-yarn/ hadoop-yarn/conf/ hadoop-yarn/hadoop-yarn-api/ hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ hadoop-yarn/hadoop-yarn-a...

Modified: hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java Tue Jan 22 19:33:02 2013
@@ -17,8 +17,17 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -38,6 +47,8 @@ import org.apache.hadoop.yarn.conf.YarnC
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceCalculator;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.DefaultResourceCalculator;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
@@ -53,6 +64,8 @@ public class TestApplicationLimits {
 
   LeafQueue queue;
   
+  private final ResourceCalculator resourceCalculator = new DefaultResourceCalculator();
+  
   @Before
   public void setUp() throws IOException {
     CapacitySchedulerConfiguration csConf = 
@@ -64,23 +77,27 @@ public class TestApplicationLimits {
     CapacitySchedulerContext csContext = mock(CapacitySchedulerContext.class);
     when(csContext.getConfiguration()).thenReturn(csConf);
     when(csContext.getConf()).thenReturn(conf);
-    when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB));
-    when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16*GB));
-    when(csContext.getClusterResources()).thenReturn(Resources.createResource(10 * 16 * GB));
-    
+    when(csContext.getMinimumResourceCapability()).
+        thenReturn(Resources.createResource(GB, 1));
+    when(csContext.getMaximumResourceCapability()).
+        thenReturn(Resources.createResource(16*GB, 32));
+    when(csContext.getClusterResources()).
+        thenReturn(Resources.createResource(10 * 16 * GB, 10 * 32));
+    when(csContext.getApplicationComparator()).
+        thenReturn(CapacityScheduler.applicationComparator);
+    when(csContext.getQueueComparator()).
+        thenReturn(CapacityScheduler.queueComparator);
+    when(csContext.getResourceCalculator()).
+        thenReturn(resourceCalculator);
+
     Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
     CSQueue root = 
         CapacityScheduler.parseQueue(csContext, csConf, null, "root", 
             queues, queues, 
-            CapacityScheduler.queueComparator, 
-            CapacityScheduler.applicationComparator, 
             TestUtils.spyHook);
 
     
-    queue = spy(
-        new LeafQueue(csContext, A, root, 
-                      CapacityScheduler.applicationComparator, null)
-        );
+    queue = spy(new LeafQueue(csContext, A, root, null));
 
     // Stub out ACL checks
     doReturn(true).
@@ -132,21 +149,23 @@ public class TestApplicationLimits {
     when(csContext.getConfiguration()).thenReturn(csConf);
     when(csContext.getConf()).thenReturn(conf);
     when(csContext.getMinimumResourceCapability()).
-        thenReturn(Resources.createResource(GB));
+        thenReturn(Resources.createResource(GB, 1));
     when(csContext.getMaximumResourceCapability()).
-        thenReturn(Resources.createResource(16*GB));
+        thenReturn(Resources.createResource(16*GB, 16));
+    when(csContext.getApplicationComparator()).
+        thenReturn(CapacityScheduler.applicationComparator);
+    when(csContext.getQueueComparator()).
+        thenReturn(CapacityScheduler.queueComparator);
+    when(csContext.getResourceCalculator()).thenReturn(resourceCalculator);
     
     // Say cluster has 100 nodes of 16G each
-    Resource clusterResource = Resources.createResource(100 * 16 * GB);
+    Resource clusterResource = Resources.createResource(100 * 16 * GB, 100 * 16);
     when(csContext.getClusterResources()).thenReturn(clusterResource);
     
     Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
     CSQueue root = 
         CapacityScheduler.parseQueue(csContext, csConf, null, "root", 
-            queues, queues, 
-            CapacityScheduler.queueComparator, 
-            CapacityScheduler.applicationComparator,
-            TestUtils.spyHook);
+            queues, queues, TestUtils.spyHook);
 
     LeafQueue queue = (LeafQueue)queues.get(A);
     
@@ -205,8 +224,12 @@ public class TestApplicationLimits {
         );
 
     // should return -1 if per queue setting not set
-    assertEquals((int)csConf.UNDEFINED, csConf.getMaximumApplicationsPerQueue(queue.getQueuePath()));
-    int expectedMaxApps =  (int)(csConf.DEFAULT_MAXIMUM_SYSTEM_APPLICATIIONS * 
+    assertEquals(
+        (int)CapacitySchedulerConfiguration.UNDEFINED, 
+        csConf.getMaximumApplicationsPerQueue(queue.getQueuePath()));
+    int expectedMaxApps =  
+        (int)
+        (CapacitySchedulerConfiguration.DEFAULT_MAXIMUM_SYSTEM_APPLICATIIONS * 
         queue.getAbsoluteCapacity());
     assertEquals(expectedMaxApps, queue.getMaxApplications());
 
@@ -215,8 +238,11 @@ public class TestApplicationLimits {
     assertEquals(expectedMaxAppsPerUser, queue.getMaxApplicationsPerUser());
 
     // should default to global setting if per queue setting not set
-    assertEquals((long) csConf.DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT, 
-        (long) csConf.getMaximumApplicationMasterResourcePerQueuePercent(queue.getQueuePath()));
+    assertEquals(
+        (long)CapacitySchedulerConfiguration.DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT, 
+        (long)csConf.getMaximumApplicationMasterResourcePerQueuePercent(
+            queue.getQueuePath())
+            );
 
     // Change the per-queue max AM resources percentage.
     csConf.setFloat(
@@ -228,10 +254,7 @@ public class TestApplicationLimits {
     queues = new HashMap<String, CSQueue>();
     root = 
         CapacityScheduler.parseQueue(csContext, csConf, null, "root", 
-            queues, queues, 
-            CapacityScheduler.queueComparator, 
-            CapacityScheduler.applicationComparator,
-            TestUtils.spyHook);
+            queues, queues, TestUtils.spyHook);
     clusterResource = Resources.createResource(100 * 16 * GB);
 
     queue = (LeafQueue)queues.get(A);
@@ -257,10 +280,7 @@ public class TestApplicationLimits {
     queues = new HashMap<String, CSQueue>();
     root = 
         CapacityScheduler.parseQueue(csContext, csConf, null, "root", 
-            queues, queues, 
-            CapacityScheduler.queueComparator, 
-            CapacityScheduler.applicationComparator,
-            TestUtils.spyHook);
+            queues, queues, TestUtils.spyHook);
 
     queue = (LeafQueue)queues.get(A);
     assertEquals(9999, (int)csConf.getMaximumApplicationsPerQueue(queue.getQueuePath()));
@@ -445,6 +465,11 @@ public class TestApplicationLimits {
         thenReturn(Resources.createResource(GB));
     when(csContext.getMaximumResourceCapability()).
         thenReturn(Resources.createResource(16*GB));
+    when(csContext.getApplicationComparator()).
+        thenReturn(CapacityScheduler.applicationComparator);
+    when(csContext.getQueueComparator()).
+        thenReturn(CapacityScheduler.queueComparator);
+    when(csContext.getResourceCalculator()).thenReturn(resourceCalculator);
     
     // Say cluster has 100 nodes of 16G each
     Resource clusterResource = Resources.createResource(100 * 16 * GB);
@@ -452,10 +477,7 @@ public class TestApplicationLimits {
     
     Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
     CapacityScheduler.parseQueue(csContext, csConf, null, "root", 
-        queues, queues, 
-        CapacityScheduler.queueComparator, 
-        CapacityScheduler.applicationComparator, 
-        TestUtils.spyHook);
+        queues, queues, TestUtils.spyHook);
 
     // Manipulate queue 'a'
     LeafQueue queue = TestLeafQueue.stubLeafQueue((LeafQueue)queues.get(A));
@@ -490,7 +512,7 @@ public class TestApplicationLimits {
 
     // Schedule to compute 
     queue.assignContainers(clusterResource, node_0);
-    Resource expectedHeadroom = Resources.createResource(10*16*GB);
+    Resource expectedHeadroom = Resources.createResource(10*16*GB, 1);
     verify(app_0_0).setHeadroom(eq(expectedHeadroom));
 
     // Submit second application from user_0, check headroom
@@ -528,7 +550,7 @@ public class TestApplicationLimits {
     
     // Schedule to compute 
     queue.assignContainers(clusterResource, node_0); // Schedule to compute
-    expectedHeadroom = Resources.createResource(10*16*GB / 2); // changes
+    expectedHeadroom = Resources.createResource(10*16*GB / 2, 1); // changes
     verify(app_0_0).setHeadroom(eq(expectedHeadroom));
     verify(app_0_1).setHeadroom(eq(expectedHeadroom));
     verify(app_1_0).setHeadroom(eq(expectedHeadroom));
@@ -536,7 +558,7 @@ public class TestApplicationLimits {
     // Now reduce cluster size and check for the smaller headroom
     clusterResource = Resources.createResource(90*16*GB);
     queue.assignContainers(clusterResource, node_0); // Schedule to compute
-    expectedHeadroom = Resources.createResource(9*16*GB / 2); // changes
+    expectedHeadroom = Resources.createResource(9*16*GB / 2, 1); // changes
     verify(app_0_0).setHeadroom(eq(expectedHeadroom));
     verify(app_0_1).setHeadroom(eq(expectedHeadroom));
     verify(app_1_0).setHeadroom(eq(expectedHeadroom));
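
Across the hunks above, the test fixture moves from memory-only resources (Resources.createResource(mem)) to memory-plus-vcores resources, and CapacityScheduler.parseQueue() drops its comparator arguments in favour of getters on the scheduler context. A condensed sketch of the resulting setUp() pattern, assembled only from the changed lines shown here (GB, csConf and the mocked context come from the surrounding test, exactly as in the diff):

    // Condensed from the setUp() changes above; GB = 1024 as in the tests.
    ResourceCalculator resourceCalculator = new DefaultResourceCalculator();
    CapacitySchedulerContext csContext = mock(CapacitySchedulerContext.class);
    when(csContext.getMinimumResourceCapability())
        .thenReturn(Resources.createResource(GB, 1));           // memory (MB), vcores
    when(csContext.getMaximumResourceCapability())
        .thenReturn(Resources.createResource(16 * GB, 32));
    when(csContext.getClusterResources())
        .thenReturn(Resources.createResource(10 * 16 * GB, 10 * 32));
    // parseQueue() no longer takes the comparators as arguments; the
    // scheduler context now supplies them, along with the ResourceCalculator.
    when(csContext.getApplicationComparator())
        .thenReturn(CapacityScheduler.applicationComparator);
    when(csContext.getQueueComparator())
        .thenReturn(CapacityScheduler.queueComparator);
    when(csContext.getResourceCalculator()).thenReturn(resourceCalculator);

    Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
    CSQueue root = CapacityScheduler.parseQueue(csContext, csConf, null,
        "root", queues, queues, TestUtils.spyHook);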

Modified: hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java Tue Jan 22 19:33:02 2013
@@ -91,10 +91,10 @@ public class TestCapacityScheduler {
   
   private org.apache.hadoop.yarn.server.resourcemanager.NodeManager
       registerNode(String hostName, int containerManagerPort, int httpPort,
-          String rackName, int memory)
+          String rackName, Resource capability)
           throws IOException {
     return new org.apache.hadoop.yarn.server.resourcemanager.NodeManager(
-        hostName, containerManagerPort, httpPort, rackName, memory,
+        hostName, containerManagerPort, httpPort, rackName, capability,
         resourceManager.getResourceTrackerService(), resourceManager
             .getRMContext());
   }  
@@ -107,13 +107,15 @@ public class TestCapacityScheduler {
     // Register node1
     String host_0 = "host_0";
     org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 = 
-      registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, 4 * GB);
+      registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, 
+          Resources.createResource(4 * GB, 1));
     nm_0.heartbeat();
     
     // Register node2
     String host_1 = "host_1";
     org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_1 = 
-      registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, 2 * GB);
+      registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, 
+          Resources.createResource(2 * GB, 1));
     nm_1.heartbeat();
 
     // ResourceRequest priorities
@@ -129,10 +131,10 @@ public class TestCapacityScheduler {
     application_0.addNodeManager(host_0, 1234, nm_0);
     application_0.addNodeManager(host_1, 1234, nm_1);
 
-    Resource capability_0_0 = Resources.createResource(1 * GB);
+    Resource capability_0_0 = Resources.createResource(1 * GB, 1);
     application_0.addResourceRequestSpec(priority_1, capability_0_0);
     
-    Resource capability_0_1 = Resources.createResource(2 * GB);
+    Resource capability_0_1 = Resources.createResource(2 * GB, 1);
     application_0.addResourceRequestSpec(priority_0, capability_0_1);
 
     Task task_0_0 = new Task(application_0, priority_1, 
@@ -146,10 +148,10 @@ public class TestCapacityScheduler {
     application_1.addNodeManager(host_0, 1234, nm_0);
     application_1.addNodeManager(host_1, 1234, nm_1);
     
-    Resource capability_1_0 = Resources.createResource(3 * GB);
+    Resource capability_1_0 = Resources.createResource(3 * GB, 1);
     application_1.addResourceRequestSpec(priority_1, capability_1_0);
     
-    Resource capability_1_1 = Resources.createResource(2 * GB);
+    Resource capability_1_1 = Resources.createResource(2 * GB, 1);
     application_1.addResourceRequestSpec(priority_0, capability_1_1);
 
     Task task_1_0 = new Task(application_1, priority_1, 
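
The registerNode() helper now takes a full Resource capability instead of a bare memory size, so call sites build the capability with both memory and vcores. A minimal sketch of the updated call, using only names that appear in this diff:

    // Before: registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, 4 * GB);
    // After: the node advertises memory (MB) and virtual cores together.
    org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 =
        registerNode("host_0", 1234, 2345, NetworkTopology.DEFAULT_RACK,
            Resources.createResource(4 * GB, 1));
    nm_0.heartbeat();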

Modified: hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java Tue Jan 22 19:33:02 2013
@@ -55,6 +55,8 @@ import org.apache.hadoop.yarn.conf.YarnC
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceCalculator;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.DefaultResourceCalculator;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
@@ -90,6 +92,8 @@ public class TestLeafQueue {
   final static int GB = 1024;
   final static String DEFAULT_RACK = "/default";
 
+  private final ResourceCalculator resourceCalculator = new DefaultResourceCalculator();
+  
   @Before
   public void setUp() throws Exception {
     CapacityScheduler spyCs = new CapacityScheduler();
@@ -108,17 +112,22 @@ public class TestLeafQueue {
     when(csContext.getConfiguration()).thenReturn(csConf);
     when(csContext.getConf()).thenReturn(conf);
     when(csContext.getMinimumResourceCapability()).
-        thenReturn(Resources.createResource(GB));
+        thenReturn(Resources.createResource(GB, 1));
     when(csContext.getMaximumResourceCapability()).
-        thenReturn(Resources.createResource(16*GB));
+        thenReturn(Resources.createResource(16*GB, 32));
     when(csContext.getClusterResources()).
-        thenReturn(Resources.createResource(100 * 16 * GB));
+        thenReturn(Resources.createResource(100 * 16 * GB, 100 * 32));
+    when(csContext.getApplicationComparator()).
+    thenReturn(CapacityScheduler.applicationComparator);
+    when(csContext.getQueueComparator()).
+        thenReturn(CapacityScheduler.queueComparator);
+    when(csContext.getResourceCalculator()).
+        thenReturn(resourceCalculator);
+
     root = 
         CapacityScheduler.parseQueue(csContext, csConf, null, 
             CapacitySchedulerConfiguration.ROOT, 
             queues, queues, 
-            CapacityScheduler.queueComparator, 
-            CapacityScheduler.applicationComparator, 
             TestUtils.spyHook);
 
     cs.reinitialize(csConf, rmContext);
@@ -266,7 +275,8 @@ public class TestLeafQueue {
     FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8*GB);
     
     final int numNodes = 1;
-    Resource clusterResource = Resources.createResource(numNodes * (8*GB));
+    Resource clusterResource = 
+        Resources.createResource(numNodes * (8*GB), numNodes * 16);
     when(csContext.getNumClusterNodes()).thenReturn(numNodes);
 
     // Setup resource-requests
@@ -387,7 +397,8 @@ public class TestLeafQueue {
     FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8*GB);
     
     final int numNodes = 1;
-    Resource clusterResource = Resources.createResource(numNodes * (8*GB));
+    Resource clusterResource = 
+        Resources.createResource(numNodes * (8*GB), numNodes * 16);
     when(csContext.getNumClusterNodes()).thenReturn(numNodes);
 
     // Setup resource-requests
@@ -519,7 +530,8 @@ public class TestLeafQueue {
     FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8*GB);
     
     final int numNodes = 2;
-    Resource clusterResource = Resources.createResource(numNodes * (8*GB));
+    Resource clusterResource = 
+        Resources.createResource(numNodes * (8*GB), numNodes * 16);
     when(csContext.getNumClusterNodes()).thenReturn(numNodes);
  
     // Setup resource-requests
@@ -612,7 +624,7 @@ public class TestLeafQueue {
     FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8*GB);
     
     final int numNodes = 2;
-    Resource clusterResource = Resources.createResource(numNodes * (8*GB));
+    Resource clusterResource = Resources.createResource(numNodes * (8*GB), 1);
     when(csContext.getNumClusterNodes()).thenReturn(numNodes);
  
     // Setup resource-requests
@@ -728,7 +740,8 @@ public class TestLeafQueue {
     FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8*GB);
     
     final int numNodes = 1;
-    Resource clusterResource = Resources.createResource(numNodes * (8*GB));
+    Resource clusterResource = 
+        Resources.createResource(numNodes * (8*GB), numNodes * 16);
     when(csContext.getNumClusterNodes()).thenReturn(numNodes);
     
     // Setup resource-requests
@@ -889,7 +902,8 @@ public class TestLeafQueue {
     FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 4*GB);
     
     final int numNodes = 2;
-    Resource clusterResource = Resources.createResource(numNodes * (4*GB));
+    Resource clusterResource = 
+        Resources.createResource(numNodes * (4*GB), numNodes * 16);
     when(csContext.getNumClusterNodes()).thenReturn(numNodes);
     
     // Setup resource-requests
@@ -990,7 +1004,8 @@ public class TestLeafQueue {
     FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 4*GB);
 
     final int numNodes = 3;
-    Resource clusterResource = Resources.createResource(numNodes * (4*GB));
+    Resource clusterResource = 
+        Resources.createResource(numNodes * (4*GB), numNodes * 16);
     when(csContext.getNumClusterNodes()).thenReturn(numNodes);
 
     // Setup resource-requests
@@ -1090,11 +1105,13 @@ public class TestLeafQueue {
     FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 4*GB);
     
     final int numNodes = 3;
-    Resource clusterResource = Resources.createResource(numNodes * (4*GB));
+    Resource clusterResource = 
+        Resources.createResource(numNodes * (4*GB), numNodes * 16);
     when(csContext.getNumClusterNodes()).thenReturn(numNodes);
     when(csContext.getMaximumResourceCapability()).thenReturn(
-        Resources.createResource(4*GB));
-    when(a.getMaximumAllocation()).thenReturn(Resources.createResource(4*GB));
+        Resources.createResource(4*GB, 16));
+    when(a.getMaximumAllocation()).thenReturn(
+        Resources.createResource(4*GB, 16));
     when(a.getMinimumAllocationFactor()).thenReturn(0.25f); // 1G / 4G 
     
     // Setup resource-requests
@@ -1164,12 +1181,14 @@ public class TestLeafQueue {
     // Now finish another container from app_0 and see the reservation cancelled
     a.completedContainer(clusterResource, app_0, node_0, 
         app_0.getLiveContainers().iterator().next(), null, RMContainerEventType.KILL);
-    a.assignContainers(clusterResource, node_0);
-    assertEquals(4*GB, a.getUsedResources().getMemory());
+    CSAssignment assignment = a.assignContainers(clusterResource, node_0);
+    assertEquals(8*GB, a.getUsedResources().getMemory());
     assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
     assertEquals(4*GB, app_1.getCurrentConsumption().getMemory());
-    assertEquals(0*GB, app_1.getCurrentReservation().getMemory());
+    assertEquals(4*GB, app_1.getCurrentReservation().getMemory());
     assertEquals(0*GB, node_0.getUsedResource().getMemory());
+    assertEquals(4*GB, 
+        assignment.getExcessReservation().getContainer().getResource().getMemory());
   }
   
   
@@ -1204,7 +1223,8 @@ public class TestLeafQueue {
     FiCaSchedulerNode node_2 = TestUtils.getMockNode(host_2, rack_2, 0, 8*GB);
 
     final int numNodes = 3;
-    Resource clusterResource = Resources.createResource(numNodes * (8*GB));
+    Resource clusterResource = 
+        Resources.createResource(numNodes * (8*GB), numNodes * 16);
     when(csContext.getNumClusterNodes()).thenReturn(numNodes);
     
     // Setup resource-requests and submit
@@ -1344,7 +1364,8 @@ public class TestLeafQueue {
     FiCaSchedulerNode node_2 = TestUtils.getMockNode(host_2, rack_2, 0, 8*GB);
 
     final int numNodes = 3;
-    Resource clusterResource = Resources.createResource(numNodes * (8*GB));
+    Resource clusterResource = 
+        Resources.createResource(numNodes * (8*GB), 1);
     when(csContext.getNumClusterNodes()).thenReturn(numNodes);
     
     // Setup resource-requests and submit
@@ -1473,7 +1494,8 @@ public class TestLeafQueue {
     FiCaSchedulerNode node_1_0 = TestUtils.getMockNode(host_1_0, rack_1, 0, 8*GB);
     
     final int numNodes = 3;
-    Resource clusterResource = Resources.createResource(numNodes * (8*GB));
+    Resource clusterResource = Resources.createResource(
+        numNodes * (8*GB), numNodes * 16);
     when(csContext.getNumClusterNodes()).thenReturn(numNodes);
 
     // Setup resource-requests and submit

Modified: hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java Tue Jan 22 19:33:02 2013
@@ -45,6 +45,8 @@ import org.apache.hadoop.yarn.api.record
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceCalculator;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.DefaultResourceCalculator;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
@@ -68,6 +70,9 @@ public class TestParentQueue {
   final static int GB = 1024;
   final static String DEFAULT_RACK = "/default";
 
+  private final ResourceCalculator resourceComparator =
+      new DefaultResourceCalculator();
+  
   @Before
   public void setUp() throws Exception {
     rmContext = TestUtils.getMockRMContext();
@@ -78,11 +83,17 @@ public class TestParentQueue {
     when(csContext.getConf()).thenReturn(conf);
     when(csContext.getConfiguration()).thenReturn(csConf);
     when(csContext.getMinimumResourceCapability()).thenReturn(
-        Resources.createResource(GB));
+        Resources.createResource(GB, 1));
     when(csContext.getMaximumResourceCapability()).thenReturn(
-        Resources.createResource(16*GB));
+        Resources.createResource(16*GB, 32));
     when(csContext.getClusterResources()).
-        thenReturn(Resources.createResource(100 * 16 * GB));
+        thenReturn(Resources.createResource(100 * 16 * GB, 100 * 32));
+    when(csContext.getApplicationComparator()).
+    thenReturn(CapacityScheduler.applicationComparator);
+    when(csContext.getQueueComparator()).
+    thenReturn(CapacityScheduler.queueComparator);
+    when(csContext.getResourceCalculator()).
+    thenReturn(resourceComparator);
   }
   
   private static final String A = "a";
@@ -104,7 +115,7 @@ public class TestParentQueue {
   private FiCaSchedulerApp getMockApplication(int appId, String user) {
     FiCaSchedulerApp application = mock(FiCaSchedulerApp.class);
     doReturn(user).when(application).getUser();
-    doReturn(Resources.createResource(0)).when(application).getHeadroom();
+    doReturn(Resources.createResource(0, 0)).when(application).getHeadroom();
     return application;
   }
 
@@ -192,12 +203,11 @@ public class TestParentQueue {
     CSQueue root = 
         CapacityScheduler.parseQueue(csContext, csConf, null, 
             CapacitySchedulerConfiguration.ROOT, queues, queues, 
-            CapacityScheduler.queueComparator, 
-            CapacityScheduler.applicationComparator,
             TestUtils.spyHook);
 
     // Setup some nodes
     final int memoryPerNode = 10;
+    final int coresPerNode = 16;
     final int numNodes = 2;
     
     FiCaSchedulerNode node_0 = 
@@ -206,7 +216,8 @@ public class TestParentQueue {
         TestUtils.getMockNode("host_1", DEFAULT_RACK, 0, memoryPerNode*GB);
     
     final Resource clusterResource = 
-        Resources.createResource(numNodes * (memoryPerNode*GB));
+        Resources.createResource(numNodes * (memoryPerNode*GB),
+            numNodes * coresPerNode);
     when(csContext.getNumClusterNodes()).thenReturn(numNodes);
 
     // Start testing
@@ -286,8 +297,7 @@ public class TestParentQueue {
     try {
       CapacityScheduler.parseQueue(csContext, csConf, null,
           CapacitySchedulerConfiguration.ROOT, queues, queues,
-          CapacityScheduler.queueComparator,
-          CapacityScheduler.applicationComparator, TestUtils.spyHook);
+          TestUtils.spyHook);
     } catch (IllegalArgumentException ie) {
       exceptionOccured = true;
     }
@@ -301,8 +311,7 @@ public class TestParentQueue {
     try {
       CapacityScheduler.parseQueue(csContext, csConf, null,
           CapacitySchedulerConfiguration.ROOT, queues, queues,
-          CapacityScheduler.queueComparator,
-          CapacityScheduler.applicationComparator, TestUtils.spyHook);
+          TestUtils.spyHook);
     } catch (IllegalArgumentException ie) {
       exceptionOccured = true;
     }
@@ -316,8 +325,7 @@ public class TestParentQueue {
     try {
       CapacityScheduler.parseQueue(csContext, csConf, null,
           CapacitySchedulerConfiguration.ROOT, queues, queues,
-          CapacityScheduler.queueComparator,
-          CapacityScheduler.applicationComparator, TestUtils.spyHook);
+          TestUtils.spyHook);
     } catch (IllegalArgumentException ie) {
       exceptionOccured = true;
     }
@@ -394,12 +402,11 @@ public class TestParentQueue {
     CSQueue root = 
         CapacityScheduler.parseQueue(csContext, csConf, null, 
             CapacitySchedulerConfiguration.ROOT, queues, queues, 
-            CapacityScheduler.queueComparator, 
-            CapacityScheduler.applicationComparator,
             TestUtils.spyHook);
     
     // Setup some nodes
     final int memoryPerNode = 10;
+    final int coresPerNode = 16;
     final int numNodes = 3;
     
     FiCaSchedulerNode node_0 = 
@@ -410,7 +417,8 @@ public class TestParentQueue {
         TestUtils.getMockNode("host_2", DEFAULT_RACK, 0, memoryPerNode*GB);
     
     final Resource clusterResource = 
-        Resources.createResource(numNodes * (memoryPerNode*GB));
+        Resources.createResource(numNodes * (memoryPerNode*GB), 
+            numNodes * coresPerNode);
     when(csContext.getNumClusterNodes()).thenReturn(numNodes);
 
     // Start testing
@@ -510,8 +518,6 @@ public class TestParentQueue {
     Map<String, CSQueue> queues = new HashMap<String, CSQueue>(); 
     CapacityScheduler.parseQueue(csContext, csConf, null, 
         CapacitySchedulerConfiguration.ROOT, queues, queues, 
-        CapacityScheduler.queueComparator, 
-        CapacityScheduler.applicationComparator,
         TestUtils.spyHook);
   }
   
@@ -529,8 +535,6 @@ public class TestParentQueue {
     Map<String, CSQueue> queues = new HashMap<String, CSQueue>(); 
     CapacityScheduler.parseQueue(csContext, csConf, null, 
         CapacitySchedulerConfiguration.ROOT, queues, queues, 
-        CapacityScheduler.queueComparator, 
-        CapacityScheduler.applicationComparator,
         TestUtils.spyHook);
   }
   
@@ -553,8 +557,6 @@ public class TestParentQueue {
     try {
       CapacityScheduler.parseQueue(csContext, csConf, null, 
           CapacitySchedulerConfiguration.ROOT, queues, queues, 
-          CapacityScheduler.queueComparator, 
-          CapacityScheduler.applicationComparator,
           TestUtils.spyHook);
     } catch (IllegalArgumentException e) {
       fail("Failed to create queues with 0 capacity: " + e);
@@ -571,12 +573,11 @@ public class TestParentQueue {
     CSQueue root = 
         CapacityScheduler.parseQueue(csContext, csConf, null, 
             CapacitySchedulerConfiguration.ROOT, queues, queues, 
-            CapacityScheduler.queueComparator, 
-            CapacityScheduler.applicationComparator,
             TestUtils.spyHook);
 
     // Setup some nodes
     final int memoryPerNode = 10;
+    final int coresPerNode = 16;
     final int numNodes = 2;
     
     FiCaSchedulerNode node_0 = 
@@ -585,7 +586,8 @@ public class TestParentQueue {
         TestUtils.getMockNode("host_1", DEFAULT_RACK, 0, memoryPerNode*GB);
     
     final Resource clusterResource = 
-        Resources.createResource(numNodes * (memoryPerNode*GB));
+        Resources.createResource(numNodes * (memoryPerNode*GB), 
+            numNodes * coresPerNode);
     when(csContext.getNumClusterNodes()).thenReturn(numNodes);
 
     // Start testing
@@ -637,12 +639,11 @@ public class TestParentQueue {
     CSQueue root = 
         CapacityScheduler.parseQueue(csContext, csConf, null, 
             CapacitySchedulerConfiguration.ROOT, queues, queues, 
-            CapacityScheduler.queueComparator, 
-            CapacityScheduler.applicationComparator,
             TestUtils.spyHook);
 
     // Setup some nodes
     final int memoryPerNode = 10;
+    final int coresPerNode = 10;
     final int numNodes = 2;
     
     FiCaSchedulerNode node_0 = 
@@ -651,7 +652,8 @@ public class TestParentQueue {
         TestUtils.getMockNode("host_1", DEFAULT_RACK, 0, memoryPerNode*GB);
     
     final Resource clusterResource = 
-        Resources.createResource(numNodes * (memoryPerNode*GB));
+        Resources.createResource(numNodes * (memoryPerNode*GB),
+            numNodes * coresPerNode);
     when(csContext.getNumClusterNodes()).thenReturn(numNodes);
 
     // Start testing
@@ -721,8 +723,6 @@ public class TestParentQueue {
     CSQueue root = 
         CapacityScheduler.parseQueue(csContext, csConf, null, 
             CapacitySchedulerConfiguration.ROOT, queues, queues, 
-            CapacityScheduler.queueComparator, 
-            CapacityScheduler.applicationComparator,
             TestUtils.spyHook);
 
     UserGroupInformation user = UserGroupInformation.getCurrentUser();

Modified: hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java Tue Jan 22 19:33:02 2013
@@ -118,7 +118,7 @@ public class TestUtils {
       RecordFactory recordFactory) {
     ResourceRequest request = 
         recordFactory.newRecordInstance(ResourceRequest.class);
-    Resource capability = Resources.createResource(memory);
+    Resource capability = Resources.createResource(memory, 1);
     
     request.setNumContainers(numContainers);
     request.setHostName(hostName);
@@ -153,7 +153,7 @@ public class TestUtils {
     RMNode rmNode = mock(RMNode.class);
     when(rmNode.getNodeID()).thenReturn(nodeId);
     when(rmNode.getTotalCapability()).thenReturn(
-        Resources.createResource(capability));
+        Resources.createResource(capability, 1));
     when(rmNode.getNodeAddress()).thenReturn(host+":"+port);
     when(rmNode.getHostName()).thenReturn(host);
     when(rmNode.getRackName()).thenReturn(rack);

Modified: hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java Tue Jan 22 19:33:02 2013
@@ -1275,4 +1275,86 @@ public class TestFairScheduler {
     FSSchedulerApp app2 = scheduler.applications.get(attId2);
     assertNull("The application was allowed", app2);
   }
+  
+  @Test
+  public void testMultipleNodesSingleRackRequest() throws Exception {
+    RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(1024));
+    RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(1024));
+    RMNode node3 = MockNodes.newNodeInfo(2, Resources.createResource(1024));
+    NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
+    scheduler.handle(nodeEvent1);
+    NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2);
+    scheduler.handle(nodeEvent2);
+    
+    ApplicationAttemptId appId = createAppAttemptId(this.APP_ID++, this.ATTEMPT_ID++);
+    scheduler.addApplication(appId, "queue1", "user1");
+    
+    // 1 request with 2 nodes on the same rack. another request with 1 node on
+    // a different rack
+    List<ResourceRequest> asks = new ArrayList<ResourceRequest>();
+    asks.add(createResourceRequest(1024, node1.getHostName(), 1, 1));
+    asks.add(createResourceRequest(1024, node2.getHostName(), 1, 1));
+    asks.add(createResourceRequest(1024, node3.getHostName(), 1, 1));
+    asks.add(createResourceRequest(1024, node1.getRackName(), 1, 1));
+    asks.add(createResourceRequest(1024, node3.getRackName(), 1, 1));
+    asks.add(createResourceRequest(1024, RMNode.ANY, 1, 2));
+
+    scheduler.allocate(appId, asks, new ArrayList<ContainerId>());
+    
+    // node 1 checks in
+    scheduler.update();
+    NodeUpdateSchedulerEvent updateEvent1 = new NodeUpdateSchedulerEvent(node1,
+      new ArrayList<ContainerStatus>(), new ArrayList<ContainerStatus>());
+    scheduler.handle(updateEvent1);
+    // should assign node local
+    assertEquals(1, scheduler.applications.get(appId).getLiveContainers().size());
+
+    // node 2 checks in
+    scheduler.update();
+    NodeUpdateSchedulerEvent updateEvent2 = new NodeUpdateSchedulerEvent(node2,
+      new ArrayList<ContainerStatus>(), new ArrayList<ContainerStatus>());
+    scheduler.handle(updateEvent2);
+    // should assign rack local
+    assertEquals(2, scheduler.applications.get(appId).getLiveContainers().size());
+  }
+  
+  @Test
+  public void testFifoWithinQueue() throws Exception {
+    RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(3072));
+    NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
+    scheduler.handle(nodeEvent1);
+    
+    // Even if submitted at exact same time, apps will be deterministically
+    // ordered by name.
+    ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1",
+        "user1", 2);
+    ApplicationAttemptId attId2 = createSchedulingRequest(1024, "queue1",
+        "user1", 2);
+    FSSchedulerApp app1 = scheduler.applications.get(attId1);
+    FSSchedulerApp app2 = scheduler.applications.get(attId2);
+    
+    FSLeafQueue queue1 = scheduler.getQueueManager().getLeafQueue("queue1");
+    queue1.setSchedulingMode(SchedulingMode.FIFO);
+    
+    scheduler.update();
+
+    // First two containers should go to app 1, third should go to app 2.
+    // Because tests set assignmultiple to false, each heartbeat assigns a single
+    // container.
+    
+    NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1,
+      new ArrayList<ContainerStatus>(), new ArrayList<ContainerStatus>());
+
+    scheduler.handle(updateEvent);
+    assertEquals(1, app1.getLiveContainers().size());
+    assertEquals(0, app2.getLiveContainers().size());
+    
+    scheduler.handle(updateEvent);
+    assertEquals(2, app1.getLiveContainers().size());
+    assertEquals(0, app2.getLiveContainers().size());
+    
+    scheduler.handle(updateEvent);
+    assertEquals(2, app1.getLiveContainers().size());
+    assertEquals(1, app2.getLiveContainers().size());
+  }
 }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java Tue Jan 22 19:33:02 2013
@@ -71,9 +71,9 @@ public class TestFifoScheduler {
   
   private org.apache.hadoop.yarn.server.resourcemanager.NodeManager
       registerNode(String hostName, int containerManagerPort, int nmHttpPort,
-          String rackName, int memory) throws IOException {
+          String rackName, Resource capability) throws IOException {
     return new org.apache.hadoop.yarn.server.resourcemanager.NodeManager(
-        hostName, containerManagerPort, nmHttpPort, rackName, memory,
+        hostName, containerManagerPort, nmHttpPort, rackName, capability,
         resourceManager.getResourceTrackerService(), resourceManager
             .getRMContext());
   }
@@ -121,13 +121,15 @@ public class TestFifoScheduler {
     // Register node1
     String host_0 = "host_0";
     org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 = 
-      registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, 4 * GB);
+      registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, 
+          Resources.createResource(4 * GB, 1));
     nm_0.heartbeat();
     
     // Register node2
     String host_1 = "host_1";
     org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_1 = 
-      registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, 2 * GB);
+      registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, 
+          Resources.createResource(2 * GB, 1));
     nm_1.heartbeat();
 
     // ResourceRequest priorities

Modified: hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientTokens.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientTokens.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientTokens.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientTokens.java Tue Jan 22 19:33:02 2013
@@ -52,8 +52,10 @@ import org.apache.hadoop.yarn.api.protoc
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ClientToken;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
@@ -67,6 +69,7 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.service.AbstractService;
 import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.apache.hadoop.yarn.util.ProtoUtils;
 import org.apache.hadoop.yarn.util.Records;
 import org.junit.Test;
 
@@ -106,14 +109,14 @@ public class TestClientTokens {
   private static class CustomAM extends AbstractService implements
       CustomProtocol {
 
-    private final ApplicationId appId;
+    private final ApplicationAttemptId appAttemptId;
     private final String secretKey;
     private InetSocketAddress address;
     private boolean pinged = false;
 
-    public CustomAM(ApplicationId appId, String secretKeyStr) {
+    public CustomAM(ApplicationAttemptId appId, String secretKeyStr) {
       super("CustomAM");
-      this.appId = appId;
+      this.appAttemptId = appId;
       this.secretKey = secretKeyStr;
     }
 
@@ -128,7 +131,7 @@ public class TestClientTokens {
 
       ClientToAMTokenSecretManager secretManager = null;
       byte[] bytes = Base64.decodeBase64(this.secretKey);
-      secretManager = new ClientToAMTokenSecretManager(this.appId, bytes);
+      secretManager = new ClientToAMTokenSecretManager(this.appAttemptId, bytes);
       Server server;
       try {
         server =
@@ -216,7 +219,7 @@ public class TestClientTokens {
     GetApplicationReportResponse reportResponse =
         rm.getClientRMService().getApplicationReport(request);
     ApplicationReport appReport = reportResponse.getApplicationReport();
-    String clientTokenEncoded = appReport.getClientToken();
+    ClientToken clientToken = appReport.getClientToken();
 
     // Wait till AM is 'launched'
     int waitTime = 0;
@@ -226,9 +229,11 @@ public class TestClientTokens {
     Assert.assertNotNull(containerManager.clientTokensSecret);
 
     // Start the AM with the correct shared-secret.
+    ApplicationAttemptId appAttemptId =
+        app.getAppAttempts().keySet().iterator().next();
+    Assert.assertNotNull(appAttemptId);
     final CustomAM am =
-        new CustomAM(app.getApplicationId(),
-          containerManager.clientTokensSecret);
+        new CustomAM(appAttemptId, containerManager.clientTokensSecret);
     am.init(conf);
     am.start();
 
@@ -249,21 +254,19 @@ public class TestClientTokens {
 
     // Verify denial for a malicious user
     UserGroupInformation ugi = UserGroupInformation.createRemoteUser("me");
-    Token<ClientTokenIdentifier> clientToken =
-        new Token<ClientTokenIdentifier>();
-    clientToken.decodeFromUrlString(clientTokenEncoded);
-    // RPC layer client expects ip:port as service for tokens
-    SecurityUtil.setTokenService(clientToken, am.address);
+    Token<ClientTokenIdentifier> token =
+        ProtoUtils.convertFromProtoFormat(clientToken, am.address);
 
     // Malicious user, messes with appId
     ClientTokenIdentifier maliciousID =
-        new ClientTokenIdentifier(BuilderUtils.newApplicationId(app
-          .getApplicationId().getClusterTimestamp(), 42));
+        new ClientTokenIdentifier(BuilderUtils.newApplicationAttemptId(
+          BuilderUtils.newApplicationId(app.getApplicationId()
+            .getClusterTimestamp(), 42), 43));
 
     Token<ClientTokenIdentifier> maliciousToken =
         new Token<ClientTokenIdentifier>(maliciousID.getBytes(),
-          clientToken.getPassword(), clientToken.getKind(),
-          clientToken.getService());
+          token.getPassword(), token.getKind(),
+          token.getService());
     ugi.addToken(maliciousToken);
 
     try {
@@ -297,7 +300,7 @@ public class TestClientTokens {
 
     // Now for an authenticated user
     ugi = UserGroupInformation.createRemoteUser("me");
-    ugi.addToken(clientToken);
+    ugi.addToken(token);
 
     ugi.doAs(new PrivilegedExceptionAction<Void>() {
       @Override
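
The token handling in this test changes from decoding a URL-encoded string scoped to an ApplicationId to converting a typed ClientToken, scoped to an ApplicationAttemptId, into an RPC-layer token via ProtoUtils. A short sketch of the new flow, using only the calls visible in the hunks above:

    // The application report now exposes a typed ClientToken rather than an encoded string.
    ClientToken clientToken = appReport.getClientToken();
    // The AM-side secret manager is keyed by the application *attempt* id.
    ApplicationAttemptId appAttemptId =
        app.getAppAttempts().keySet().iterator().next();
    final CustomAM am = new CustomAM(appAttemptId, containerManager.clientTokensSecret);
    // Convert to an RPC token bound to the AM address and hand it to the UGI.
    Token<ClientTokenIdentifier> token =
        ProtoUtils.convertFromProtoFormat(clientToken, am.address);
    ugi.addToken(token);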

Modified: hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java Tue Jan 22 19:33:02 2013
@@ -424,8 +424,8 @@ public class TestRMWebServicesCapacitySc
     assertEquals("absoluteUsedCapacity doesn't match",
         0, info.absoluteUsedCapacity, 1e-3f);
     assertEquals("numApplications doesn't match", 0, info.numApplications);
-    assertTrue("usedResources doesn't match",
-        info.usedResources.matches("memory: 0"));
+    assertTrue("usedResources doesn't match ",
+        info.usedResources.matches("<memory:0, vCores:0>"));
     assertTrue("queueName doesn't match, got: " + info.queueName
         + " expected: " + q, qshortName.matches(info.queueName));
     assertTrue("state doesn't match",

Modified: hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer Tue Jan 22 19:33:02 2013
@@ -1 +1,14 @@
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
 org.apache.hadoop.yarn.server.resourcemanager.security.TestDelegationTokenRenewer$Renewer

Modified: hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java Tue Jan 22 19:33:02 2013
@@ -218,7 +218,7 @@ public class TestContainerManagerSecurit
     dummyIdentifier.readFields(di);
 
     // Malice user modifies the resource amount
-    Resource modifiedResource = BuilderUtils.newResource(2048);
+    Resource modifiedResource = BuilderUtils.newResource(2048, 1);
     ContainerTokenIdentifier modifiedIdentifier =
         new ContainerTokenIdentifier(dummyIdentifier.getContainerID(),
           dummyIdentifier.getNmHostAddress(), "testUser", modifiedResource,
@@ -401,23 +401,9 @@ public class TestContainerManagerSecurit
       UnsupportedFileSystemException, YarnRemoteException,
       InterruptedException {
 
-    // TODO: Use a resource to work around bugs. Today NM doesn't create local
-    // app-dirs if there are no file to download!!
-    String fileName = "testFile-" + appID.toString();
-    File testFile = new File(localDir.getAbsolutePath(), fileName);
-    FileWriter tmpFile = new FileWriter(testFile);
-    tmpFile.write("testing");
-    tmpFile.close();
-    URL testFileURL = ConverterUtils.getYarnUrlFromPath(FileContext
-        .getFileContext().makeQualified(
-            new Path(localDir.getAbsolutePath(), fileName)));
-    LocalResource rsrc = BuilderUtils.newLocalResource(testFileURL,
-        LocalResourceType.FILE, LocalResourceVisibility.PRIVATE, testFile
-            .length(), testFile.lastModified());
-
     ContainerLaunchContext amContainer = BuilderUtils
         .newContainerLaunchContext(null, "testUser", BuilderUtils
-            .newResource(1024), Collections.singletonMap(fileName, rsrc),
+            .newResource(1024, 1), Collections.<String, LocalResource>emptyMap(),
             new HashMap<String, String>(), Arrays.asList("sleep", "100"),
             new HashMap<String, ByteBuffer>(), null,
             new HashMap<ApplicationAccessType, String>());
@@ -495,7 +481,7 @@ public class TestContainerManagerSecurit
     // Request a container allocation.
     List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
     ask.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0), "*",
-        BuilderUtils.newResource(1024), 1));
+        BuilderUtils.newResource(1024, 1), 1));
 
     AllocateRequest allocateRequest = BuilderUtils.newAllocateRequest(
         BuilderUtils.newApplicationAttemptId(appID, 1), 0, 0F, ask,
@@ -596,7 +582,9 @@ public class TestContainerManagerSecurit
     ContainerLaunchContext context =
         BuilderUtils.newContainerLaunchContext(tokenId.getContainerID(),
             "testUser",
-            BuilderUtils.newResource(tokenId.getResource().getMemory()),
+            BuilderUtils.newResource(
+                tokenId.getResource().getMemory(), 
+                tokenId.getResource().getVirtualCores()),
             new HashMap<String, LocalResource>(),
             new HashMap<String, String>(), new ArrayList<String>(),
             new HashMap<String, ByteBuffer>(), null,

Modified: hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java Tue Jan 22 19:33:02 2013
@@ -286,9 +286,12 @@ public class WebAppProxyServlet extends 
         		"please try the history server");
         return;
       }
-      URI trackingUri = ProxyUriUtils.getUriFromAMUrl(
-          applicationReport.getOriginalTrackingUrl());
-      if(applicationReport.getOriginalTrackingUrl().equals("N/A")) {
+      String original = applicationReport.getOriginalTrackingUrl();
+      URI trackingUri = null;
+      if (original != null) {
+        trackingUri = ProxyUriUtils.getUriFromAMUrl(original);
+      }
+      if(original == null || original.equals("N/A")) {
         String message;
         switch(applicationReport.getFinalApplicationStatus()) {
           case FAILED:
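The hunk above reorders the proxy's checks so a tracking URI is only built when the application master actually reported one; both null and the "N/A" placeholder now fall through to the explanatory message instead of being parsed. A standalone sketch of that guard (the helper name and the http:// prefixing are illustrative, not the servlet's exact code):

    import java.net.URI;
    import java.net.URISyntaxException;

    final class TrackingUrlGuardSketch {
      // Sketch only: return a tracking URI when one was reported, else null so
      // the caller can render an explanatory page instead of proxying.
      static URI trackingUriOrNull(String originalTrackingUrl)
          throws URISyntaxException {
        if (originalTrackingUrl == null || "N/A".equals(originalTrackingUrl)) {
          return null;
        }
        return new URI("http://" + originalTrackingUrl);
      }
    }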

Modified: hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm Tue Jan 22 19:33:02 2013
@@ -132,20 +132,45 @@ Hadoop MapReduce Next Generation - Fair 
     * Whether to use the username associated with the allocation as the default 
       queue name, in the event that a queue name is not specified. If this is set 
       to "false", all jobs have a shared default queue, called "default".
+      Defaults to true.
 
  * <<<yarn.scheduler.fair.preemption>>>
 
     * Whether to use preemption. Note that preemption is experimental in the current
-      version.
+      version. Defaults to false.
 
  * <<<yarn.scheduler.fair.sizebasedweight>>>
   
     * Whether to assign shares to individual apps based on their size, rather than
-      providing an equal share to all apps regardless of size.
+      providing an equal share to all apps regardless of size. Defaults to false.
 
  * <<<yarn.scheduler.fair.assignmultiple>>>
 
-    * Whether to allow multiple container assignments in one heartbeat.
+    * Whether to allow multiple container assignments in one heartbeat. Defaults
+      to false.
+
+ * <<<yarn.scheduler.fair.max.assign>>>
+
+    * If assignmultiple is true, the maximum number of containers that can be
+      assigned in one heartbeat. Defaults to -1, which sets no limit.
+
+ * <<<locality.threshold.node>>>
+
+    * For applications that request containers on particular nodes, the number of
+      scheduling opportunities since the last container assignment to wait before
+      accepting a placement on another node. Expressed as a float between 0 and 1,
+      which, as a fraction of the cluster size, is the number of scheduling
+      opportunities to pass up. The default value of -1.0 means don't pass up any
+      scheduling opportunities.
+
+ * <<<locality.threshold.rack>>>
+
+    * For applications that request containers on particular racks, the number of
+      scheduling opportunities since the last container assignment to wait before
+      accepting a placement on another rack. Expressed as a float between 0 and 1,
+      which, as a fraction of the cluster size, is the number of scheduling
+      opportunities to pass up. The default value of -1.0 means don't pass up any
+      scheduling opportunities.
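For orientation only (not part of this patch), the scheduler-wide properties above are ordinary entries in yarn-site.xml. A minimal sketch with illustrative values, not recommendations:

    <!-- Sketch only: illustrative values for the fair scheduler knobs above. -->
    <property>
      <name>yarn.scheduler.fair.preemption</name>
      <value>true</value>
    </property>
    <property>
      <name>yarn.scheduler.fair.assignmultiple</name>
      <value>true</value>
    </property>
    <property>
      <name>yarn.scheduler.fair.max.assign</name>
      <value>3</value>
    </property>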
 
 Allocation file format
 
@@ -166,6 +191,14 @@ Allocation file format
    * schedulingMode: either "fifo" or "fair" depending on the in-queue scheduling
      policy desired
 
+   * aclSubmitApps: a list of users that can submit apps to the queue. The default
+     value of "*" means that any user can submit apps. A queue inherits the ACL of
+     its parent, so if queue2 descends from queue1, and user1 is in queue1's ACL,
+     and user2 is in queue2's ACL, then both users may submit to queue2.
+
+   * minSharePreemptionTimeout: number of seconds the queue is under its minimum share
+     before it will try to preempt containers to take resources from other queues.
+
  * <<User elements>>, which represent settings governing the behavior of individual 
      users. They can contain a single property: maxRunningApps, a limit on the 
      number of running apps for a particular user.
@@ -173,6 +206,10 @@ Allocation file format
  * <<A userMaxAppsDefault element>>, which sets the default running app limit 
    for any users whose limit is not otherwise specified.
 
+ * <<A fairSharePreemptionTimeout element>>, which sets the number of seconds a
+   queue is under its fair share before it will try to preempt containers to take
+   resources from other queues.
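A minimal sketch of how the newly documented elements might appear in an allocation file, with hypothetical queue and user names and illustrative timeout values; the document's own, fuller example follows below:

    <?xml version="1.0"?>
    <!-- Sketch only: hypothetical queue/users; timeout values are in seconds. -->
    <allocations>
      <queue name="analytics">
        <aclSubmitApps>alice,bob</aclSubmitApps>
        <minSharePreemptionTimeout>60</minSharePreemptionTimeout>
      </queue>
      <userMaxAppsDefault>5</userMaxAppsDefault>
      <fairSharePreemptionTimeout>600</fairSharePreemptionTimeout>
    </allocations>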
+
   An example allocation file is given here:
 
 ---

Modified: hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/pom.xml?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/pom.xml (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/hadoop-yarn/pom.xml Tue Jan 22 19:33:02 2013
@@ -149,9 +149,10 @@
         <groupId>org.apache.rat</groupId>
         <artifactId>apache-rat-plugin</artifactId>
         <configuration>
-          <includes>
-            <include>pom.xml</include>
-          </includes>
+          <excludes>
+            <exclude>conf/slaves</exclude>
+            <exclude>conf/container-executor.cfg</exclude>
+          </excludes>
         </configuration>
       </plugin>
       <plugin>

Modified: hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/pom.xml?rev=1437113&r1=1437112&r2=1437113&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/pom.xml (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-yarn-project/pom.xml Tue Jan 22 19:33:02 2013
@@ -213,9 +213,9 @@ with tarfile.open(tar_name, "w:gz") as t
         <groupId>org.apache.rat</groupId>
         <artifactId>apache-rat-plugin</artifactId>
         <configuration>
-          <includes>
-            <include>pom.xml</include>
-          </includes>
+          <excludes>
+            <exclude>CHANGES.txt</exclude>
+          </excludes>
         </configuration>
       </plugin>
     </plugins>