You are viewing a plain text version of this content. The canonical link for it is here.
Posted to mapreduce-commits@hadoop.apache.org by to...@apache.org on 2012/02/10 02:49:30 UTC
svn commit: r1242635 [9/10] - in
/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project: ./ bin/ conf/
hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/
hadoop-mapreduce-client/hadoop-mapreduce-client-app/s...
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java Fri Feb 10 01:49:08 2012
@@ -25,14 +25,12 @@ import org.apache.hadoop.yarn.server.res
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.WebApp;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
/**
* The RM webapp
*/
-public class RMWebApp extends WebApp {
- static final String APP_ID = "app.id";
- static final String QUEUE_NAME = "queue.name";
- static final String NODE_STATE = "node.state";
+public class RMWebApp extends WebApp implements YarnWebParams {
private final ResourceManager rm;
@@ -53,9 +51,9 @@ public class RMWebApp extends WebApp {
}
route("/", RmController.class);
route(pajoin("/nodes", NODE_STATE), RmController.class, "nodes");
- route("/apps", RmController.class);
+ route(pajoin("/apps", APP_STATE), RmController.class);
route("/cluster", RmController.class, "about");
- route(pajoin("/app", APP_ID), RmController.class, "app");
+ route(pajoin("/app", APPLICATION_ID), RmController.class, "app");
route("/scheduler", RmController.class, "scheduler");
route(pajoin("/queue", QUEUE_NAME), RmController.class, "queue");
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java Fri Feb 10 01:49:08 2012
@@ -166,6 +166,12 @@ public class RMWebServices {
if (!(nodeInfo.getState().equalsIgnoreCase(filterState))) {
continue;
}
+ } else {
+ // No filter. User is asking for all nodes. Make sure you skip the
+ // unhealthy nodes.
+ if (ni.getState() == RMNodeState.UNHEALTHY) {
+ continue;
+ }
}
if ((healthState != null) && (!healthState.isEmpty())) {
LOG.info("heatlh state is : " + healthState);
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java Fri Feb 10 01:49:08 2012
@@ -18,9 +18,9 @@
package org.apache.hadoop.yarn.server.resourcemanager.webapp;
-import static org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebApp.APP_ID;
import static org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebApp.QUEUE_NAME;
import static org.apache.hadoop.yarn.util.StringHelper.join;
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.APPLICATION_ID;
import javax.servlet.http.HttpServletResponse;
@@ -64,7 +64,7 @@ public class RmController extends Contro
}
public void app() {
- String aid = $(APP_ID);
+ String aid = $(APPLICATION_ID);
if (aid.isEmpty()) {
setStatus(HttpServletResponse.SC_BAD_REQUEST);
setTitle("Bad request: requires application ID");
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java Fri Feb 10 01:49:08 2012
@@ -21,6 +21,8 @@ package org.apache.hadoop.yarn.server.re
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.view.TwoColumnLayout;
+import static org.apache.hadoop.yarn.util.StringHelper.sjoin;
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.APP_STATE;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
// Do NOT rename/refactor this to RMView as it will wreak havoc
@@ -36,10 +38,14 @@ public class RmView extends TwoColumnLay
set(DATATABLES_ID, "apps");
set(initID(DATATABLES, "apps"), appsTableInit());
setTableStyles(html, "apps", ".queue {width:6em}", ".ui {width:8em}");
+
+ // Set the correct title.
+ String reqState = $(APP_STATE);
+ reqState = (reqState == null || reqState.isEmpty() ? "All" : reqState);
+ setTitle(sjoin(reqState, "Applications"));
}
protected void commonPreHead(Page.HTML<_> html) {
- //html.meta_http("refresh", "20");
set(ACCORDION_ID, "nav");
set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
set(THEMESWITCHER_ID, "themeswitcher");
@@ -57,10 +63,11 @@ public class RmView extends TwoColumnLay
private String appsTableInit() {
AppsList list = getInstance(AppsList.class);
- // id, user, name, queue, state, progress, ui, note
+ // id, user, name, queue, starttime, finishtime, state, progress, ui
StringBuilder init = tableInit().
- append(", aoColumns:[{sType:'title-numeric'}, null, null, null, null,").
- append("null,{sType:'title-numeric', bSearchable:false}, null, null]");
+ append(", aoColumns:[{sType:'title-numeric'}, null, null, null, ").
+ append("null, null , null, ").
+ append("null,{sType:'title-numeric', bSearchable:false}, null]");
// Sort by id upon page load
init.append(", aaSorting: [[0, 'asc']]");
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java Fri Feb 10 01:49:08 2012
@@ -31,13 +31,21 @@ import org.apache.hadoop.yarn.server.res
@XmlAccessorType(XmlAccessType.FIELD)
public class ClusterMetricsInfo {
- private static final long MB_IN_GB = 1024;
-
protected int appsSubmitted;
+ protected int appsCompleted;
+ protected int appsPending;
+ protected int appsRunning;
+ protected int appsFailed;
+ protected int appsKilled;
+
protected long reservedMB;
protected long availableMB;
protected long allocatedMB;
+
protected int containersAllocated;
+ protected int containersReserved;
+ protected int containersPending;
+
protected long totalMB;
protected int totalNodes;
protected int lostNodes;
@@ -55,10 +63,20 @@ public class ClusterMetricsInfo {
ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics();
this.appsSubmitted = metrics.getAppsSubmitted();
- this.reservedMB = metrics.getReservedGB() * MB_IN_GB;
- this.availableMB = metrics.getAvailableGB() * MB_IN_GB;
- this.allocatedMB = metrics.getAllocatedGB() * MB_IN_GB;
+ this.appsCompleted = metrics.getAppsCompleted();
+ this.appsPending = metrics.getAppsPending();
+ this.appsRunning = metrics.getAppsRunning();
+ this.appsFailed = metrics.getAppsFailed();
+ this.appsKilled = metrics.getAppsKilled();
+
+ this.reservedMB = metrics.getReservedMB();
+ this.availableMB = metrics.getAvailableMB();
+ this.allocatedMB = metrics.getAllocatedMB();
+
this.containersAllocated = metrics.getAllocatedContainers();
+ this.containersPending = metrics.getPendingContainers();
+ this.containersReserved = metrics.getReservedContainers();
+
this.totalMB = availableMB + reservedMB + allocatedMB;
this.activeNodes = clusterMetrics.getNumActiveNMs();
this.lostNodes = clusterMetrics.getNumLostNMs();
@@ -73,6 +91,26 @@ public class ClusterMetricsInfo {
return this.appsSubmitted;
}
+ public int getAppsCompleted() {
+ return appsCompleted;
+ }
+
+ public int getAppsPending() {
+ return appsPending;
+ }
+
+ public int getAppsRunning() {
+ return appsRunning;
+ }
+
+ public int getAppsFailed() {
+ return appsFailed;
+ }
+
+ public int getAppsKilled() {
+ return appsKilled;
+ }
+
public long getReservedMB() {
return this.reservedMB;
}
@@ -89,6 +127,14 @@ public class ClusterMetricsInfo {
return this.containersAllocated;
}
+ public int getReservedContainers() {
+ return this.containersReserved;
+ }
+
+ public int getPendingContainers() {
+ return this.containersPending;
+ }
+
public long getTotalMB() {
return this.totalMB;
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UserMetricsInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UserMetricsInfo.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UserMetricsInfo.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UserMetricsInfo.java Fri Feb 10 01:49:08 2012
@@ -31,9 +31,12 @@ import org.apache.hadoop.yarn.server.res
@XmlAccessorType(XmlAccessType.FIELD)
public class UserMetricsInfo {
- private static final long MB_IN_GB = 1024;
-
protected int appsSubmitted;
+ protected int appsCompleted;
+ protected int appsPending;
+ protected int appsRunning;
+ protected int appsFailed;
+ protected int appsKilled;
protected int runningContainers;
protected int pendingContainers;
protected int reservedContainers;
@@ -56,13 +59,21 @@ public class UserMetricsInfo {
if (userMetrics != null) {
this.userMetricsAvailable = true;
+
this.appsSubmitted = userMetrics.getAppsSubmitted();
+ this.appsCompleted = metrics.getAppsCompleted();
+ this.appsPending = metrics.getAppsPending();
+ this.appsRunning = metrics.getAppsRunning();
+ this.appsFailed = metrics.getAppsFailed();
+ this.appsKilled = metrics.getAppsKilled();
+
this.runningContainers = userMetrics.getAllocatedContainers();
this.pendingContainers = userMetrics.getPendingContainers();
this.reservedContainers = userMetrics.getReservedContainers();
- this.reservedMB = userMetrics.getReservedGB() * MB_IN_GB;
- this.pendingMB = userMetrics.getPendingGB() * MB_IN_GB;
- this.allocatedMB = userMetrics.getAllocatedGB() * MB_IN_GB;
+
+ this.reservedMB = userMetrics.getReservedMB();
+ this.pendingMB = userMetrics.getPendingMB();
+ this.allocatedMB = userMetrics.getAllocatedMB();
}
}
@@ -74,6 +85,26 @@ public class UserMetricsInfo {
return this.appsSubmitted;
}
+ public int getAppsCompleted() {
+ return appsCompleted;
+ }
+
+ public int getAppsPending() {
+ return appsPending;
+ }
+
+ public int getAppsRunning() {
+ return appsRunning;
+ }
+
+ public int getAppsFailed() {
+ return appsFailed;
+ }
+
+ public int getAppsKilled() {
+ return appsKilled;
+ }
+
public long getReservedMB() {
return this.reservedMB;
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml Fri Feb 10 01:49:08 2012
@@ -53,9 +53,9 @@
<property>
<name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
- <value>-1</value>
+ <value>100</value>
<description>
- The maximum capacity of the default queue. A value of -1 disables this.
+ The maximum capacity of the default queue.
</description>
</property>
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java Fri Feb 10 01:49:08 2012
@@ -51,18 +51,23 @@ public class MockNodes {
List<RMNode> list = Lists.newArrayList();
for (int i = 0; i < racks; ++i) {
for (int j = 0; j < nodesPerRack; ++j) {
+ if (j == (nodesPerRack - 1)) {
+ // One unhealthy node per rack.
+ list.add(nodeInfo(i, perNode, RMNodeState.UNHEALTHY));
+ }
list.add(newNodeInfo(i, perNode));
}
}
return list;
}
- public static List<RMNode> lostNodes(int racks, int nodesPerRack,
+ public static List<RMNode> deactivatedNodes(int racks, int nodesPerRack,
Resource perNode) {
List<RMNode> list = Lists.newArrayList();
for (int i = 0; i < racks; ++i) {
for (int j = 0; j < nodesPerRack; ++j) {
- list.add(lostNodeInfo(i, perNode, RMNodeState.LOST));
+ RMNodeState[] allStates = RMNodeState.values();
+ list.add(nodeInfo(i, perNode, allStates[j % allStates.length]));
}
}
return list;
@@ -198,15 +203,20 @@ public class MockNodes {
final String httpAddress = httpAddr;
final NodeHealthStatus nodeHealthStatus =
recordFactory.newRecordInstance(NodeHealthStatus.class);
+ if (state != RMNodeState.UNHEALTHY) {
+ nodeHealthStatus.setIsNodeHealthy(true);
+ nodeHealthStatus.setHealthReport("HealthyMe");
+ }
return new MockRMNodeImpl(nodeID, hostName, httpAddress, perNode, rackName,
nodeHealthStatus, nid, hostName, state);
}
- public static RMNode lostNodeInfo(int rack, final Resource perNode, RMNodeState state) {
+ public static RMNode nodeInfo(int rack, final Resource perNode,
+ RMNodeState state) {
return buildRMNode(rack, perNode, state, "N/A");
}
public static RMNode newNodeInfo(int rack, final Resource perNode) {
- return buildRMNode(rack, perNode, null, "localhost:0");
+ return buildRMNode(rack, perNode, RMNodeState.RUNNING, "localhost:0");
}
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java Fri Feb 10 01:49:08 2012
@@ -24,38 +24,22 @@ import junit.framework.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.AMResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerState;
-import org.apache.hadoop.yarn.api.records.NodeReport;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
-import org.junit.After;
-import org.junit.Before;
import org.junit.Test;
public class TestFifoScheduler {
private static final Log LOG = LogFactory.getLog(TestFifoScheduler.class);
- private ResourceManager resourceManager = null;
-
- @Before
- public void setUp() throws Exception {
- Store store = StoreFactory.getStore(new Configuration());
- resourceManager = new ResourceManager(store);
- resourceManager.init(new Configuration());
- }
-
- @After
- public void tearDown() throws Exception {
- }
+ private final int GB = 1024;
@Test
public void test() throws Exception {
@@ -63,7 +47,6 @@ public class TestFifoScheduler {
rootLogger.setLevel(Level.DEBUG);
MockRM rm = new MockRM();
rm.start();
- int GB = 1024;
MockNM nm1 = rm.registerNode("h1:1234", 6 * GB);
MockNM nm2 = rm.registerNode("h2:5678", 4 * GB);
@@ -146,8 +129,48 @@ public class TestFifoScheduler {
rm.stop();
}
+ private void testMinimumAllocation(YarnConfiguration conf)
+ throws Exception {
+ MockRM rm = new MockRM(conf);
+ rm.start();
+
+ // Register node1
+ MockNM nm1 = rm.registerNode("h1:1234", 6 * GB);
+
+ // Submit an application
+ RMApp app1 = rm.submitApp(256);
+
+ // kick the scheduling
+ nm1.nodeHeartbeat(true);
+ RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
+ MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
+ am1.registerAppAttempt();
+ SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(
+ nm1.getNodeId());
+
+ int checkAlloc =
+ conf.getInt("yarn.scheduler.fifo.minimum-allocation-mb", GB);
+ Assert.assertEquals(checkAlloc, report_nm1.getUsedResource().getMemory());
+
+ rm.stop();
+ }
+
+ @Test
+ public void testDefaultMinimumAllocation() throws Exception {
+ testMinimumAllocation(new YarnConfiguration());
+ }
+
+ @Test
+ public void testNonDefaultMinimumAllocation() throws Exception {
+ YarnConfiguration conf = new YarnConfiguration();
+ conf.setInt("yarn.scheduler.fifo.minimum-allocation-mb", 512);
+ testMinimumAllocation(conf);
+ }
+
public static void main(String[] args) throws Exception {
TestFifoScheduler t = new TestFifoScheduler();
t.test();
+ t.testDefaultMinimumAllocation();
+ t.testNonDefaultMinimumAllocation();
}
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java Fri Feb 10 01:49:08 2012
@@ -22,14 +22,14 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.yarn.MockApps;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationMaster;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.records.ApplicationStatus;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
@@ -41,7 +41,6 @@ import com.google.common.collect.Lists;
@InterfaceAudience.Private
public abstract class MockAsm extends MockApps {
- static final int DT = 1000000; // ms
public static class AppMasterBase implements ApplicationMaster {
@Override
@@ -232,9 +231,10 @@ public abstract class MockAsm extends Mo
final String user = newUserName();
final String name = newAppName();
final String queue = newQueue();
- final long start = System.currentTimeMillis() - (int)(Math.random()*DT);
- final long finish = Math.random() < 0.5 ? 0 :
- System.currentTimeMillis() + (int)(Math.random()*DT);
+ final long start = 123456 + i * 1000;
+ final long finish = 234567 + i * 1000;
+ RMAppState[] allStates = RMAppState.values();
+ final RMAppState state = allStates[i % allStates.length];
return new ApplicationBase() {
@Override
public ApplicationId getApplicationId() {
@@ -270,7 +270,7 @@ public abstract class MockAsm extends Mo
}
@Override
public RMAppState getState() {
- return RMAppState.RUNNING;
+ return state;
}
@Override
public StringBuilder getDiagnostics() {
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java Fri Feb 10 01:49:08 2012
@@ -57,16 +57,16 @@ public class TestQueueMetrics {
metrics.incrPendingResources(user, 5, Resources.createResource(15*GB));
// Available resources is set externally, as it depends on dynamic
// configurable cluster/queue resources
- checkResources(queueSource, 0, 0, 0, 0, 100, 15, 5, 0, 0);
+ checkResources(queueSource, 0, 0, 0, 0, 100*GB, 15*GB, 5, 0, 0);
metrics.incrAppsRunning(user);
checkApps(queueSource, 1, 0, 1, 0, 0, 0);
metrics.allocateResources(user, 3, Resources.createResource(2*GB));
- checkResources(queueSource, 6, 3, 3, 0, 100, 9, 2, 0, 0);
+ checkResources(queueSource, 6*GB, 3, 3, 0, 100*GB, 9*GB, 2, 0, 0);
metrics.releaseResources(user, 1, Resources.createResource(2*GB));
- checkResources(queueSource, 4, 2, 3, 1, 100, 9, 2, 0, 0);
+ checkResources(queueSource, 4*GB, 2, 3, 1, 100*GB, 9*GB, 2, 0, 0);
metrics.finishApp(app, RMAppAttemptState.FINISHED);
checkApps(queueSource, 1, 0, 0, 1, 0, 0);
@@ -92,20 +92,20 @@ public class TestQueueMetrics {
metrics.incrPendingResources(user, 5, Resources.createResource(15*GB));
// Available resources is set externally, as it depends on dynamic
// configurable cluster/queue resources
- checkResources(queueSource, 0, 0, 0, 0, 100, 15, 5, 0, 0);
- checkResources(userSource, 0, 0, 0, 0, 10, 15, 5, 0, 0);
+ checkResources(queueSource, 0, 0, 0, 0, 100*GB, 15*GB, 5, 0, 0);
+ checkResources(userSource, 0, 0, 0, 0, 10*GB, 15*GB, 5, 0, 0);
metrics.incrAppsRunning(user);
checkApps(queueSource, 1, 0, 1, 0, 0, 0);
checkApps(userSource, 1, 0, 1, 0, 0, 0);
metrics.allocateResources(user, 3, Resources.createResource(2*GB));
- checkResources(queueSource, 6, 3, 3, 0, 100, 9, 2, 0, 0);
- checkResources(userSource, 6, 3, 3, 0, 10, 9, 2, 0, 0);
+ checkResources(queueSource, 6*GB, 3, 3, 0, 100*GB, 9*GB, 2, 0, 0);
+ checkResources(userSource, 6*GB, 3, 3, 0, 10*GB, 9*GB, 2, 0, 0);
metrics.releaseResources(user, 1, Resources.createResource(2*GB));
- checkResources(queueSource, 4, 2, 3, 1, 100, 9, 2, 0, 0);
- checkResources(userSource, 4, 2, 3, 1, 10, 9, 2, 0, 0);
+ checkResources(queueSource, 4*GB, 2, 3, 1, 100*GB, 9*GB, 2, 0, 0);
+ checkResources(userSource, 4*GB, 2, 3, 1, 10*GB, 9*GB, 2, 0, 0);
metrics.finishApp(app, RMAppAttemptState.FINISHED);
checkApps(queueSource, 1, 0, 0, 1, 0, 0);
@@ -141,10 +141,10 @@ public class TestQueueMetrics {
parentMetrics.setAvailableResourcesToUser(user, Resources.createResource(10*GB));
metrics.setAvailableResourcesToUser(user, Resources.createResource(10*GB));
metrics.incrPendingResources(user, 5, Resources.createResource(15*GB));
- checkResources(queueSource, 0, 0, 0, 0, 100, 15, 5, 0, 0);
- checkResources(parentQueueSource, 0, 0, 0, 0, 100, 15, 5, 0, 0);
- checkResources(userSource, 0, 0, 0, 0, 10, 15, 5, 0, 0);
- checkResources(parentUserSource, 0, 0, 0, 0, 10, 15, 5, 0, 0);
+ checkResources(queueSource, 0, 0, 0, 0, 100*GB, 15*GB, 5, 0, 0);
+ checkResources(parentQueueSource, 0, 0, 0, 0, 100*GB, 15*GB, 5, 0, 0);
+ checkResources(userSource, 0, 0, 0, 0, 10*GB, 15*GB, 5, 0, 0);
+ checkResources(parentUserSource, 0, 0, 0, 0, 10*GB, 15*GB, 5, 0, 0);
metrics.incrAppsRunning(user);
checkApps(queueSource, 1, 0, 1, 0, 0, 0);
@@ -154,17 +154,17 @@ public class TestQueueMetrics {
metrics.reserveResource(user, Resources.createResource(3*GB));
// Available resources is set externally, as it depends on dynamic
// configurable cluster/queue resources
- checkResources(queueSource, 6, 3, 3, 0, 100, 9, 2, 3, 1);
- checkResources(parentQueueSource, 6, 3, 3, 0, 100, 9, 2, 3, 1);
- checkResources(userSource, 6, 3, 3, 0, 10, 9, 2, 3, 1);
- checkResources(parentUserSource, 6, 3, 3, 0, 10, 9, 2, 3, 1);
+ checkResources(queueSource, 6*GB, 3, 3, 0, 100*GB, 9*GB, 2, 3*GB, 1);
+ checkResources(parentQueueSource, 6*GB, 3, 3, 0, 100*GB, 9*GB, 2, 3*GB, 1);
+ checkResources(userSource, 6*GB, 3, 3, 0, 10*GB, 9*GB, 2, 3*GB, 1);
+ checkResources(parentUserSource, 6*GB, 3, 3, 0, 10*GB, 9*GB, 2, 3*GB, 1);
metrics.releaseResources(user, 1, Resources.createResource(2*GB));
metrics.unreserveResource(user, Resources.createResource(3*GB));
- checkResources(queueSource, 4, 2, 3, 1, 100, 9, 2, 0, 0);
- checkResources(parentQueueSource, 4, 2, 3, 1, 100, 9, 2, 0, 0);
- checkResources(userSource, 4, 2, 3, 1, 10, 9, 2, 0, 0);
- checkResources(parentUserSource, 4, 2, 3, 1, 10, 9, 2, 0, 0);
+ checkResources(queueSource, 4*GB, 2, 3, 1, 100*GB, 9*GB, 2, 0, 0);
+ checkResources(parentQueueSource, 4*GB, 2, 3, 1, 100*GB, 9*GB, 2, 0, 0);
+ checkResources(userSource, 4*GB, 2, 3, 1, 10*GB, 9*GB, 2, 0, 0);
+ checkResources(parentUserSource, 4*GB, 2, 3, 1, 10*GB, 9*GB, 2, 0, 0);
metrics.finishApp(app, RMAppAttemptState.FINISHED);
checkApps(queueSource, 1, 0, 0, 1, 0, 0);
@@ -184,18 +184,19 @@ public class TestQueueMetrics {
assertCounter("AppsKilled", killed, rb);
}
- public static void checkResources(MetricsSource source, int allocGB,
- int allocCtnrs, long aggreAllocCtnrs, long aggreReleasedCtnrs, int availGB, int pendingGB, int pendingCtnrs,
- int reservedGB, int reservedCtnrs) {
+ public static void checkResources(MetricsSource source, int allocatedMB,
+ int allocCtnrs, long aggreAllocCtnrs, long aggreReleasedCtnrs,
+ int availableMB, int pendingMB, int pendingCtnrs,
+ int reservedMB, int reservedCtnrs) {
MetricsRecordBuilder rb = getMetrics(source);
- assertGauge("AllocatedGB", allocGB, rb);
+ assertGauge("AllocatedMB", allocatedMB, rb);
assertGauge("AllocatedContainers", allocCtnrs, rb);
assertCounter("AggregateContainersAllocated", aggreAllocCtnrs, rb);
assertCounter("AggregateContainersReleased", aggreReleasedCtnrs, rb);
- assertGauge("AvailableGB", availGB, rb);
- assertGauge("PendingGB", pendingGB, rb);
+ assertGauge("AvailableMB", availableMB, rb);
+ assertGauge("PendingMB", pendingMB, rb);
assertGauge("PendingContainers", pendingCtnrs, rb);
- assertGauge("ReservedGB", reservedGB, rb);
+ assertGauge("ReservedMB", reservedMB, rb);
assertGauge("ReservedContainers", reservedCtnrs, rb);
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java Fri Feb 10 01:49:08 2012
@@ -142,7 +142,7 @@ public class TestApplicationLimits {
CapacityScheduler.parseQueue(csContext, csConf, null, "root",
queues, queues,
CapacityScheduler.queueComparator,
- CapacityScheduler.applicationComparator,
+ CapacityScheduler.applicationComparator,
TestUtils.spyHook);
LeafQueue queue = (LeafQueue)queues.get(A);
@@ -153,29 +153,39 @@ public class TestApplicationLimits {
queue.getMaximumActiveApplicationsPerUser());
int expectedMaxActiveApps =
Math.max(1,
- (int)((clusterResource.getMemory() / LeafQueue.DEFAULT_AM_RESOURCE) *
+ (int)Math.ceil(((float)clusterResource.getMemory() / (1*GB)) *
csConf.getMaximumApplicationMasterResourcePercent() *
- queue.getAbsoluteCapacity()));
+ queue.getAbsoluteMaximumCapacity()));
assertEquals(expectedMaxActiveApps,
queue.getMaximumActiveApplications());
- assertEquals((int)(expectedMaxActiveApps * (queue.getUserLimit() / 100.0f) *
- queue.getUserLimitFactor()),
- queue.getMaximumActiveApplicationsPerUser());
+ assertEquals(
+ (int)Math.ceil(
+ expectedMaxActiveApps * (queue.getUserLimit() / 100.0f) *
+ queue.getUserLimitFactor()),
+ queue.getMaximumActiveApplicationsPerUser());
+ assertEquals(
+ (int)(clusterResource.getMemory() * queue.getAbsoluteCapacity()),
+ queue.getMetrics().getAvailableMB()
+ );
// Add some nodes to the cluster & test new limits
clusterResource = Resources.createResource(120 * 16 * GB);
root.updateClusterResource(clusterResource);
expectedMaxActiveApps =
Math.max(1,
- (int)((clusterResource.getMemory() / LeafQueue.DEFAULT_AM_RESOURCE) *
+ (int)Math.ceil(((float)clusterResource.getMemory() / (1*GB)) *
csConf.getMaximumApplicationMasterResourcePercent() *
- queue.getAbsoluteCapacity()));
+ queue.getAbsoluteMaximumCapacity()));
assertEquals(expectedMaxActiveApps,
queue.getMaximumActiveApplications());
- assertEquals((int)(expectedMaxActiveApps * (queue.getUserLimit() / 100.0f) *
- queue.getUserLimitFactor()),
- queue.getMaximumActiveApplicationsPerUser());
-
+ assertEquals(
+ (int)Math.ceil(expectedMaxActiveApps *
+ (queue.getUserLimit() / 100.0f) * queue.getUserLimitFactor()),
+ queue.getMaximumActiveApplicationsPerUser());
+ assertEquals(
+ (int)(clusterResource.getMemory() * queue.getAbsoluteCapacity()),
+ queue.getMetrics().getAvailableMB()
+ );
}
@Test
@@ -257,6 +267,87 @@ public class TestApplicationLimits {
}
@Test
+ public void testActiveLimitsWithKilledApps() throws Exception {
+ final String user_0 = "user_0";
+
+ int APPLICATION_ID = 0;
+
+ // set max active to 2
+ doReturn(2).when(queue).getMaximumActiveApplications();
+
+ // Submit first application
+ SchedulerApp app_0 = getMockApplication(APPLICATION_ID++, user_0);
+ queue.submitApplication(app_0, user_0, A);
+ assertEquals(1, queue.getNumActiveApplications());
+ assertEquals(0, queue.getNumPendingApplications());
+ assertEquals(1, queue.getNumActiveApplications(user_0));
+ assertEquals(0, queue.getNumPendingApplications(user_0));
+ assertTrue(queue.activeApplications.contains(app_0));
+
+ // Submit second application
+ SchedulerApp app_1 = getMockApplication(APPLICATION_ID++, user_0);
+ queue.submitApplication(app_1, user_0, A);
+ assertEquals(2, queue.getNumActiveApplications());
+ assertEquals(0, queue.getNumPendingApplications());
+ assertEquals(2, queue.getNumActiveApplications(user_0));
+ assertEquals(0, queue.getNumPendingApplications(user_0));
+ assertTrue(queue.activeApplications.contains(app_1));
+
+ // Submit third application, should remain pending
+ SchedulerApp app_2 = getMockApplication(APPLICATION_ID++, user_0);
+ queue.submitApplication(app_2, user_0, A);
+ assertEquals(2, queue.getNumActiveApplications());
+ assertEquals(1, queue.getNumPendingApplications());
+ assertEquals(2, queue.getNumActiveApplications(user_0));
+ assertEquals(1, queue.getNumPendingApplications(user_0));
+ assertTrue(queue.pendingApplications.contains(app_2));
+
+ // Submit fourth application, should remain pending
+ SchedulerApp app_3 = getMockApplication(APPLICATION_ID++, user_0);
+ queue.submitApplication(app_3, user_0, A);
+ assertEquals(2, queue.getNumActiveApplications());
+ assertEquals(2, queue.getNumPendingApplications());
+ assertEquals(2, queue.getNumActiveApplications(user_0));
+ assertEquals(2, queue.getNumPendingApplications(user_0));
+ assertTrue(queue.pendingApplications.contains(app_3));
+
+ // Kill 3rd pending application
+ queue.finishApplication(app_2, A);
+ assertEquals(2, queue.getNumActiveApplications());
+ assertEquals(1, queue.getNumPendingApplications());
+ assertEquals(2, queue.getNumActiveApplications(user_0));
+ assertEquals(1, queue.getNumPendingApplications(user_0));
+ assertFalse(queue.pendingApplications.contains(app_2));
+ assertFalse(queue.activeApplications.contains(app_2));
+
+ // Finish 1st application, app_3 should become active
+ queue.finishApplication(app_0, A);
+ assertEquals(2, queue.getNumActiveApplications());
+ assertEquals(0, queue.getNumPendingApplications());
+ assertEquals(2, queue.getNumActiveApplications(user_0));
+ assertEquals(0, queue.getNumPendingApplications(user_0));
+ assertTrue(queue.activeApplications.contains(app_3));
+ assertFalse(queue.pendingApplications.contains(app_3));
+ assertFalse(queue.activeApplications.contains(app_0));
+
+ // Finish 2nd application
+ queue.finishApplication(app_1, A);
+ assertEquals(1, queue.getNumActiveApplications());
+ assertEquals(0, queue.getNumPendingApplications());
+ assertEquals(1, queue.getNumActiveApplications(user_0));
+ assertEquals(0, queue.getNumPendingApplications(user_0));
+ assertFalse(queue.activeApplications.contains(app_1));
+
+ // Finish 4th application
+ queue.finishApplication(app_3, A);
+ assertEquals(0, queue.getNumActiveApplications());
+ assertEquals(0, queue.getNumPendingApplications());
+ assertEquals(0, queue.getNumActiveApplications(user_0));
+ assertEquals(0, queue.getNumPendingApplications(user_0));
+ assertFalse(queue.activeApplications.contains(app_3));
+ }
+
+ @Test
public void testHeadroom() throws Exception {
CapacitySchedulerConfiguration csConf =
new CapacitySchedulerConfiguration();
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java Fri Feb 10 01:49:08 2012
@@ -18,11 +18,12 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+import static org.junit.Assert.assertEquals;
import java.io.IOException;
+import java.util.List;
import junit.framework.Assert;
-import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -47,6 +48,21 @@ import org.junit.Test;
public class TestCapacityScheduler {
private static final Log LOG = LogFactory.getLog(TestCapacityScheduler.class);
+ private static final String A = CapacitySchedulerConfiguration.ROOT + ".a";
+ private static final String B = CapacitySchedulerConfiguration.ROOT + ".b";
+ private static final String A1 = A + ".a1";
+ private static final String A2 = A + ".a2";
+ private static final String B1 = B + ".b1";
+ private static final String B2 = B + ".b2";
+ private static final String B3 = B + ".b3";
+ private static int A_CAPACITY = 10;
+ private static int B_CAPACITY = 90;
+ private static int A1_CAPACITY = 30;
+ private static int A2_CAPACITY = 70;
+ private static int B1_CAPACITY = 50;
+ private static int B2_CAPACITY = 30;
+ private static int B3_CAPACITY = 20;
+
private ResourceManager resourceManager = null;
@Before
@@ -200,35 +216,102 @@ public class TestCapacityScheduler {
conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b"});
conf.setCapacity(CapacitySchedulerConfiguration.ROOT, 100);
- final String A = CapacitySchedulerConfiguration.ROOT + ".a";
- conf.setCapacity(A, 10);
-
- final String B = CapacitySchedulerConfiguration.ROOT + ".b";
- conf.setCapacity(B, 90);
+ conf.setCapacity(A, A_CAPACITY);
+ conf.setCapacity(B, B_CAPACITY);
// Define 2nd-level queues
- final String A1 = A + ".a1";
- final String A2 = A + ".a2";
conf.setQueues(A, new String[] {"a1", "a2"});
- conf.setCapacity(A1, 30);
+ conf.setCapacity(A1, A1_CAPACITY);
conf.setUserLimitFactor(A1, 100.0f);
- conf.setCapacity(A2, 70);
+ conf.setCapacity(A2, A2_CAPACITY);
conf.setUserLimitFactor(A2, 100.0f);
- final String B1 = B + ".b1";
- final String B2 = B + ".b2";
- final String B3 = B + ".b3";
conf.setQueues(B, new String[] {"b1", "b2", "b3"});
- conf.setCapacity(B1, 50);
+ conf.setCapacity(B1, B1_CAPACITY);
conf.setUserLimitFactor(B1, 100.0f);
- conf.setCapacity(B2, 30);
+ conf.setCapacity(B2, B2_CAPACITY);
conf.setUserLimitFactor(B2, 100.0f);
- conf.setCapacity(B3, 20);
+ conf.setCapacity(B3, B3_CAPACITY);
conf.setUserLimitFactor(B3, 100.0f);
LOG.info("Setup top-level queues a and b");
}
+ @Test
+ public void testRefreshQueues() throws Exception {
+ CapacityScheduler cs = new CapacityScheduler();
+ CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
+ setupQueueConfiguration(conf);
+ cs.reinitialize(conf, null, null);
+ checkQueueCapacities(cs, A_CAPACITY, B_CAPACITY);
+
+ conf.setCapacity(A, 80);
+ conf.setCapacity(B, 20);
+ cs.reinitialize(conf, null,null);
+ checkQueueCapacities(cs, 80, 20);
+ }
+
+ private void checkQueueCapacities(CapacityScheduler cs,
+ int capacityA, int capacityB) {
+ CSQueue rootQueue = cs.getRootQueue();
+ CSQueue queueA = findQueue(rootQueue, A);
+ CSQueue queueB = findQueue(rootQueue, B);
+ CSQueue queueA1 = findQueue(queueA, A1);
+ CSQueue queueA2 = findQueue(queueA, A2);
+ CSQueue queueB1 = findQueue(queueB, B1);
+ CSQueue queueB2 = findQueue(queueB, B2);
+ CSQueue queueB3 = findQueue(queueB, B3);
+
+ float capA = capacityA / 100.0f;
+ float capB = capacityB / 100.0f;
+
+ checkQueueCapacity(queueA, capA, capA, 1.0f, 1.0f);
+ checkQueueCapacity(queueB, capB, capB, 1.0f, 1.0f);
+ checkQueueCapacity(queueA1, A1_CAPACITY / 100.0f,
+ (A1_CAPACITY/100.0f) * capA, 1.0f, 1.0f);
+ checkQueueCapacity(queueA2, (float)A2_CAPACITY / 100.0f,
+ (A2_CAPACITY/100.0f) * capA, 1.0f, 1.0f);
+ checkQueueCapacity(queueB1, (float)B1_CAPACITY / 100.0f,
+ (B1_CAPACITY/100.0f) * capB, 1.0f, 1.0f);
+ checkQueueCapacity(queueB2, (float)B2_CAPACITY / 100.0f,
+ (B2_CAPACITY/100.0f) * capB, 1.0f, 1.0f);
+ checkQueueCapacity(queueB3, (float)B3_CAPACITY / 100.0f,
+ (B3_CAPACITY/100.0f) * capB, 1.0f, 1.0f);
+ }
+
+ private void checkQueueCapacity(CSQueue q, float expectedCapacity,
+ float expectedAbsCapacity, float expectedMaxCapacity,
+ float expectedAbsMaxCapacity) {
+ final float epsilon = 1e-5f;
+ assertEquals("capacity", expectedCapacity, q.getCapacity(), epsilon);
+ assertEquals("absolute capacity", expectedAbsCapacity,
+ q.getAbsoluteCapacity(), epsilon);
+ assertEquals("maximum capacity", expectedMaxCapacity,
+ q.getMaximumCapacity(), epsilon);
+ assertEquals("absolute maximum capacity", expectedAbsMaxCapacity,
+ q.getAbsoluteMaximumCapacity(), epsilon);
+ }
+
+ private CSQueue findQueue(CSQueue root, String queuePath) {
+ if (root.getQueuePath().equals(queuePath)) {
+ return root;
+ }
+
+ List<CSQueue> childQueues = root.getChildQueues();
+ if (childQueues != null) {
+ for (CSQueue q : childQueues) {
+ if (queuePath.startsWith(q.getQueuePath())) {
+ CSQueue result = findQueue(q, queuePath);
+ if (result != null) {
+ return result;
+ }
+ }
+ }
+ }
+
+ return null;
+ }
+
private void checkApplicationResourceUsage(int expected,
Application application) {
Assert.assertEquals(expected, application.getUsedResources().getMemory());
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java Fri Feb 10 01:49:08 2012
@@ -38,6 +38,8 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Container;
@@ -57,6 +59,7 @@ import org.apache.hadoop.yarn.server.res
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
import org.junit.After;
@@ -67,6 +70,8 @@ import org.mockito.stubbing.Answer;
public class TestLeafQueue {
+ private static final Log LOG = LogFactory.getLog(TestLeafQueue.class);
+
private final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
@@ -88,6 +93,7 @@ public class TestLeafQueue {
csConf =
new CapacitySchedulerConfiguration();
+ csConf.setBoolean("yarn.scheduler.capacity.user-metrics.enable", true);
setupQueueConfiguration(csConf);
@@ -251,9 +257,38 @@ public class TestLeafQueue {
// Only 1 container
a.assignContainers(clusterResource, node_0);
- assertEquals(7, a.getMetrics().getAvailableGB());
+ assertEquals(7*GB, a.getMetrics().getAvailableMB());
}
+ @Test
+ public void testAppAttemptMetrics() throws Exception {
+
+ // Manipulate queue 'a'
+ LeafQueue a = stubLeafQueue((LeafQueue) queues.get(B));
+
+ // Users
+ final String user_0 = "user_0";
+
+ // Submit applications
+ final ApplicationAttemptId appAttemptId_0 = TestUtils
+ .getMockApplicationAttemptId(0, 1);
+ SchedulerApp app_0 = new SchedulerApp(appAttemptId_0, user_0, a, null,
+ rmContext, null);
+ a.submitApplication(app_0, user_0, B);
+
+ // Attempt the same application again
+ final ApplicationAttemptId appAttemptId_1 = TestUtils
+ .getMockApplicationAttemptId(0, 2);
+ SchedulerApp app_1 = new SchedulerApp(appAttemptId_1, user_0, a, null,
+ rmContext, null);
+ a.submitApplication(app_1, user_0, B); // same user
+
+ assertEquals(1, a.getMetrics().getAppsSubmitted());
+ assertEquals(1, a.getMetrics().getAppsPending());
+
+ QueueMetrics userMetrics = a.getMetrics().getUserMetrics(user_0);
+ assertEquals(1, userMetrics.getAppsSubmitted());
+ }
@Test
public void testSingleQueueWithOneUser() throws Exception {
@@ -307,9 +342,9 @@ public class TestLeafQueue {
assertEquals(1*GB, a.getUsedResources().getMemory());
assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0, a.getMetrics().getReservedGB());
- assertEquals(1, a.getMetrics().getAllocatedGB());
- assertEquals(0, a.getMetrics().getAvailableGB());
+ assertEquals(0*GB, a.getMetrics().getReservedMB());
+ assertEquals(1*GB, a.getMetrics().getAllocatedMB());
+ assertEquals(0*GB, a.getMetrics().getAvailableMB());
// Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also
// you can get one container more than user-limit
@@ -317,16 +352,16 @@ public class TestLeafQueue {
assertEquals(2*GB, a.getUsedResources().getMemory());
assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0, a.getMetrics().getReservedGB());
- assertEquals(2, a.getMetrics().getAllocatedGB());
+ assertEquals(0*GB, a.getMetrics().getReservedMB());
+ assertEquals(2*GB, a.getMetrics().getAllocatedMB());
// Can't allocate 3rd due to user-limit
a.assignContainers(clusterResource, node_0);
assertEquals(2*GB, a.getUsedResources().getMemory());
assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0, a.getMetrics().getReservedGB());
- assertEquals(2, a.getMetrics().getAllocatedGB());
+ assertEquals(0*GB, a.getMetrics().getReservedMB());
+ assertEquals(2*GB, a.getMetrics().getAllocatedMB());
// Bump up user-limit-factor, now allocate should work
a.setUserLimitFactor(10);
@@ -334,16 +369,16 @@ public class TestLeafQueue {
assertEquals(3*GB, a.getUsedResources().getMemory());
assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0, a.getMetrics().getReservedGB());
- assertEquals(3, a.getMetrics().getAllocatedGB());
+ assertEquals(0*GB, a.getMetrics().getReservedMB());
+ assertEquals(3*GB, a.getMetrics().getAllocatedMB());
// One more should work, for app_1, due to user-limit-factor
a.assignContainers(clusterResource, node_0);
assertEquals(4*GB, a.getUsedResources().getMemory());
assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0, a.getMetrics().getReservedGB());
- assertEquals(4, a.getMetrics().getAllocatedGB());
+ assertEquals(0*GB, a.getMetrics().getReservedMB());
+ assertEquals(4*GB, a.getMetrics().getAllocatedMB());
// Test max-capacity
// Now - no more allocs since we are at max-cap
@@ -352,8 +387,8 @@ public class TestLeafQueue {
assertEquals(4*GB, a.getUsedResources().getMemory());
assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0, a.getMetrics().getReservedGB());
- assertEquals(4, a.getMetrics().getAllocatedGB());
+ assertEquals(0*GB, a.getMetrics().getReservedMB());
+ assertEquals(4*GB, a.getMetrics().getAllocatedMB());
// Release each container from app_0
for (RMContainer rmContainer : app_0.getLiveContainers()) {
@@ -363,8 +398,8 @@ public class TestLeafQueue {
assertEquals(1*GB, a.getUsedResources().getMemory());
assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0, a.getMetrics().getReservedGB());
- assertEquals(1, a.getMetrics().getAllocatedGB());
+ assertEquals(0*GB, a.getMetrics().getReservedMB());
+ assertEquals(1*GB, a.getMetrics().getAllocatedMB());
// Release each container from app_1
for (RMContainer rmContainer : app_1.getLiveContainers()) {
@@ -374,9 +409,9 @@ public class TestLeafQueue {
assertEquals(0*GB, a.getUsedResources().getMemory());
assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0, a.getMetrics().getReservedGB());
- assertEquals(0, a.getMetrics().getAllocatedGB());
- assertEquals(1, a.getMetrics().getAvailableGB());
+ assertEquals(0*GB, a.getMetrics().getReservedMB());
+ assertEquals(0*GB, a.getMetrics().getAllocatedMB());
+ assertEquals(1*GB, a.getMetrics().getAvailableMB());
}
@Test
@@ -473,6 +508,115 @@ public class TestLeafQueue {
}
@Test
+ public void testHeadroomWithMaxCap() throws Exception {
+ // Mock the queue
+ LeafQueue a = stubLeafQueue((LeafQueue)queues.get(A));
+ //unset maxCapacity
+ a.setMaxCapacity(1.0f);
+
+ // Users
+ final String user_0 = "user_0";
+ final String user_1 = "user_1";
+
+ // Submit applications
+ final ApplicationAttemptId appAttemptId_0 =
+ TestUtils.getMockApplicationAttemptId(0, 0);
+ SchedulerApp app_0 =
+ new SchedulerApp(appAttemptId_0, user_0, a,
+ a.getActiveUsersManager(), rmContext, null);
+ a.submitApplication(app_0, user_0, A);
+
+ final ApplicationAttemptId appAttemptId_1 =
+ TestUtils.getMockApplicationAttemptId(1, 0);
+ SchedulerApp app_1 =
+ new SchedulerApp(appAttemptId_1, user_0, a,
+ a.getActiveUsersManager(), rmContext, null);
+ a.submitApplication(app_1, user_0, A); // same user
+
+ final ApplicationAttemptId appAttemptId_2 =
+ TestUtils.getMockApplicationAttemptId(2, 0);
+ SchedulerApp app_2 =
+ new SchedulerApp(appAttemptId_2, user_1, a,
+ a.getActiveUsersManager(), rmContext, null);
+ a.submitApplication(app_2, user_1, A);
+
+ // Setup some nodes
+ String host_0 = "host_0";
+ SchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8*GB);
+ String host_1 = "host_1";
+ SchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8*GB);
+
+ final int numNodes = 2;
+ Resource clusterResource = Resources.createResource(numNodes * (8*GB));
+ when(csContext.getNumClusterNodes()).thenReturn(numNodes);
+
+ // Setup resource-requests
+ Priority priority = TestUtils.createMockPriority(1);
+ app_0.updateResourceRequests(Collections.singletonList(
+ TestUtils.createResourceRequest(RMNodeImpl.ANY, 2*GB, 1, priority,
+ recordFactory)));
+
+ app_1.updateResourceRequests(Collections.singletonList(
+ TestUtils.createResourceRequest(RMNodeImpl.ANY, 1*GB, 2, priority,
+ recordFactory)));
+
+ /**
+ * Start testing...
+ */
+
+ // Set user-limit
+ a.setUserLimit(50);
+ a.setUserLimitFactor(2);
+
+ // Now, only user_0 should be active since he is the only one with
+ // outstanding requests
+ assertEquals("There should only be 1 active user!",
+ 1, a.getActiveUsersManager().getNumActiveUsers());
+
+ // 1 container to user_0
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(2*GB, a.getUsedResources().getMemory());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_0.getHeadroom().getMemory()); // User limit = 2G
+ assertEquals(0*GB, app_0.getHeadroom().getMemory()); // User limit = 2G
+
+ // Again one to user_0 since he hasn't exceeded user limit yet
+ a.assignContainers(clusterResource, node_0);
+ assertEquals(3*GB, a.getUsedResources().getMemory());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_0.getHeadroom().getMemory()); // 3G - 2G
+ assertEquals(0*GB, app_0.getHeadroom().getMemory()); // 3G - 2G
+
+ // Submit requests for app_1 and set max-cap
+ a.setMaxCapacity(.1f);
+ app_2.updateResourceRequests(Collections.singletonList(
+ TestUtils.createResourceRequest(RMNodeImpl.ANY, 1*GB, 1, priority,
+ recordFactory)));
+ assertEquals(2, a.getActiveUsersManager().getNumActiveUsers());
+
+ // No more to user_0 since he is already over user-limit
+ // and no more containers to queue since it's already at max-cap
+ a.assignContainers(clusterResource, node_1);
+ assertEquals(3*GB, a.getUsedResources().getMemory());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_2.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_0.getHeadroom().getMemory());
+ assertEquals(0*GB, app_1.getHeadroom().getMemory());
+
+ // Check headroom for app_2
+ LOG.info("here");
+ app_1.updateResourceRequests(Collections.singletonList( // unset
+ TestUtils.createResourceRequest(RMNodeImpl.ANY, 1*GB, 0, priority,
+ recordFactory)));
+ assertEquals(1, a.getActiveUsersManager().getNumActiveUsers());
+ a.assignContainers(clusterResource, node_1);
+ assertEquals(1*GB, app_2.getHeadroom().getMemory()); // hit queue max-cap
+ }
+
+ @Test
public void testSingleQueueWithMultipleUsers() throws Exception {
// Mock the queue
@@ -700,9 +844,9 @@ public class TestLeafQueue {
assertEquals(1*GB, a.getUsedResources().getMemory());
assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0, a.getMetrics().getReservedGB());
- assertEquals(1, a.getMetrics().getAllocatedGB());
- assertEquals(0, a.getMetrics().getAvailableGB());
+ assertEquals(0*GB, a.getMetrics().getReservedMB());
+ assertEquals(1*GB, a.getMetrics().getAllocatedMB());
+ assertEquals(0*GB, a.getMetrics().getAvailableMB());
// Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also
// you can get one container more than user-limit
@@ -710,8 +854,8 @@ public class TestLeafQueue {
assertEquals(2*GB, a.getUsedResources().getMemory());
assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0, a.getMetrics().getReservedGB());
- assertEquals(2, a.getMetrics().getAllocatedGB());
+ assertEquals(0*GB, a.getMetrics().getReservedMB());
+ assertEquals(2*GB, a.getMetrics().getAllocatedMB());
// Now, reservation should kick in for app_1
a.assignContainers(clusterResource, node_0);
@@ -720,8 +864,8 @@ public class TestLeafQueue {
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
assertEquals(4*GB, app_1.getCurrentReservation().getMemory());
assertEquals(2*GB, node_0.getUsedResource().getMemory());
- assertEquals(4, a.getMetrics().getReservedGB());
- assertEquals(2, a.getMetrics().getAllocatedGB());
+ assertEquals(4*GB, a.getMetrics().getReservedMB());
+ assertEquals(2*GB, a.getMetrics().getAllocatedMB());
// Now free 1 container from app_0 i.e. 1G
a.completedContainer(clusterResource, app_0, node_0,
@@ -732,8 +876,8 @@ public class TestLeafQueue {
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
assertEquals(4*GB, app_1.getCurrentReservation().getMemory());
assertEquals(1*GB, node_0.getUsedResource().getMemory());
- assertEquals(4, a.getMetrics().getReservedGB());
- assertEquals(1, a.getMetrics().getAllocatedGB());
+ assertEquals(4*GB, a.getMetrics().getReservedMB());
+ assertEquals(1*GB, a.getMetrics().getAllocatedMB());
// Now finish another container from app_0 and fulfill the reservation
a.completedContainer(clusterResource, app_0, node_0,
@@ -744,8 +888,8 @@ public class TestLeafQueue {
assertEquals(4*GB, app_1.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentReservation().getMemory());
assertEquals(4*GB, node_0.getUsedResource().getMemory());
- assertEquals(0, a.getMetrics().getReservedGB());
- assertEquals(4, a.getMetrics().getAllocatedGB());
+ assertEquals(0*GB, a.getMetrics().getReservedMB());
+ assertEquals(4*GB, a.getMetrics().getAllocatedMB());
}
@Test
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java Fri Feb 10 01:49:08 2012
@@ -86,7 +86,7 @@ public class TestParentQueue {
private SchedulerApp getMockApplication(int appId, String user) {
SchedulerApp application = mock(SchedulerApp.class);
doReturn(user).when(application).getUser();
- doReturn(null).when(application).getHeadroom();
+ doReturn(Resources.createResource(0)).when(application).getHeadroom();
return application;
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java Fri Feb 10 01:49:08 2012
@@ -26,17 +26,28 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.server.resourcemanager.Application;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.Task;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.InlineDispatcher;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
+import org.apache.hadoop.yarn.util.BuilderUtils;
import org.junit.After;
import org.junit.Before;
+import org.junit.Test;
public class TestFifoScheduler {
private static final Log LOG = LogFactory.getLog(TestFifoScheduler.class);
@@ -63,7 +74,30 @@ public class TestFifoScheduler {
.getRMContext());
}
+ @Test
+ public void testAppAttemptMetrics() throws Exception {
+ AsyncDispatcher dispatcher = new InlineDispatcher();
+ RMContext rmContext = new RMContextImpl(null, dispatcher, null, null, null);
+ FifoScheduler scheduler = new FifoScheduler();
+ scheduler.reinitialize(new Configuration(), null, rmContext);
+
+ ApplicationId appId = BuilderUtils.newApplicationId(200, 1);
+ ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
+ appId, 1);
+
+ SchedulerEvent event = new AppAddedSchedulerEvent(appAttemptId, "queue",
+ "user");
+ scheduler.handle(event);
+
+ appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 2);
+
+ event = new AppAddedSchedulerEvent(appAttemptId, "queue", "user");
+ scheduler.handle(event);
+
+ QueueMetrics metrics = scheduler.getRootQueueMetrics();
+ Assert.assertEquals(1, metrics.getAppsSubmitted());
+ }
// @Test
public void testFifoScheduler() throws Exception {
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java Fri Feb 10 01:49:08 2012
@@ -22,6 +22,7 @@ import java.io.PrintWriter;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.NodesPage.NodesBlock;
import org.apache.hadoop.yarn.webapp.test.WebAppTests;
import org.junit.Before;
@@ -39,30 +40,37 @@ import com.google.inject.Module;
public class TestNodesPage {
final int numberOfRacks = 2;
- final int numberOfNodesPerRack = 2;
+ final int numberOfNodesPerRack = 6;
+ // The following is because of the way TestRMWebApp.mockRMContext creates
+ // nodes.
+ final int numberOfLostNodesPerRack = numberOfNodesPerRack
+ / RMNodeState.values().length;
+
// Number of Actual Table Headers for NodesPage.NodesBlock might change in
// future. In that case this value should be adjusted to the new value.
- final int numberOfThInMetricsTable = 10;
+ final int numberOfThInMetricsTable = 13;
final int numberOfActualTableHeaders = 10;
private Injector injector;
@Before
public void setUp() throws Exception {
- injector = WebAppTests.createMockInjector(RMContext.class, TestRMWebApp
- .mockRMContext(3, numberOfRacks, numberOfNodesPerRack,
- 8 * TestRMWebApp.GiB), new Module() {
- @Override
- public void configure(Binder binder) {
- try {
- binder.bind(ResourceManager.class).toInstance(
- TestRMWebApp.mockRm(3, numberOfRacks, numberOfNodesPerRack,
- 8 * TestRMWebApp.GiB));
- } catch (IOException e) {
- throw new IllegalStateException(e);
- }
- }
- });
+ final RMContext mockRMContext =
+ TestRMWebApp.mockRMContext(3, numberOfRacks, numberOfNodesPerRack,
+ 8 * TestRMWebApp.GiB);
+ injector =
+ WebAppTests.createMockInjector(RMContext.class, mockRMContext,
+ new Module() {
+ @Override
+ public void configure(Binder binder) {
+ try {
+ binder.bind(ResourceManager.class).toInstance(
+ TestRMWebApp.mockRm(mockRMContext));
+ } catch (IOException e) {
+ throw new IllegalStateException(e);
+ }
+ }
+ });
}
@Test
@@ -94,7 +102,7 @@ public class TestNodesPage {
.print("<th");
Mockito.verify(
writer,
- Mockito.times(numberOfRacks * numberOfNodesPerRack
+ Mockito.times(numberOfRacks * numberOfLostNodesPerRack
* numberOfActualTableHeaders + numberOfThInMetricsTable)).print(
"<td");
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java Fri Feb 10 01:49:08 2012
@@ -38,13 +38,16 @@ import org.apache.hadoop.yarn.server.res
import org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager.MockAsm;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.webapp.WebApps;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
import org.apache.hadoop.yarn.webapp.test.WebAppTests;
import org.junit.Test;
@@ -74,7 +77,7 @@ public class TestRMWebApp {
@Test public void testView() {
Injector injector = WebAppTests.createMockInjector(RMContext.class,
- mockRMContext(3, 1, 2, 8*GiB),
+ mockRMContext(15, 1, 2, 8*GiB),
new Module() {
@Override
public void configure(Binder binder) {
@@ -85,25 +88,45 @@ public class TestRMWebApp {
}
}
});
- injector.getInstance(RmView.class).render();
+ RmView rmViewInstance = injector.getInstance(RmView.class);
+ rmViewInstance.set(YarnWebParams.APP_STATE, RMAppState.RUNNING.toString());
+ rmViewInstance.render();
WebAppTests.flushOutput(injector);
}
@Test public void testNodesPage() {
+ // 2 racks with 12 nodes each; two of each node state type per rack.
+ final RMContext rmContext = mockRMContext(3, 2, 12, 8*GiB);
Injector injector = WebAppTests.createMockInjector(RMContext.class,
- mockRMContext(3, 1, 2, 8*GiB),
+ rmContext,
new Module() {
@Override
public void configure(Binder binder) {
try {
- binder.bind(ResourceManager.class).toInstance(mockRm(3, 1, 2, 8*GiB));
+ binder.bind(ResourceManager.class).toInstance(mockRm(rmContext));
} catch (IOException e) {
throw new IllegalStateException(e);
}
}
});
- injector.getInstance(NodesPage.class).render();
+
+ // All nodes
+ NodesPage instance = injector.getInstance(NodesPage.class);
+ instance.render();
+ WebAppTests.flushOutput(injector);
+
+ // Unhealthy nodes
+ instance.moreParams().put(YarnWebParams.NODE_STATE,
+ RMNodeState.UNHEALTHY.toString());
+ instance.render();
WebAppTests.flushOutput(injector);
+
+ // Lost nodes
+ instance.moreParams().put(YarnWebParams.NODE_STATE,
+ RMNodeState.LOST.toString());
+ instance.render();
+ WebAppTests.flushOutput(injector);
+
}
public static RMContext mockRMContext(int numApps, int racks, int numNodes,
@@ -121,11 +144,12 @@ public class TestRMWebApp {
nodesMap.put(node.getNodeID(), node);
}
- final List<RMNode> lostNodes = MockNodes.lostNodes(racks, numNodes,
- newResource(mbsPerNode));
- final ConcurrentMap<String, RMNode> lostNodesMap = Maps.newConcurrentMap();
- for (RMNode node : lostNodes) {
- lostNodesMap.put(node.getHostName(), node);
+ final List<RMNode> deactivatedNodes =
+ MockNodes.deactivatedNodes(racks, numNodes, newResource(mbsPerNode));
+ final ConcurrentMap<String, RMNode> deactivatedNodesMap =
+ Maps.newConcurrentMap();
+ for (RMNode node : deactivatedNodes) {
+ deactivatedNodesMap.put(node.getHostName(), node);
}
return new RMContextImpl(new MemStore(), null, null, null, null) {
@Override
@@ -134,7 +158,7 @@ public class TestRMWebApp {
}
@Override
public ConcurrentMap<String, RMNode> getInactiveRMNodes() {
- return lostNodesMap;
+ return deactivatedNodesMap;
}
@Override
public ConcurrentMap<NodeId, RMNode> getRMNodes() {
@@ -145,9 +169,13 @@ public class TestRMWebApp {
public static ResourceManager mockRm(int apps, int racks, int nodes,
int mbsPerNode) throws IOException {
- ResourceManager rm = mock(ResourceManager.class);
RMContext rmContext = mockRMContext(apps, racks, nodes,
- mbsPerNode);
+ mbsPerNode);
+ return mockRm(rmContext);
+ }
+
+ public static ResourceManager mockRm(RMContext rmContext) throws IOException {
+ ResourceManager rm = mock(ResourceManager.class);
ResourceScheduler rs = mockCapacityScheduler();
when(rm.getResourceScheduler()).thenReturn(rs);
when(rm.getRMContext()).thenReturn(rmContext);
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java Fri Feb 10 01:49:08 2012
@@ -361,6 +361,7 @@ public class TestRMWebServices extends J
verifyClusterMetrics(
WebServicesTestUtils.getXmlInt(element, "appsSubmitted"),
+ WebServicesTestUtils.getXmlInt(element, "appsCompleted"),
WebServicesTestUtils.getXmlInt(element, "reservedMB"),
WebServicesTestUtils.getXmlInt(element, "availableMB"),
WebServicesTestUtils.getXmlInt(element, "allocatedMB"),
@@ -379,8 +380,9 @@ public class TestRMWebServices extends J
Exception {
assertEquals("incorrect number of elements", 1, json.length());
JSONObject clusterinfo = json.getJSONObject("clusterMetrics");
- assertEquals("incorrect number of elements", 12, clusterinfo.length());
- verifyClusterMetrics(clusterinfo.getInt("appsSubmitted"),
+ assertEquals("incorrect number of elements", 19, clusterinfo.length());
+ verifyClusterMetrics(
+ clusterinfo.getInt("appsSubmitted"), clusterinfo.getInt("appsCompleted"),
clusterinfo.getInt("reservedMB"), clusterinfo.getInt("availableMB"),
clusterinfo.getInt("allocatedMB"),
clusterinfo.getInt("containersAllocated"),
@@ -390,7 +392,8 @@ public class TestRMWebServices extends J
clusterinfo.getInt("rebootedNodes"),clusterinfo.getInt("activeNodes"));
}
- public void verifyClusterMetrics(int sub, int reservedMB, int availableMB,
+ public void verifyClusterMetrics(int submittedApps, int completedApps,
+ int reservedMB, int availableMB,
int allocMB, int containersAlloc, int totalMB, int totalNodes,
int lostNodes, int unhealthyNodes, int decommissionedNodes,
int rebootedNodes, int activeNodes) throws JSONException, Exception {
@@ -398,19 +401,21 @@ public class TestRMWebServices extends J
ResourceScheduler rs = rm.getResourceScheduler();
QueueMetrics metrics = rs.getRootQueueMetrics();
ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics();
- final long MB_IN_GB = 1024;
- long totalMBExpect = (metrics.getReservedGB() * MB_IN_GB)
- + (metrics.getAvailableGB() * MB_IN_GB)
- + (metrics.getAllocatedGB() * MB_IN_GB);
-
- assertEquals("appsSubmitted doesn't match", metrics.getAppsSubmitted(), sub);
+ long totalMBExpect =
+ metrics.getReservedMB()+ metrics.getAvailableMB()
+ + metrics.getAllocatedMB();
+
+ assertEquals("appsSubmitted doesn't match",
+ metrics.getAppsSubmitted(), submittedApps);
+ assertEquals("appsCompleted doesn't match",
+ metrics.getAppsCompleted(), completedApps);
assertEquals("reservedMB doesn't match",
- metrics.getReservedGB() * MB_IN_GB, reservedMB);
- assertEquals("availableMB doesn't match", metrics.getAvailableGB()
- * MB_IN_GB, availableMB);
- assertEquals("allocatedMB doesn't match", metrics.getAllocatedGB()
- * MB_IN_GB, allocMB);
+ metrics.getReservedMB(), reservedMB);
+ assertEquals("availableMB doesn't match",
+ metrics.getAvailableMB(), availableMB);
+ assertEquals("allocatedMB doesn't match",
+ metrics.getAllocatedMB(), allocMB);
assertEquals("containersAllocated doesn't match", 0, containersAlloc);
assertEquals("totalMB doesn't match", totalMBExpect, totalMB);
assertEquals(
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java Fri Feb 10 01:49:08 2012
@@ -55,6 +55,8 @@ import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
+import clover.org.jfree.util.Log;
+
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
@@ -124,6 +126,46 @@ public class TestRMWebServicesNodes exte
}
@Test
+ public void testNodesDefaultWithUnHealthyNode() throws JSONException,
+ Exception {
+
+ WebResource r = resource();
+ MockNM nm1 = rm.registerNode("h1:1234", 5120);
+ MockNM nm2 = rm.registerNode("h2:1235", 5121);
+ rm.sendNodeStarted(nm1);
+ rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING);
+ rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW);
+
+ // One unhealthy node which should not appear in the list after
+ // MAPREDUCE-3760.
+ MockNM nm3 = rm.registerNode("h3:1236", 5122);
+ rm.NMwaitForState(nm3.getNodeId(), RMNodeState.NEW);
+ rm.sendNodeStarted(nm3);
+ rm.NMwaitForState(nm3.getNodeId(), RMNodeState.RUNNING);
+ RMNodeImpl node = (RMNodeImpl) rm.getRMContext().getRMNodes()
+ .get(nm3.getNodeId());
+ NodeHealthStatus nodeHealth = node.getNodeHealthStatus();
+ nodeHealth.setHealthReport("test health report");
+ nodeHealth.setIsNodeHealthy(false);
+ node.handle(new RMNodeStatusEvent(nm3.getNodeId(), nodeHealth,
+ new ArrayList<ContainerStatus>(), null, null));
+ rm.NMwaitForState(nm3.getNodeId(), RMNodeState.UNHEALTHY);
+
+ ClientResponse response =
+ r.path("ws").path("v1").path("cluster").path("nodes")
+ .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+
+ assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+ JSONObject json = response.getEntity(JSONObject.class);
+ assertEquals("incorrect number of elements", 1, json.length());
+ JSONObject nodes = json.getJSONObject("nodes");
+ assertEquals("incorrect number of elements", 1, nodes.length());
+ JSONArray nodeArray = nodes.getJSONArray("node");
+ // Just 2 nodes, leaving behind the unhealthy node.
+ assertEquals("incorrect number of elements", 2, nodeArray.length());
+ }
+
+ @Test
public void testNodesQueryState() throws JSONException, Exception {
WebResource r = resource();
MockNM nm1 = rm.registerNode("h1:1234", 5120);
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java Fri Feb 10 01:49:08 2012
@@ -81,11 +81,11 @@ public class MiniYARNCluster extends Com
*/
public MiniYARNCluster(String testName, int noOfNodeManagers,
int numLocalDirs, int numLogDirs) {
-
- super(testName);
+ super(testName.replace("$", ""));
this.numLocalDirs = numLocalDirs;
this.numLogDirs = numLogDirs;
- this.testWorkDir = new File("target", testName);
+ this.testWorkDir = new File("target",
+ testName.replace("$", ""));
try {
FileContext.getLocalFSFileContext().delete(
new Path(testWorkDir.getAbsolutePath()), true);