You are viewing a plain text version of this content. The canonical link for it is here.
Posted to mapreduce-commits@hadoop.apache.org by to...@apache.org on 2012/04/03 23:48:31 UTC
svn commit: r1309164 [2/2] - in
/hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project: ./ conf/
hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/
hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/mai...
Modified: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java?rev=1309164&r1=1309163&r2=1309164&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java Tue Apr 3 21:48:23 2012
@@ -151,7 +151,12 @@ public class TestRMWebServicesCapacitySc
conf.setUserLimitFactor(B2, 100.0f);
conf.setCapacity(B3, 20);
conf.setUserLimitFactor(B3, 100.0f);
-
+
+ conf.setQueues(A1, new String[] {"a1a", "a1b"});
+ final String A1A = A1 + ".a1a";
+ conf.setCapacity(A1A, 85);
+ final String A1B = A1 + ".a1b";
+ conf.setCapacity(A1B, 15);
}
@Before
@@ -232,12 +237,18 @@ public class TestRMWebServicesCapacitySc
WebServicesTestUtils.getXmlFloat(element, "maxCapacity"),
WebServicesTestUtils.getXmlString(element, "queueName"));
- NodeList queues = element.getElementsByTagName("queues");
- for (int j = 0; j < queues.getLength(); j++) {
- Element qElem = (Element) queues.item(j);
- String qName = WebServicesTestUtils.getXmlString(qElem, "queueName");
- String q = CapacitySchedulerConfiguration.ROOT + "." + qName;
- verifySubQueueXML(qElem, q, 100, 100);
+ NodeList children = element.getChildNodes();
+ for (int j = 0; j < children.getLength(); j++) {
+ Element qElem = (Element) children.item(j);
+ if(qElem.getTagName().equals("queues")) {
+ NodeList qListInfos = qElem.getChildNodes();
+ for (int k = 0; k < qListInfos.getLength(); k++) {
+ Element qElem2 = (Element) qListInfos.item(k);
+ String qName2 = WebServicesTestUtils.getXmlString(qElem2, "queueName");
+ String q2 = CapacitySchedulerConfiguration.ROOT + "." + qName2;
+ verifySubQueueXML(qElem2, q2, 100, 100);
+ }
+ }
}
}
}
@@ -245,8 +256,18 @@ public class TestRMWebServicesCapacitySc
public void verifySubQueueXML(Element qElem, String q,
float parentAbsCapacity, float parentAbsMaxCapacity)
throws Exception {
- NodeList queues = qElem.getElementsByTagName("subQueues");
- QueueInfo qi = (queues != null) ? new QueueInfo() : new LeafQueueInfo();
+ NodeList children = qElem.getChildNodes();
+ boolean hasSubQueues = false;
+ for (int j = 0; j < children.getLength(); j++) {
+ Element qElem2 = (Element) children.item(j);
+ if(qElem2.getTagName().equals("queues")) {
+ NodeList qListInfos = qElem2.getChildNodes();
+ if (qListInfos.getLength() > 0) {
+ hasSubQueues = true;
+ }
+ }
+ }
+ QueueInfo qi = (hasSubQueues) ? new QueueInfo() : new LeafQueueInfo();
qi.capacity = WebServicesTestUtils.getXmlFloat(qElem, "capacity");
qi.usedCapacity =
WebServicesTestUtils.getXmlFloat(qElem, "usedCapacity");
@@ -263,14 +284,18 @@ public class TestRMWebServicesCapacitySc
qi.queueName = WebServicesTestUtils.getXmlString(qElem, "queueName");
qi.state = WebServicesTestUtils.getXmlString(qElem, "state");
verifySubQueueGeneric(q, qi, parentAbsCapacity, parentAbsMaxCapacity);
-
- if (queues != null) {
- for (int j = 0; j < queues.getLength(); j++) {
- Element subqElem = (Element) queues.item(j);
- String qName = WebServicesTestUtils.getXmlString(subqElem, "queueName");
- String q2 = q + "." + qName;
- verifySubQueueXML(subqElem, q2,
- qi.absoluteCapacity, qi.absoluteMaxCapacity);
+ if (hasSubQueues) {
+ for (int j = 0; j < children.getLength(); j++) {
+ Element qElem2 = (Element) children.item(j);
+ if(qElem2.getTagName().equals("queues")) {
+ NodeList qListInfos = qElem2.getChildNodes();
+ for (int k = 0; k < qListInfos.getLength(); k++) {
+ Element qElem3 = (Element) qListInfos.item(k);
+ String qName3 = WebServicesTestUtils.getXmlString(qElem3, "queueName");
+ String q3 = q + "." + qName3;
+ verifySubQueueXML(qElem3, q3, qi.absoluteCapacity, qi.absoluteMaxCapacity);
+ }
+ }
}
} else {
LeafQueueInfo lqi = (LeafQueueInfo) qi;
@@ -307,7 +332,7 @@ public class TestRMWebServicesCapacitySc
(float) info.getDouble("capacity"),
(float) info.getDouble("maxCapacity"), info.getString("queueName"));
- JSONArray arr = info.getJSONArray("queues");
+ JSONArray arr = info.getJSONObject("queues").getJSONArray("queue");
assertEquals("incorrect number of elements", 2, arr.length());
// test subqueues
@@ -333,7 +358,7 @@ public class TestRMWebServicesCapacitySc
throws JSONException, Exception {
int numExpectedElements = 11;
boolean isParentQueue = true;
- if (!info.has("subQueues")) {
+ if (!info.has("queues")) {
numExpectedElements = 20;
isParentQueue = false;
}
@@ -354,7 +379,7 @@ public class TestRMWebServicesCapacitySc
verifySubQueueGeneric(q, qi, parentAbsCapacity, parentAbsMaxCapacity);
if (isParentQueue) {
- JSONArray arr = info.getJSONArray("subQueues");
+ JSONArray arr = info.getJSONObject("queues").getJSONArray("queue");
// test subqueues
for (int i = 0; i < arr.length(); i++) {
JSONObject obj = arr.getJSONObject(i);
Modified: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java?rev=1309164&r1=1309163&r2=1309164&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java Tue Apr 3 21:48:23 2012
@@ -37,9 +37,9 @@ import javax.servlet.http.HttpServletReq
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.httpclient.Header;
+import org.apache.commons.httpclient.HostConfiguration;
import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.HttpMethod;
-import org.apache.commons.httpclient.HostConfiguration;
import org.apache.commons.httpclient.cookie.CookiePolicy;
import org.apache.commons.httpclient.methods.GetMethod;
import org.apache.commons.httpclient.params.HttpClientParams;
@@ -260,7 +260,24 @@ public class WebAppProxyServlet extends
URI trackingUri = ProxyUriUtils.getUriFromAMUrl(
applicationReport.getOriginalTrackingUrl());
if(applicationReport.getOriginalTrackingUrl().equals("N/A")) {
- notFound(resp, "The MRAppMaster died before writing anything.");
+ String message;
+ switch(applicationReport.getFinalApplicationStatus()) {
+ case FAILED:
+ case KILLED:
+ case SUCCEEDED:
+ message =
+ "The requested application exited before setting a tracking URL.";
+ break;
+ case UNDEFINED:
+ message = "The requested application does not appear to be running "
+ +"yet, and has not set a tracking URL.";
+ break;
+ default:
+ //This should never happen, but just to be safe
+ message = "The requested application has not set a tracking URL.";
+ break;
+ }
+ notFound(resp, message);
return;
}
Modified: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HistoryServerRest.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HistoryServerRest.apt.vm?rev=1309164&r1=1309163&r2=1309164&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HistoryServerRest.apt.vm (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HistoryServerRest.apt.vm Tue Apr 3 21:48:23 2012
@@ -149,6 +149,7 @@ History Server REST API's.
------
* user - user name
+ * state - the job state
* queue - queue name
* limit - total number of app objects to be returned
* startedTimeBegin - jobs with start time beginning with this time, specified in ms since epoch
Modified: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm?rev=1309164&r1=1309163&r2=1309164&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm Tue Apr 3 21:48:23 2012
@@ -324,7 +324,7 @@ ResourceManager REST API's.
| queues | array of queues(JSON)/zero or more queue objects(XML) | A collection of queue resources|
*---------------+--------------+-------------------------------+
-** Elements of the queues/subQueues object for a Parent queue
+** Elements of the queues object for a Parent queue
*---------------+--------------+-------------------------------+
|| Item || Data Type || Description |
@@ -349,10 +349,10 @@ ResourceManager REST API's.
*---------------+--------------+-------------------------------+
| state | string of QueueState | The state of the queue |
*---------------+--------------+-------------------------------+
-| subQueues | array of queues(JSON)/zero or more queue objects(XML) | A collection of sub-queue information|
+| queues | array of queues(JSON)/zero or more queue objects(XML) | A collection of sub-queue information|
*---------------+--------------+-------------------------------+
-** Elements of the queues/subQueues object for a Leaf queue - contains all elements in parent plus the following:
+** Elements of the queues object for a Leaf queue - contains all elements in parent plus the following:
*---------------+--------------+-------------------------------+
|| Item || Data Type || Description |
@@ -406,168 +406,190 @@ ResourceManager REST API's.
"queueName" : "root",
"maxCapacity" : 100,
"type" : "capacityScheduler",
- "queues" : [
- {
- "numPendingApplications" : 0,
- "queueName" : "default",
- "userLimitFactor" : 1,
- "maxApplications" : 7000,
- "usedCapacity" : 0,
- "numContainers" : 0,
- "state" : "RUNNING",
- "maxCapacity" : 90,
- "numApplications" : 0,
- "usedResources" : "memory: 0",
- "absoluteMaxCapacity" : 90,
- "maxActiveApplications" : 1,
- "numActiveApplications" : 0,
- "absoluteUsedCapacity" : 0,
- "userLimit" : 100,
- "absoluteCapacity" : 70,
- "maxActiveApplicationsPerUser" : 1,
- "capacity" : 70,
- "type" : "capacitySchedulerLeafQueueInfo",
- "maxApplicationsPerUser" : 7000
- },
- {
- "queueName" : "test",
- "absoluteUsedCapacity" : 0,
- "absoluteCapacity" : 20,
- "usedCapacity" : 0,
- "capacity" : 20,
- "subQueues" : [
- {
- "numPendingApplications" : 0,
- "queueName" : "a1",
- "userLimitFactor" : 1,
- "maxApplications" : 1200,
- "usedCapacity" : 0,
- "numContainers" : 0,
- "state" : "RUNNING",
- "maxCapacity" : 80,
- "numApplications" : 0,
- "usedResources" : "memory: 0",
- "absoluteMaxCapacity" : 16.000002,
- "maxActiveApplications" : 1,
- "numActiveApplications" : 0,
- "absoluteUsedCapacity" : 0,
- "userLimit" : 100,
- "absoluteCapacity" : 12,
- "maxActiveApplicationsPerUser" : 1,
- "capacity" : 60.000004,
- "type" : "capacitySchedulerLeafQueueInfo",
- "maxApplicationsPerUser" : 1200
+ "queues" : {
+ "queue" : [
+ {
+ "numPendingApplications" : 0,
+ "queueName" : "default",
+ "userLimitFactor" : 1,
+ "maxApplications" : 1,
+ "usedCapacity" : 0,
+ "numContainers" : 0,
+ "state" : "RUNNING",
+ "maxCapacity" : 90,
+ "numApplications" : 0,
+ "usedResources" : "memory: 0",
+ "absoluteMaxCapacity" : 90,
+ "maxActiveApplications" : 1,
+ "numActiveApplications" : 0,
+ "absoluteUsedCapacity" : 0,
+ "userLimit" : 100,
+ "absoluteCapacity" : 70,
+ "maxActiveApplicationsPerUser" : 1,
+ "capacity" : 70,
+ "type" : "capacitySchedulerLeafQueueInfo",
+ "maxApplicationsPerUser" : 1
+ },
+ {
+ "queueName" : "test",
+ "absoluteCapacity" : 20,
+ "usedCapacity" : 0,
+ "capacity" : 20,
+ "state" : "RUNNING",
+ "maxCapacity" : 100,
+ "numApplications" : 0,
+ "usedResources" : "memory: 0",
+ "absoluteMaxCapacity" : 100,
+ "queues" : {
+ "queue" : [
+ {
+ "queueName" : "a1",
+ "absoluteCapacity" : 12,
+ "usedCapacity" : 0,
+ "capacity" : 60.000004,
+ "state" : "RUNNING",
+ "maxCapacity" : 100,
+ "numApplications" : 0,
+ "usedResources" : "memory: 0",
+ "absoluteMaxCapacity" : 100,
+ "queues" : {
+ "queue" : [
+ {
+ "numPendingApplications" : 0,
+ "queueName" : "a11",
+ "userLimitFactor" : 1,
+ "maxApplications" : 0,
+ "usedCapacity" : 0,
+ "numContainers" : 0,
+ "state" : "RUNNING",
+ "maxCapacity" : 100,
+ "numApplications" : 0,
+ "usedResources" : "memory: 0",
+ "absoluteMaxCapacity" : 100,
+ "maxActiveApplications" : 1,
+ "numActiveApplications" : 0,
+ "absoluteUsedCapacity" : 0,
+ "userLimit" : 100,
+ "absoluteCapacity" : 10.200001,
+ "maxActiveApplicationsPerUser" : 1,
+ "capacity" : 85,
+ "type" : "capacitySchedulerLeafQueueInfo",
+ "maxApplicationsPerUser" : 0
+ },
+ {
+ "numPendingApplications" : 0,
+ "queueName" : "a12",
+ "userLimitFactor" : 1,
+ "maxApplications" : 0,
+ "usedCapacity" : 0,
+ "numContainers" : 0,
+ "state" : "RUNNING",
+ "maxCapacity" : 100,
+ "numApplications" : 0,
+ "usedResources" : "memory: 0",
+ "absoluteMaxCapacity" : 100,
+ "maxActiveApplications" : 1,
+ "numActiveApplications" : 0,
+ "absoluteUsedCapacity" : 0,
+ "userLimit" : 100,
+ "absoluteCapacity" : 1.8000001,
+ "maxActiveApplicationsPerUser" : 1,
+ "capacity" : 15.000001,
+ "type" : "capacitySchedulerLeafQueueInfo",
+ "maxApplicationsPerUser" : 0
+ }
+ ]
+ },
+ "absoluteUsedCapacity" : 0
+ },
+ {
+ "numPendingApplications" : 0,
+ "queueName" : "a2",
+ "userLimitFactor" : 1,
+ "maxApplications" : 0,
+ "usedCapacity" : 0,
+ "numContainers" : 0,
+ "state" : "RUNNING",
+ "maxCapacity" : 100,
+ "numApplications" : 0,
+ "usedResources" : "memory: 0",
+ "absoluteMaxCapacity" : 100,
+ "maxActiveApplications" : 1,
+ "numActiveApplications" : 0,
+ "absoluteUsedCapacity" : 0,
+ "userLimit" : 100,
+ "absoluteCapacity" : 8.000001,
+ "maxActiveApplicationsPerUser" : 1,
+ "capacity" : 40,
+ "type" : "capacitySchedulerLeafQueueInfo",
+ "maxApplicationsPerUser" : 0
+ }
+ ]
},
- {
- "numPendingApplications" : 0,
- "queueName" : "a2",
- "userLimitFactor" : 1,
- "maxApplications" : 800,
- "usedCapacity" : 0,
- "numContainers" : 0,
- "state" : "RUNNING",
- "maxCapacity" : 100,
- "numApplications" : 0,
- "usedResources" : "memory: 0",
- "absoluteMaxCapacity" : 100,
- "maxActiveApplications" : 1,
- "numActiveApplications" : 0,
- "absoluteUsedCapacity" : 0,
- "userLimit" : 100,
- "absoluteCapacity" : 8.000001,
- "maxActiveApplicationsPerUser" : 1,
- "capacity" : 40,
- "type" : "capacitySchedulerLeafQueueInfo",
- "maxApplicationsPerUser" : 800
- }
- ],
- "state" : "RUNNING",
- "maxCapacity" : 80,
- "numApplications" : 0,
- "usedResources" : "memory: 0",
- "absoluteMaxCapacity" : 80
- },
- {
- "queueName" : "test2",
- "absoluteUsedCapacity" : 0,
- "absoluteCapacity" : 10,
- "usedCapacity" : 0,
- "capacity" : 10,
- "subQueues" : [
- {
- "numPendingApplications" : 0,
- "queueName" : "a5",
- "userLimitFactor" : 1,
- "maxApplications" : 500,
- "usedCapacity" : 0,
- "numContainers" : 0,
- "state" : "RUNNING",
- "maxCapacity" : 100,
- "numApplications" : 0,
- "usedResources" : "memory: 0",
- "absoluteMaxCapacity" : 100,
- "maxActiveApplications" : 1,
- "numActiveApplications" : 0,
- "absoluteUsedCapacity" : 0,
- "userLimit" : 100,
- "absoluteCapacity" : 5,
- "maxActiveApplicationsPerUser" : 1,
- "capacity" : 50,
- "type" : "capacitySchedulerLeafQueueInfo",
- "maxApplicationsPerUser" : 500
+ "absoluteUsedCapacity" : 0
+ },
+ {
+ "queueName" : "test2",
+ "absoluteCapacity" : 10,
+ "usedCapacity" : 0,
+ "capacity" : 10,
+ "state" : "RUNNING",
+ "maxCapacity" : 15.000001,
+ "numApplications" : 0,
+ "usedResources" : "memory: 0",
+ "absoluteMaxCapacity" : 15.000001,
+ "queues" : {
+ "queue" : [
+ {
+ "numPendingApplications" : 0,
+ "queueName" : "a3",
+ "userLimitFactor" : 1,
+ "maxApplications" : 0,
+ "usedCapacity" : 0,
+ "numContainers" : 0,
+ "state" : "RUNNING",
+ "maxCapacity" : 100,
+ "numApplications" : 0,
+ "usedResources" : "memory: 0",
+ "absoluteMaxCapacity" : 15.000001,
+ "maxActiveApplications" : 1,
+ "numActiveApplications" : 0,
+ "absoluteUsedCapacity" : 0,
+ "userLimit" : 100,
+ "absoluteCapacity" : 9,
+ "maxActiveApplicationsPerUser" : 1,
+ "capacity" : 90,
+ "type" : "capacitySchedulerLeafQueueInfo",
+ "maxApplicationsPerUser" : 0
+ },
+ {
+ "numPendingApplications" : 0,
+ "queueName" : "a4",
+ "userLimitFactor" : 1,
+ "maxApplications" : 0,
+ "usedCapacity" : 0,
+ "numContainers" : 0,
+ "state" : "RUNNING",
+ "maxCapacity" : 100,
+ "numApplications" : 0,
+ "usedResources" : "memory: 0",
+ "absoluteMaxCapacity" : 15.000001,
+ "maxActiveApplications" : 1,
+ "numActiveApplications" : 0,
+ "absoluteUsedCapacity" : 0,
+ "userLimit" : 100,
+ "absoluteCapacity" : 1.0000001,
+ "maxActiveApplicationsPerUser" : 1,
+ "capacity" : 10,
+ "type" : "capacitySchedulerLeafQueueInfo",
+ "maxApplicationsPerUser" : 0
+ }
+ ]
},
- {
- "numPendingApplications" : 0,
- "queueName" : "a3",
- "userLimitFactor" : 1,
- "maxApplications" : 400,
- "usedCapacity" : 0,
- "numContainers" : 0,
- "state" : "RUNNING",
- "maxCapacity" : 100,
- "numApplications" : 0,
- "usedResources" : "memory: 0",
- "absoluteMaxCapacity" : 100,
- "maxActiveApplications" : 1,
- "numActiveApplications" : 0,
- "absoluteUsedCapacity" : 0,
- "userLimit" : 100,
- "absoluteCapacity" : 4.0000005,
- "maxActiveApplicationsPerUser" : 1,
- "capacity" : 40,
- "type" : "capacitySchedulerLeafQueueInfo",
- "maxApplicationsPerUser" : 400
- },
- {
- "numPendingApplications" : 0,
- "queueName" : "a4",
- "userLimitFactor" : 1,
- "maxApplications" : 100,
- "usedCapacity" : 0,
- "numContainers" : 0,
- "state" : "RUNNING",
- "maxCapacity" : 100,
- "numApplications" : 0,
- "usedResources" : "memory: 0",
- "absoluteMaxCapacity" : 100,
- "maxActiveApplications" : 1,
- "numActiveApplications" : 0,
- "absoluteUsedCapacity" : 0,
- "userLimit" : 100,
- "absoluteCapacity" : 1.0000001,
- "maxActiveApplicationsPerUser" : 1,
- "capacity" : 10,
- "type" : "capacitySchedulerLeafQueueInfo",
- "maxApplicationsPerUser" : 100
- }
- ],
- "state" : "RUNNING",
- "maxCapacity" : 15.000001,
- "numApplications" : 0,
- "usedResources" : "memory: 0",
- "absoluteMaxCapacity" : 15.000001
- }
- ],
+ "absoluteUsedCapacity" : 0
+ }
+ ]
+ },
"usedCapacity" : 0,
"capacity" : 100
}
@@ -575,7 +597,7 @@ ResourceManager REST API's.
}
+---+
- <<JSON response>>
+ <<XML response>>
HTTP Request:
@@ -603,155 +625,175 @@ ResourceManager REST API's.
<usedCapacity>0.0</usedCapacity>
<maxCapacity>100.0</maxCapacity>
<queueName>root</queueName>
- <queues xsi:type="capacitySchedulerLeafQueueInfo">
- <capacity>70.0</capacity>
- <usedCapacity>0.0</usedCapacity>
- <maxCapacity>90.0</maxCapacity>
- <absoluteCapacity>70.0</absoluteCapacity>
- <absoluteMaxCapacity>90.0</absoluteMaxCapacity>
- <absoluteUsedCapacity>0.0</absoluteUsedCapacity>
- <numApplications>0</numApplications>
- <usedResources>memory: 0</usedResources>
- <queueName>default</queueName>
- <state>RUNNING</state>
- <numActiveApplications>0</numActiveApplications>
- <numPendingApplications>0</numPendingApplications>
- <numContainers>0</numContainers>
- <maxApplications>7000</maxApplications>
- <maxApplicationsPerUser>7000</maxApplicationsPerUser>
- <maxActiveApplications>1</maxActiveApplications>
- <maxActiveApplicationsPerUser>1</maxActiveApplicationsPerUser>
- <userLimit>100</userLimit>
- <userLimitFactor>1.0</userLimitFactor>
- </queues>
<queues>
- <capacity>20.0</capacity>
- <usedCapacity>0.0</usedCapacity>
- <maxCapacity>80.0</maxCapacity>
- <absoluteCapacity>20.0</absoluteCapacity>
- <absoluteMaxCapacity>80.0</absoluteMaxCapacity>
- <absoluteUsedCapacity>0.0</absoluteUsedCapacity>
- <numApplications>0</numApplications>
- <usedResources>memory: 0</usedResources>
- <queueName>test</queueName>
- <state>RUNNING</state>
- <subQueues xsi:type="capacitySchedulerLeafQueueInfo">
- <capacity>60.000004</capacity>
+ <queue xsi:type="capacitySchedulerLeafQueueInfo">
+ <capacity>70.0</capacity>
<usedCapacity>0.0</usedCapacity>
- <maxCapacity>80.0</maxCapacity>
- <absoluteCapacity>12.0</absoluteCapacity>
- <absoluteMaxCapacity>16.000002</absoluteMaxCapacity>
+ <maxCapacity>90.0</maxCapacity>
+ <absoluteCapacity>70.0</absoluteCapacity>
+ <absoluteMaxCapacity>90.0</absoluteMaxCapacity>
<absoluteUsedCapacity>0.0</absoluteUsedCapacity>
<numApplications>0</numApplications>
<usedResources>memory: 0</usedResources>
- <queueName>a1</queueName>
+ <queueName>default</queueName>
<state>RUNNING</state>
<numActiveApplications>0</numActiveApplications>
<numPendingApplications>0</numPendingApplications>
<numContainers>0</numContainers>
- <maxApplications>1200</maxApplications>
- <maxApplicationsPerUser>1200</maxApplicationsPerUser>
+ <maxApplications>1</maxApplications>
+ <maxApplicationsPerUser>1</maxApplicationsPerUser>
<maxActiveApplications>1</maxActiveApplications>
<maxActiveApplicationsPerUser>1</maxActiveApplicationsPerUser>
<userLimit>100</userLimit>
<userLimitFactor>1.0</userLimitFactor>
- </subQueues>
- <subQueues xsi:type="capacitySchedulerLeafQueueInfo">
- <capacity>40.0</capacity>
+ </queue>
+ <queue>
+ <capacity>20.0</capacity>
<usedCapacity>0.0</usedCapacity>
<maxCapacity>100.0</maxCapacity>
- <absoluteCapacity>8.000001</absoluteCapacity>
+ <absoluteCapacity>20.0</absoluteCapacity>
<absoluteMaxCapacity>100.0</absoluteMaxCapacity>
<absoluteUsedCapacity>0.0</absoluteUsedCapacity>
<numApplications>0</numApplications>
<usedResources>memory: 0</usedResources>
- <queueName>a2</queueName>
+ <queueName>test</queueName>
<state>RUNNING</state>
- <numActiveApplications>0</numActiveApplications>
- <numPendingApplications>0</numPendingApplications>
- <numContainers>0</numContainers>
- <maxApplications>800</maxApplications>
- <maxApplicationsPerUser>800</maxApplicationsPerUser>
- <maxActiveApplications>1</maxActiveApplications>
- <maxActiveApplicationsPerUser>1</maxActiveApplicationsPerUser>
- <userLimit>100</userLimit>
- <userLimitFactor>1.0</userLimitFactor>
- </subQueues>
- </queues>
- <queues>
- <capacity>10.0</capacity>
- <usedCapacity>0.0</usedCapacity>
- <maxCapacity>15.000001</maxCapacity>
- <absoluteCapacity>10.0</absoluteCapacity>
- <absoluteMaxCapacity>15.000001</absoluteMaxCapacity>
- <absoluteUsedCapacity>0.0</absoluteUsedCapacity>
- <numApplications>0</numApplications>
- <usedResources>memory: 0</usedResources>
- <queueName>test2</queueName>
- <state>RUNNING</state>
- <subQueues xsi:type="capacitySchedulerLeafQueueInfo">
- <capacity>50.0</capacity>
- <usedCapacity>0.0</usedCapacity>
- <maxCapacity>100.0</maxCapacity>
- <absoluteCapacity>5.0</absoluteCapacity>
- <absoluteMaxCapacity>100.0</absoluteMaxCapacity>
- <absoluteUsedCapacity>0.0</absoluteUsedCapacity>
- <numApplications>0</numApplications>
- <usedResources>memory: 0</usedResources>
- <queueName>A4</queueName>
- <state>RUNNING</state>
- <numActiveApplications>0</numActiveApplications>
- <numPendingApplications>0</numPendingApplications>
- <numContainers>0</numContainers>
- <maxApplications>500</maxApplications>
- <maxApplicationsPerUser>500</maxApplicationsPerUser>
- <maxActiveApplications>1</maxActiveApplications>
- <maxActiveApplicationsPerUser>1</maxActiveApplicationsPerUser>
- <userLimit>100</userLimit>
- <userLimitFactor>1.0</userLimitFactor>
- </subQueues>
- <subQueues xsi:type="capacitySchedulerLeafQueueInfo">
- <capacity>40.0</capacity>
- <usedCapacity>0.0</usedCapacity>
- <maxCapacity>100.0</maxCapacity>
- <absoluteCapacity>4.0000005</absoluteCapacity>
- <absoluteMaxCapacity>100.0</absoluteMaxCapacity>
- <absoluteUsedCapacity>0.0</absoluteUsedCapacity>
- <numApplications>0</numApplications>
- <usedResources>memory: 0</usedResources>
- <queueName>a3</queueName>
- <state>RUNNING</state>
- <numActiveApplications>0</numActiveApplications>
- <numPendingApplications>0</numPendingApplications>
- <numContainers>0</numContainers>
- <maxApplications>400</maxApplications>
- <maxApplicationsPerUser>400</maxApplicationsPerUser>
- <maxActiveApplications>1</maxActiveApplications>
- <maxActiveApplicationsPerUser>1</maxActiveApplicationsPerUser>
- <userLimit>100</userLimit>
- <userLimitFactor>1.0</userLimitFactor>
- </subQueues>
- <subQueues xsi:type="capacitySchedulerLeafQueueInfo">
+ <queues>
+ <queue>
+ <capacity>60.000004</capacity>
+ <usedCapacity>0.0</usedCapacity>
+ <maxCapacity>100.0</maxCapacity>
+ <absoluteCapacity>12.0</absoluteCapacity>
+ <absoluteMaxCapacity>100.0</absoluteMaxCapacity>
+ <absoluteUsedCapacity>0.0</absoluteUsedCapacity>
+ <numApplications>0</numApplications>
+ <usedResources>memory: 0</usedResources>
+ <queueName>a1</queueName>
+ <state>RUNNING</state>
+ <queues>
+ <queue xsi:type="capacitySchedulerLeafQueueInfo">
+ <capacity>85.0</capacity>
+ <usedCapacity>0.0</usedCapacity>
+ <maxCapacity>100.0</maxCapacity>
+ <absoluteCapacity>10.200001</absoluteCapacity>
+ <absoluteMaxCapacity>100.0</absoluteMaxCapacity>
+ <absoluteUsedCapacity>0.0</absoluteUsedCapacity>
+ <numApplications>0</numApplications>
+ <usedResources>memory: 0</usedResources>
+ <queueName>a11</queueName>
+ <state>RUNNING</state>
+ <numActiveApplications>0</numActiveApplications>
+ <numPendingApplications>0</numPendingApplications>
+ <numContainers>0</numContainers>
+ <maxApplications>0</maxApplications>
+ <maxApplicationsPerUser>0</maxApplicationsPerUser>
+ <maxActiveApplications>1</maxActiveApplications>
+ <maxActiveApplicationsPerUser>1</maxActiveApplicationsPerUser>
+ <userLimit>100</userLimit>
+ <userLimitFactor>1.0</userLimitFactor>
+ </queue>
+ <queue xsi:type="capacitySchedulerLeafQueueInfo">
+ <capacity>15.000001</capacity>
+ <usedCapacity>0.0</usedCapacity>
+ <maxCapacity>100.0</maxCapacity>
+ <absoluteCapacity>1.8000001</absoluteCapacity>
+ <absoluteMaxCapacity>100.0</absoluteMaxCapacity>
+ <absoluteUsedCapacity>0.0</absoluteUsedCapacity>
+ <numApplications>0</numApplications>
+ <usedResources>memory: 0</usedResources>
+ <queueName>a12</queueName>
+ <state>RUNNING</state>
+ <numActiveApplications>0</numActiveApplications>
+ <numPendingApplications>0</numPendingApplications>
+ <numContainers>0</numContainers>
+ <maxApplications>0</maxApplications>
+ <maxApplicationsPerUser>0</maxApplicationsPerUser>
+ <maxActiveApplications>1</maxActiveApplications>
+ <maxActiveApplicationsPerUser>1</maxActiveApplicationsPerUser>
+ <userLimit>100</userLimit>
+ <userLimitFactor>1.0</userLimitFactor>
+ </queue>
+ </queues>
+ </queue>
+ <queue xsi:type="capacitySchedulerLeafQueueInfo">
+ <capacity>40.0</capacity>
+ <usedCapacity>0.0</usedCapacity>
+ <maxCapacity>100.0</maxCapacity>
+ <absoluteCapacity>8.000001</absoluteCapacity>
+ <absoluteMaxCapacity>100.0</absoluteMaxCapacity>
+ <absoluteUsedCapacity>0.0</absoluteUsedCapacity>
+ <numApplications>0</numApplications>
+ <usedResources>memory: 0</usedResources>
+ <queueName>a2</queueName>
+ <state>RUNNING</state>
+ <numActiveApplications>0</numActiveApplications>
+ <numPendingApplications>0</numPendingApplications>
+ <numContainers>0</numContainers>
+ <maxApplications>0</maxApplications>
+ <maxApplicationsPerUser>0</maxApplicationsPerUser>
+ <maxActiveApplications>1</maxActiveApplications>
+ <maxActiveApplicationsPerUser>1</maxActiveApplicationsPerUser>
+ <userLimit>100</userLimit>
+ <userLimitFactor>1.0</userLimitFactor>
+ </queue>
+ </queues>
+ </queue>
+ <queue>
<capacity>10.0</capacity>
<usedCapacity>0.0</usedCapacity>
- <maxCapacity>100.0</maxCapacity>
- <absoluteCapacity>1.0000001</absoluteCapacity>
- <absoluteMaxCapacity>100.0</absoluteMaxCapacity>
+ <maxCapacity>15.000001</maxCapacity>
+ <absoluteCapacity>10.0</absoluteCapacity>
+ <absoluteMaxCapacity>15.000001</absoluteMaxCapacity>
<absoluteUsedCapacity>0.0</absoluteUsedCapacity>
<numApplications>0</numApplications>
<usedResources>memory: 0</usedResources>
- <queueName>a4</queueName>
+ <queueName>test2</queueName>
<state>RUNNING</state>
- <numActiveApplications>0</numActiveApplications>
- <numPendingApplications>0</numPendingApplications>
- <numContainers>0</numContainers>
- <maxApplications>100</maxApplications>
- <maxApplicationsPerUser>100</maxApplicationsPerUser>
- <maxActiveApplications>1</maxActiveApplications>
- <maxActiveApplicationsPerUser>1</maxActiveApplicationsPerUser>
- <userLimit>100</userLimit>
- <userLimitFactor>1.0</userLimitFactor>
- </subQueues>
+ <queues>
+ <queue xsi:type="capacitySchedulerLeafQueueInfo">
+ <capacity>90.0</capacity>
+ <usedCapacity>0.0</usedCapacity>
+ <maxCapacity>100.0</maxCapacity>
+ <absoluteCapacity>9.0</absoluteCapacity>
+ <absoluteMaxCapacity>15.000001</absoluteMaxCapacity>
+ <absoluteUsedCapacity>0.0</absoluteUsedCapacity>
+ <numApplications>0</numApplications>
+ <usedResources>memory: 0</usedResources>
+ <queueName>a3</queueName>
+ <state>RUNNING</state>
+ <numActiveApplications>0</numActiveApplications>
+ <numPendingApplications>0</numPendingApplications>
+ <numContainers>0</numContainers>
+ <maxApplications>0</maxApplications>
+ <maxApplicationsPerUser>0</maxApplicationsPerUser>
+ <maxActiveApplications>1</maxActiveApplications>
+ <maxActiveApplicationsPerUser>1</maxActiveApplicationsPerUser>
+ <userLimit>100</userLimit>
+ <userLimitFactor>1.0</userLimitFactor>
+ </queue>
+ <queue xsi:type="capacitySchedulerLeafQueueInfo">
+ <capacity>10.0</capacity>
+ <usedCapacity>0.0</usedCapacity>
+ <maxCapacity>100.0</maxCapacity>
+ <absoluteCapacity>1.0000001</absoluteCapacity>
+ <absoluteMaxCapacity>15.000001</absoluteMaxCapacity>
+ <absoluteUsedCapacity>0.0</absoluteUsedCapacity>
+ <numApplications>0</numApplications>
+ <usedResources>memory: 0</usedResources>
+ <queueName>a4</queueName>
+ <state>RUNNING</state>
+ <numActiveApplications>0</numActiveApplications>
+ <numPendingApplications>0</numPendingApplications>
+ <numContainers>0</numContainers>
+ <maxApplications>0</maxApplications>
+ <maxApplicationsPerUser>0</maxApplicationsPerUser>
+ <maxActiveApplications>1</maxActiveApplications>
+ <maxActiveApplicationsPerUser>1</maxActiveApplicationsPerUser>
+ <userLimit>100</userLimit>
+ <userLimitFactor>1.0</userLimitFactor>
+ </queue>
+ </queues>
+ </queue>
</queues>
</schedulerInfo>
</scheduler>
@@ -890,6 +932,7 @@ ResourceManager REST API's.
------
* state - state of the application
+ * finalStatus - the final status of the application - reported by the application itself
* user - user name
* queue - queue name
* limit - total number of app objects to be returned
Modified: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/WritingYarnApplications.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/WritingYarnApplications.apt.vm?rev=1309164&r1=1309163&r2=1309164&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/WritingYarnApplications.apt.vm (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/WritingYarnApplications.apt.vm Tue Apr 3 21:48:23 2012
@@ -779,6 +779,13 @@ Hadoop MapReduce Next Generation - Writi
need to increase the value of the cluster-wide configuration variable
<<<yarn.nodemanager.vmem-pmem-ratio>>>.
+** How do I include native libraries?
+
+
+ Setting -Djava.library.path on the command line while launching a container
+ can cause native libraries used by Hadoop to not be loaded correctly and can
+ result in errors. It is cleaner to use LD_LIBRARY_PATH instead.
+
* Useful Links
* {{{https://issues.apache.org/jira/secure/attachment/12486023/MapReduce_NextGen_Architecture.pdf}Map Reduce Next Generation Architecture}}
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/c++/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/c++:r1308236-1309161
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib:r1308236-1309161
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/block_forensics/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/block_forensics:r1308236-1309161
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/build-contrib.xml
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/build-contrib.xml:r1308236-1309161
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/build.xml
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/build.xml:r1308236-1309161
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/data_join/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/data_join:r1308236-1309161
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/eclipse-plugin/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/eclipse-plugin:r1308236-1309161
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/index/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/index:r1308236-1309161
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/vaidya/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/vaidya:r1308236-1309161
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/examples/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/examples:r1308236-1309161
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/java:r1308236-1309161
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/test/mapred/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred:r1308236-1309161
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs:r1308236-1309161
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/hdfs:r1308236-1309161
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc:r1308236-1309161
Modified: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobInProgress.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobInProgress.java?rev=1309164&r1=1309163&r2=1309164&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobInProgress.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobInProgress.java Tue Apr 3 21:48:23 2012
@@ -354,13 +354,9 @@ public class TestJobInProgress {
Node r2n3 = new NodeBase("/default/rack2/node3");
nt.add(r2n3);
- Node r2n4 = new NodeBase("/default/rack2/s1/node4");
- nt.add(r2n4);
-
LOG.debug("r1n1 parent: " + r1n1.getParent() + "\n" +
"r1n2 parent: " + r1n2.getParent() + "\n" +
- "r2n3 parent: " + r2n3.getParent() + "\n" +
- "r2n4 parent: " + r2n4.getParent());
+ "r2n3 parent: " + r2n3.getParent());
// Same host
assertEquals(0, JobInProgress.getMatchingLevelForNodes(r1n1, r1n1, 3));
@@ -368,8 +364,6 @@ public class TestJobInProgress {
assertEquals(1, JobInProgress.getMatchingLevelForNodes(r1n1, r1n2, 3));
// Different rack
assertEquals(2, JobInProgress.getMatchingLevelForNodes(r1n1, r2n3, 3));
- // Different rack at different depth
- assertEquals(3, JobInProgress.getMatchingLevelForNodes(r1n1, r2n4, 3));
}
}
Modified: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTTResourceReporting.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTTResourceReporting.java?rev=1309164&r1=1309163&r2=1309164&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTTResourceReporting.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTTResourceReporting.java Tue Apr 3 21:48:23 2012
@@ -1,364 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapreduce.SleepJob;
-import org.apache.hadoop.mapreduce.TaskCounter;
-import org.apache.hadoop.mapreduce.util.LinuxResourceCalculatorPlugin;
-import org.apache.hadoop.mapreduce.util.ResourceCalculatorPlugin;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.hadoop.mapreduce.MRConfig;
-import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
-import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker;
-
-import junit.framework.TestCase;
-import org.junit.Test;
-import org.junit.After;
-
-/**
- * This test class tests the functionality related to configuring, reporting
- * and computing memory related parameters in a Map/Reduce cluster.
- *
- * Each test sets up a {@link MiniMRCluster} with a locally defined
- * {@link org.apache.hadoop.mapred.TaskScheduler}. This scheduler validates
- * the memory related configuration is correctly computed and reported from
- * the tasktracker in
- * {@link org.apache.hadoop.mapred.TaskScheduler#assignTasks(TaskTrackerStatus)}.
- */
-public class TestTTResourceReporting extends TestCase {
-
- static final Log LOG = LogFactory.getLog(TestTTResourceReporting.class);
-
- private MiniMRCluster miniMRCluster;
-
- /**
- * Fake scheduler to test the proper reporting of memory values by TT
- */
- public static class FakeTaskScheduler extends JobQueueTaskScheduler {
-
- private boolean hasPassed = true;
- private boolean hasDynamicValuePassed = true;
- private String message;
-
- public FakeTaskScheduler() {
- super();
- }
-
- public boolean hasTestPassed() {
- return hasPassed;
- }
-
- public boolean hasDynamicTestPassed() {
- return hasDynamicValuePassed;
- }
-
- public String getFailureMessage() {
- return message;
- }
-
- @Override
- public List<Task> assignTasks(TaskTracker taskTracker)
- throws IOException {
- TaskTrackerStatus status = taskTracker.getStatus();
- long totalVirtualMemoryOnTT =
- getConf().getLong("totalVmemOnTT", JobConf.DISABLED_MEMORY_LIMIT);
- long totalPhysicalMemoryOnTT =
- getConf().getLong("totalPmemOnTT", JobConf.DISABLED_MEMORY_LIMIT);
- long mapSlotMemorySize =
- getConf().getLong("mapSlotMemorySize", JobConf.DISABLED_MEMORY_LIMIT);
- long reduceSlotMemorySize =
- getConf()
- .getLong("reduceSlotMemorySize", JobConf.DISABLED_MEMORY_LIMIT);
- long availableVirtualMemoryOnTT =
- getConf().getLong("availableVmemOnTT", JobConf.DISABLED_MEMORY_LIMIT);
- long availablePhysicalMemoryOnTT =
- getConf().getLong("availablePmemOnTT", JobConf.DISABLED_MEMORY_LIMIT);
- long cumulativeCpuTime =
- getConf().getLong("cumulativeCpuTime", TaskTrackerStatus.UNAVAILABLE);
- long cpuFrequency =
- getConf().getLong("cpuFrequency", TaskTrackerStatus.UNAVAILABLE);
- int numProcessors =
- getConf().getInt("numProcessors", TaskTrackerStatus.UNAVAILABLE);
- float cpuUsage =
- getConf().getFloat("cpuUsage", TaskTrackerStatus.UNAVAILABLE);
-
- long reportedTotalVirtualMemoryOnTT =
- status.getResourceStatus().getTotalVirtualMemory();
- long reportedTotalPhysicalMemoryOnTT =
- status.getResourceStatus().getTotalPhysicalMemory();
- long reportedMapSlotMemorySize =
- status.getResourceStatus().getMapSlotMemorySizeOnTT();
- long reportedReduceSlotMemorySize =
- status.getResourceStatus().getReduceSlotMemorySizeOnTT();
- long reportedAvailableVirtualMemoryOnTT =
- status.getResourceStatus().getAvailabelVirtualMemory();
- long reportedAvailablePhysicalMemoryOnTT =
- status.getResourceStatus().getAvailablePhysicalMemory();
- long reportedCumulativeCpuTime =
- status.getResourceStatus().getCumulativeCpuTime();
- long reportedCpuFrequency = status.getResourceStatus().getCpuFrequency();
- int reportedNumProcessors = status.getResourceStatus().getNumProcessors();
- float reportedCpuUsage = status.getResourceStatus().getCpuUsage();
-
- message =
- "expected values : "
- + "(totalVirtualMemoryOnTT, totalPhysicalMemoryOnTT, "
- + "availableVirtualMemoryOnTT, availablePhysicalMemoryOnTT, "
- + "mapSlotMemSize, reduceSlotMemorySize, cumulativeCpuTime, "
- + "cpuFrequency, numProcessors, cpuUsage) = ("
- + totalVirtualMemoryOnTT + ", "
- + totalPhysicalMemoryOnTT + ","
- + availableVirtualMemoryOnTT + ", "
- + availablePhysicalMemoryOnTT + ","
- + mapSlotMemorySize + ","
- + reduceSlotMemorySize + ","
- + cumulativeCpuTime + ","
- + cpuFrequency + ","
- + numProcessors + ","
- + cpuUsage
- +")";
- message +=
- "\nreported values : "
- + "(totalVirtualMemoryOnTT, totalPhysicalMemoryOnTT, "
- + "availableVirtualMemoryOnTT, availablePhysicalMemoryOnTT, "
- + "reportedMapSlotMemorySize, reportedReduceSlotMemorySize, "
- + "reportedCumulativeCpuTime, reportedCpuFrequency, "
- + "reportedNumProcessors, cpuUsage) = ("
- + reportedTotalVirtualMemoryOnTT + ", "
- + reportedTotalPhysicalMemoryOnTT + ","
- + reportedAvailableVirtualMemoryOnTT + ", "
- + reportedAvailablePhysicalMemoryOnTT + ","
- + reportedMapSlotMemorySize + ","
- + reportedReduceSlotMemorySize + ","
- + reportedCumulativeCpuTime + ","
- + reportedCpuFrequency + ","
- + reportedNumProcessors + ","
- + reportedCpuUsage
- + ")";
- LOG.info(message);
- hasDynamicValuePassed = true;
- // Check task resource status in task reports
- for (TaskStatus taskStatus : status.getTaskReports()) {
- Counters counters = taskStatus.getCounters();
- // This should be zero because the initial CPU time is subtracted.
- long procCumulativeCpuTime = 0;
- long procVirtualMemorySize =
- getConf().getLong("procVirtualMemorySize", -1);
- long procPhysicalMemorySize =
- getConf().getLong("procPhysicalMemorySize", -1);
- long reportedProcCumulativeCpuTime =
- counters.findCounter(TaskCounter.CPU_MILLISECONDS).getValue();
- long reportedProcVirtualMemorySize =
- counters.findCounter(TaskCounter.VIRTUAL_MEMORY_BYTES).getValue();
- long reportedProcPhysicalMemorySize =
- counters.findCounter(TaskCounter.PHYSICAL_MEMORY_BYTES).getValue();
- String procMessage =
- "expected values : "
- + "(procCumulativeCpuTime, procVirtualMemorySize,"
- + " procPhysicalMemorySize) = ("
- + procCumulativeCpuTime + ", "
- + procVirtualMemorySize + ", "
- + procPhysicalMemorySize + ")";
- procMessage +=
- "\nreported values : "
- + "(procCumulativeCpuTime, procVirtualMemorySize,"
- + " procPhysicalMemorySize) = ("
- + reportedProcCumulativeCpuTime + ", "
- + reportedProcVirtualMemorySize + ", "
- + reportedProcPhysicalMemorySize + ")";
- LOG.info(procMessage);
- message += "\n" + procMessage;
- if (procCumulativeCpuTime != reportedProcCumulativeCpuTime ||
- procVirtualMemorySize != reportedProcVirtualMemorySize ||
- procPhysicalMemorySize != reportedProcPhysicalMemorySize) {
- hasDynamicValuePassed = false;
- }
- }
- hasPassed = true;
- if (totalVirtualMemoryOnTT != reportedTotalVirtualMemoryOnTT
- || totalPhysicalMemoryOnTT != reportedTotalPhysicalMemoryOnTT
- || mapSlotMemorySize != reportedMapSlotMemorySize
- || reduceSlotMemorySize != reportedReduceSlotMemorySize
- || numProcessors != reportedNumProcessors) {
- hasPassed = false;
- }
- // These values changes every moment on the node so it can only be
- // tested by DummyMemoryCalculatorPlugin. Need to check them separately
- if (availableVirtualMemoryOnTT != reportedAvailableVirtualMemoryOnTT
- || availablePhysicalMemoryOnTT != reportedAvailablePhysicalMemoryOnTT
- || cumulativeCpuTime != reportedCumulativeCpuTime
- || cpuFrequency != reportedCpuFrequency
- || cpuUsage != reportedCpuUsage) {
- hasDynamicValuePassed = false;
- }
- return super.assignTasks(taskTracker);
- }
- }
-
- /**
- * Test that verifies default values are configured and reported correctly.
- *
- * @throws Exception
- */
- @Test
- public void testDefaultResourceValues()
- throws Exception {
- JobConf conf = new JobConf();
- try {
- // Memory values are disabled by default.
- conf.setClass(
- org.apache.hadoop.mapreduce.server.tasktracker.TTConfig.TT_RESOURCE_CALCULATOR_PLUGIN,
- DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
- setUpCluster(conf);
- JobConf jobConf = miniMRCluster.createJobConf();
- jobConf.setClass(
- org.apache.hadoop.mapreduce.server.tasktracker.TTConfig.TT_RESOURCE_CALCULATOR_PLUGIN,
- DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
- runSleepJob(jobConf);
- verifyTestResults();
- } finally {
- tearDownCluster();
- }
- }
-
- /**
- * Test that verifies that configured values are reported correctly.
- *
- * @throws Exception
- */
- @Test
- public void testConfiguredResourceValues()
- throws Exception {
- JobConf conf = new JobConf();
- conf.setLong("totalVmemOnTT", 4 * 1024 * 1024 * 1024L);
- conf.setLong("totalPmemOnTT", 2 * 1024 * 1024 * 1024L);
- conf.setLong("mapSlotMemorySize", 1 * 512L);
- conf.setLong("reduceSlotMemorySize", 1 * 1024L);
- conf.setLong("availableVmemOnTT", 4 * 1024 * 1024 * 1024L);
- conf.setLong("availablePmemOnTT", 2 * 1024 * 1024 * 1024L);
- conf.setLong("cumulativeCpuTime", 10000L);
- conf.setLong("cpuFrequency", 2000000L);
- conf.setInt("numProcessors", 8);
- conf.setFloat("cpuUsage", 15.5F);
- conf.setLong("procCumulativeCpuTime", 1000L);
- conf.setLong("procVirtualMemorySize", 2 * 1024 * 1024 * 1024L);
- conf.setLong("procPhysicalMemorySize", 1024 * 1024 * 1024L);
-
- conf.setClass(
- org.apache.hadoop.mapreduce.server.tasktracker.TTConfig.TT_RESOURCE_CALCULATOR_PLUGIN,
- DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
- conf.setLong(DummyResourceCalculatorPlugin.MAXVMEM_TESTING_PROPERTY,
- 4 * 1024 * 1024 * 1024L);
- conf.setLong(DummyResourceCalculatorPlugin.MAXPMEM_TESTING_PROPERTY,
- 2 * 1024 * 1024 * 1024L);
- conf.setLong(MRConfig.MAPMEMORY_MB, 512L);
- conf.setLong(MRConfig.REDUCEMEMORY_MB, 1024L);
- conf.setLong(DummyResourceCalculatorPlugin.CUMULATIVE_CPU_TIME, 10000L);
- conf.setLong(DummyResourceCalculatorPlugin.CPU_FREQUENCY, 2000000L);
- conf.setInt(DummyResourceCalculatorPlugin.NUM_PROCESSORS, 8);
- conf.setFloat(DummyResourceCalculatorPlugin.CPU_USAGE, 15.5F);
- try {
- setUpCluster(conf);
- JobConf jobConf = miniMRCluster.createJobConf();
- jobConf.setMemoryForMapTask(1 * 1024L);
- jobConf.setMemoryForReduceTask(2 * 1024L);
- jobConf.setClass(
- org.apache.hadoop.mapreduce.server.tasktracker.TTConfig.TT_RESOURCE_CALCULATOR_PLUGIN,
- DummyResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
- jobConf.setLong(DummyResourceCalculatorPlugin.PROC_CUMULATIVE_CPU_TIME, 1000L);
- jobConf.setLong(DummyResourceCalculatorPlugin.PROC_VMEM_TESTING_PROPERTY,
- 2 * 1024 * 1024 * 1024L);
- jobConf.setLong(DummyResourceCalculatorPlugin.PROC_PMEM_TESTING_PROPERTY,
- 1024 * 1024 * 1024L);
- runSleepJob(jobConf);
- verifyTestResults();
- } finally {
- tearDownCluster();
- }
- }
-
- /**
- * Test that verifies that total memory values are calculated and reported
- * correctly.
- *
- * @throws Exception
- */
- @Test
- public void testResourceValuesOnLinux()
- throws Exception {
- if (!System.getProperty("os.name").startsWith("Linux")) {
- return;
- }
-
- JobConf conf = new JobConf();
- LinuxResourceCalculatorPlugin plugin = new LinuxResourceCalculatorPlugin();
- // In this case, we only check these three fields because they are static
- conf.setLong("totalVmemOnTT", plugin.getVirtualMemorySize());
- conf.setLong("totalPmemOnTT", plugin.getPhysicalMemorySize());
- conf.setLong("numProcessors", plugin.getNumProcessors());
-
- try {
- setUpCluster(conf);
- runSleepJob(miniMRCluster.createJobConf());
- verifyTestResults(true);
- } finally {
- tearDownCluster();
- }
- }
-
- private void setUpCluster(JobConf conf)
- throws Exception {
- conf.setClass(JTConfig.JT_TASK_SCHEDULER,
- TestTTResourceReporting.FakeTaskScheduler.class, TaskScheduler.class);
- conf.set(JTConfig.JT_IPC_HANDLER_COUNT, "1");
- miniMRCluster = new MiniMRCluster(1, "file:///", 3, null, null, conf);
- }
-
- private void runSleepJob(JobConf conf) throws Exception {
- String[] args = { "-m", "1", "-r", "1",
- "-mt", "10", "-rt", "10" };
- ToolRunner.run(conf, new SleepJob(), args);
- }
-
- private void verifyTestResults() {
- verifyTestResults(false);
- }
-
- private void verifyTestResults(boolean excludeDynamic) {
- FakeTaskScheduler scheduler =
- (FakeTaskScheduler)miniMRCluster.getJobTrackerRunner().
- getJobTracker().getTaskScheduler();
- assertTrue(scheduler.getFailureMessage(), scheduler.hasTestPassed());
- if (!excludeDynamic) {
- assertTrue(scheduler.getFailureMessage(),
- scheduler.hasDynamicTestPassed());
- }
- }
-
- @After
- private void tearDownCluster() {
- if (miniMRCluster != null) {
- miniMRCluster.shutdown();
- }
- }
-}
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/webapps/job/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/webapps/job:r1308236-1309161