You are viewing a plain text version of this content. The canonical link for it is here.
Posted to mapreduce-commits@hadoop.apache.org by to...@apache.org on 2012/05/12 22:52:49 UTC
svn commit: r1337645 [3/3] - in
/hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project: ./ bin/ conf/
hadoop-mapreduce-client/
hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/
hadoop-mapreduce-client/hadoo...
Modified: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java Sat May 12 20:52:34 2012
@@ -25,8 +25,8 @@ import javax.xml.bind.annotation.XmlTran
import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
@@ -35,7 +35,7 @@ import org.apache.hadoop.yarn.server.res
public class NodeInfo {
protected String rack;
- protected RMNodeState state;
+ protected NodeState state;
protected String id;
protected String nodeHostName;
protected String nodeHTTPAddress;
Modified: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java Sat May 12 20:52:34 2012
@@ -26,11 +26,11 @@ import org.apache.hadoop.yarn.api.record
import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import com.google.common.collect.Lists;
@@ -48,7 +48,7 @@ public class MockNodes {
for (int j = 0; j < nodesPerRack; ++j) {
if (j == (nodesPerRack - 1)) {
// One unhealthy node per rack.
- list.add(nodeInfo(i, perNode, RMNodeState.UNHEALTHY));
+ list.add(nodeInfo(i, perNode, NodeState.UNHEALTHY));
}
list.add(newNodeInfo(i, perNode));
}
@@ -61,7 +61,7 @@ public class MockNodes {
List<RMNode> list = Lists.newArrayList();
for (int i = 0; i < racks; ++i) {
for (int j = 0; j < nodesPerRack; ++j) {
- RMNodeState[] allStates = RMNodeState.values();
+ NodeState[] allStates = NodeState.values();
list.add(nodeInfo(i, perNode, allStates[j % allStates.length]));
}
}
@@ -102,11 +102,11 @@ public class MockNodes {
private Resource perNode;
private String rackName;
private NodeHealthStatus nodeHealthStatus;
- private RMNodeState state;
+ private NodeState state;
public MockRMNodeImpl(NodeId nodeId, String nodeAddr, String httpAddress,
Resource perNode, String rackName, NodeHealthStatus nodeHealthStatus,
- int cmdPort, String hostName, RMNodeState state) {
+ int cmdPort, String hostName, NodeState state) {
this.nodeId = nodeId;
this.nodeAddr = nodeAddr;
this.httpAddress = httpAddress;
@@ -169,7 +169,7 @@ public class MockNodes {
}
@Override
- public RMNodeState getState() {
+ public NodeState getState() {
return this.state;
}
@@ -189,11 +189,11 @@ public class MockNodes {
}
};
- private static RMNode buildRMNode(int rack, final Resource perNode, RMNodeState state, String httpAddr) {
+ private static RMNode buildRMNode(int rack, final Resource perNode, NodeState state, String httpAddr) {
return buildRMNode(rack, perNode, state, httpAddr, NODE_ID++);
}
- private static RMNode buildRMNode(int rack, final Resource perNode, RMNodeState state, String httpAddr, int hostnum) {
+ private static RMNode buildRMNode(int rack, final Resource perNode, NodeState state, String httpAddr, int hostnum) {
final String rackName = "rack"+ rack;
final int nid = hostnum;
final String hostName = "host"+ nid;
@@ -202,7 +202,7 @@ public class MockNodes {
final String httpAddress = httpAddr;
final NodeHealthStatus nodeHealthStatus =
recordFactory.newRecordInstance(NodeHealthStatus.class);
- if (state != RMNodeState.UNHEALTHY) {
+ if (state != NodeState.UNHEALTHY) {
nodeHealthStatus.setIsNodeHealthy(true);
nodeHealthStatus.setHealthReport("HealthyMe");
}
@@ -211,12 +211,12 @@ public class MockNodes {
}
public static RMNode nodeInfo(int rack, final Resource perNode,
- RMNodeState state) {
+ NodeState state) {
return buildRMNode(rack, perNode, state, "N/A");
}
public static RMNode newNodeInfo(int rack, final Resource perNode) {
- return buildRMNode(rack, perNode, RMNodeState.RUNNING, "localhost:0");
+ return buildRMNode(rack, perNode, NodeState.RUNNING, "localhost:0");
}
public static RMNode newNodeInfo(int rack, final Resource perNode, int hostnum) {
Modified: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java Sat May 12 20:52:34 2012
@@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.api.record
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent;
import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
@@ -48,7 +49,6 @@ import org.apache.hadoop.yarn.server.res
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.util.Records;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
@@ -146,7 +146,7 @@ public class MockRM extends ResourceMana
node.handle(new RMNodeEvent(nm.getNodeId(), RMNodeEventType.EXPIRE));
}
- public void NMwaitForState(NodeId nodeid, RMNodeState finalState)
+ public void NMwaitForState(NodeId nodeid, NodeState finalState)
throws Exception {
RMNode node = getRMContext().getRMNodes().get(nodeid);
Assert.assertNotNull("node shouldn't be null", node);
Modified: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCNodeUpdates.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCNodeUpdates.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCNodeUpdates.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCNodeUpdates.java Sat May 12 20:52:34 2012
@@ -33,7 +33,6 @@ import org.apache.hadoop.yarn.server.res
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
import org.apache.hadoop.yarn.util.BuilderUtils;
import org.junit.After;
@@ -83,7 +82,7 @@ public class TestAMRMRPCNodeUpdates {
private void syncNodeLost(MockNM nm) throws Exception {
rm.sendNodeStarted(nm);
- rm.NMwaitForState(nm.getNodeId(), RMNodeState.RUNNING);
+ rm.NMwaitForState(nm.getNodeId(), NodeState.RUNNING);
rm.sendNodeLost(nm);
dispatcher.await();
}
Modified: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java Sat May 12 20:52:34 2012
@@ -20,9 +20,9 @@ package org.apache.hadoop.yarn.server.re
import java.io.IOException;
import java.io.PrintWriter;
+import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.NodesPage.NodesBlock;
import org.apache.hadoop.yarn.webapp.test.WebAppTests;
import org.junit.Before;
@@ -44,7 +44,7 @@ public class TestNodesPage {
// The following is because of the way TestRMWebApp.mockRMContext creates
// nodes.
final int numberOfLostNodesPerRack = numberOfNodesPerRack
- / RMNodeState.values().length;
+ / NodeState.values().length;
// Number of Actual Table Headers for NodesPage.NodesBlock might change in
// future. In that case this value should be adjusted to the new value.
Modified: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java Sat May 12 20:52:34 2012
@@ -31,6 +31,7 @@ import java.util.concurrent.ConcurrentMa
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
@@ -41,7 +42,6 @@ import org.apache.hadoop.yarn.server.res
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
@@ -124,13 +124,13 @@ public class TestRMWebApp {
// Unhealthy nodes
instance.moreParams().put(YarnWebParams.NODE_STATE,
- RMNodeState.UNHEALTHY.toString());
+ NodeState.UNHEALTHY.toString());
instance.render();
WebAppTests.flushOutput(injector);
// Lost nodes
instance.moreParams().put(YarnWebParams.NODE_STATE,
- RMNodeState.LOST.toString());
+ NodeState.LOST.toString());
instance.render();
WebAppTests.flushOutput(injector);
Modified: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java Sat May 12 20:52:34 2012
@@ -32,13 +32,13 @@ import javax.xml.parsers.DocumentBuilder
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
+import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeStatusEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
@@ -54,7 +54,6 @@ import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
-
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
@@ -131,15 +130,15 @@ public class TestRMWebServicesNodes exte
MockNM nm1 = rm.registerNode("h1:1234", 5120);
MockNM nm2 = rm.registerNode("h2:1235", 5121);
rm.sendNodeStarted(nm1);
- rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING);
- rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW);
+ rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
+ rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
// One unhealthy node which should not appear in the list after
// MAPREDUCE-3760.
MockNM nm3 = rm.registerNode("h3:1236", 5122);
- rm.NMwaitForState(nm3.getNodeId(), RMNodeState.NEW);
+ rm.NMwaitForState(nm3.getNodeId(), NodeState.NEW);
rm.sendNodeStarted(nm3);
- rm.NMwaitForState(nm3.getNodeId(), RMNodeState.RUNNING);
+ rm.NMwaitForState(nm3.getNodeId(), NodeState.RUNNING);
RMNodeImpl node = (RMNodeImpl) rm.getRMContext().getRMNodes()
.get(nm3.getNodeId());
NodeHealthStatus nodeHealth = node.getNodeHealthStatus();
@@ -147,7 +146,7 @@ public class TestRMWebServicesNodes exte
nodeHealth.setIsNodeHealthy(false);
node.handle(new RMNodeStatusEvent(nm3.getNodeId(), nodeHealth,
new ArrayList<ContainerStatus>(), null, null));
- rm.NMwaitForState(nm3.getNodeId(), RMNodeState.UNHEALTHY);
+ rm.NMwaitForState(nm3.getNodeId(), NodeState.UNHEALTHY);
ClientResponse response =
r.path("ws").path("v1").path("cluster").path("nodes")
@@ -169,11 +168,11 @@ public class TestRMWebServicesNodes exte
MockNM nm1 = rm.registerNode("h1:1234", 5120);
MockNM nm2 = rm.registerNode("h2:1235", 5121);
rm.sendNodeStarted(nm1);
- rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING);
- rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW);
+ rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
+ rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
ClientResponse response = r.path("ws").path("v1").path("cluster")
- .path("nodes").queryParam("state", RMNodeState.RUNNING.toString())
+ .path("nodes").queryParam("state", NodeState.RUNNING.toString())
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
@@ -196,7 +195,7 @@ public class TestRMWebServicesNodes exte
ClientResponse response = r.path("ws").path("v1").path("cluster")
.path("nodes")
- .queryParam("state", RMNodeState.DECOMMISSIONED.toString())
+ .queryParam("state", NodeState.DECOMMISSIONED.toString())
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
@@ -231,7 +230,7 @@ public class TestRMWebServicesNodes exte
WebServicesTestUtils
.checkStringMatch(
"exception message",
- "No enum const class org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState.BOGUSSTATE",
+ "No enum const class org.apache.hadoop.yarn.api.records.NodeState.BOGUSSTATE",
message);
WebServicesTestUtils.checkStringMatch("exception type",
"IllegalArgumentException", type);
@@ -250,13 +249,13 @@ public class TestRMWebServicesNodes exte
MockNM nm2 = rm.registerNode("h2:1234", 5120);
rm.sendNodeStarted(nm1);
rm.sendNodeStarted(nm2);
- rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING);
- rm.NMwaitForState(nm2.getNodeId(), RMNodeState.RUNNING);
+ rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
+ rm.NMwaitForState(nm2.getNodeId(), NodeState.RUNNING);
rm.sendNodeLost(nm1);
rm.sendNodeLost(nm2);
ClientResponse response = r.path("ws").path("v1").path("cluster")
- .path("nodes").queryParam("state", RMNodeState.LOST.toString())
+ .path("nodes").queryParam("state", NodeState.LOST.toString())
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
@@ -283,8 +282,8 @@ public class TestRMWebServicesNodes exte
MockNM nm2 = rm.registerNode("h2:1234", 5120);
rm.sendNodeStarted(nm1);
rm.sendNodeStarted(nm2);
- rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING);
- rm.NMwaitForState(nm2.getNodeId(), RMNodeState.RUNNING);
+ rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
+ rm.NMwaitForState(nm2.getNodeId(), NodeState.RUNNING);
rm.sendNodeLost(nm1);
rm.sendNodeLost(nm2);
@@ -312,8 +311,8 @@ public class TestRMWebServicesNodes exte
MockNM nm1 = rm.registerNode("h1:1234", 5120);
MockNM nm2 = rm.registerNode("h2:1235", 5121);
rm.sendNodeStarted(nm1);
- rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING);
- rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW);
+ rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
+ rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
ClientResponse response = r.path("ws").path("v1").path("cluster")
.path("nodes").queryParam("healthy", "true")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
@@ -332,8 +331,8 @@ public class TestRMWebServicesNodes exte
MockNM nm1 = rm.registerNode("h1:1234", 5120);
MockNM nm2 = rm.registerNode("h2:1235", 5121);
rm.sendNodeStarted(nm1);
- rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING);
- rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW);
+ rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
+ rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
ClientResponse response = r.path("ws").path("v1").path("cluster")
.path("nodes").queryParam("healthy", "TRUe")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
@@ -353,8 +352,8 @@ public class TestRMWebServicesNodes exte
MockNM nm1 = rm.registerNode("h1:1234", 5120);
MockNM nm2 = rm.registerNode("h2:1235", 5121);
rm.sendNodeStarted(nm1);
- rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW);
- rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING);
+ rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
+ rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
RMNodeImpl node = (RMNodeImpl) rm.getRMContext().getRMNodes()
.get(nm1.getNodeId());
NodeHealthStatus nodeHealth = node.getNodeHealthStatus();
@@ -362,11 +361,11 @@ public class TestRMWebServicesNodes exte
nodeHealth.setIsNodeHealthy(false);
node.handle(new RMNodeStatusEvent(nm1.getNodeId(), nodeHealth,
new ArrayList<ContainerStatus>(), null, null));
- rm.NMwaitForState(nm1.getNodeId(), RMNodeState.UNHEALTHY);
+ rm.NMwaitForState(nm1.getNodeId(), NodeState.UNHEALTHY);
ClientResponse response = r.path("ws").path("v1").path("cluster")
.path("nodes").queryParam("healthy", "true")
- .queryParam("state", RMNodeState.RUNNING.toString())
+ .queryParam("state", NodeState.RUNNING.toString())
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
@@ -380,8 +379,8 @@ public class TestRMWebServicesNodes exte
MockNM nm1 = rm.registerNode("h1:1234", 5120);
MockNM nm2 = rm.registerNode("h2:1235", 5121);
rm.sendNodeStarted(nm1);
- rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING);
- rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW);
+ rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
+ rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
ClientResponse response = r.path("ws").path("v1").path("cluster")
.path("nodes").queryParam("healthy", "false")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
Modified: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java Sat May 12 20:52:34 2012
@@ -399,10 +399,8 @@ public class TestContainerManagerSecurit
Token<ApplicationTokenIdentifier> appToken =
new Token<ApplicationTokenIdentifier>(appTokenIdentifier,
appTokenSecretManager);
- appToken.setService(new Text(schedulerAddr.getHostName() + ":"
- + schedulerAddr.getPort()));
- currentUser.addToken(appToken);
SecurityUtil.setTokenService(appToken, schedulerAddr);
+ currentUser.addToken(appToken);
AMRMProtocol scheduler = currentUser
.doAs(new PrivilegedAction<AMRMProtocol>() {
Modified: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java Sat May 12 20:52:34 2012
@@ -24,6 +24,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -34,6 +35,12 @@ import org.apache.hadoop.yarn.service.Co
* web interfaces.
*/
public class WebAppProxyServer extends CompositeService {
+
+ /**
+ * Priority of the WebAppProxyServer shutdown hook.
+ */
+ public static final int SHUTDOWN_HOOK_PRIORITY = 30;
+
private static final Log LOG = LogFactory.getLog(WebAppProxyServer.class);
private WebAppProxy proxy = null;
@@ -69,8 +76,9 @@ public class WebAppProxyServer extends C
StringUtils.startupShutdownMessage(WebAppProxyServer.class, args, LOG);
try {
WebAppProxyServer proxy = new WebAppProxyServer();
- Runtime.getRuntime().addShutdownHook(
- new CompositeServiceShutdownHook(proxy));
+ ShutdownHookManager.get().addShutdownHook(
+ new CompositeServiceShutdownHook(proxy),
+ SHUTDOWN_HOOK_PRIORITY);
YarnConfiguration conf = new YarnConfiguration();
proxy.init(conf);
proxy.start();
Modified: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/pom.xml?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/pom.xml (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/pom.xml Sat May 12 20:52:34 2012
@@ -128,8 +128,8 @@
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
- <!-- needed for security and runtime -->
<artifactId>hadoop-hdfs</artifactId>
+ <scope>test</scope>
</dependency>
<dependency>
<groupId>com.google.inject</groupId>
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/c++/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/c++:r1333291-1337618
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib:r1333291-1337618
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/block_forensics/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/block_forensics:r1333291-1337618
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/build-contrib.xml
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/build-contrib.xml:r1333291-1337618
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/build.xml
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/build.xml:r1333291-1337618
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/data_join/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/data_join:r1333291-1337618
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/eclipse-plugin/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/eclipse-plugin:r1333291-1337618
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/index/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/index:r1333291-1337618
Modified: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java Sat May 12 20:52:34 2012
@@ -144,25 +144,25 @@ public class BlockPlacementPolicyRaid ex
/** {@inheritDoc} */
@Override
- public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo inode,
+ public DatanodeDescriptor chooseReplicaToDelete(BlockCollection bc,
Block block, short replicationFactor,
Collection<DatanodeDescriptor> first,
Collection<DatanodeDescriptor> second) {
DatanodeDescriptor chosenNode = null;
try {
- String path = cachedFullPathNames.get(inode);
+ String path = cachedFullPathNames.get(bc);
FileType type = getFileType(path);
if (type == FileType.NOT_RAID) {
return defaultPolicy.chooseReplicaToDelete(
- inode, block, replicationFactor, first, second);
+ bc, block, replicationFactor, first, second);
}
List<LocatedBlock> companionBlocks =
getCompanionBlocks(path, type, block);
if (companionBlocks == null || companionBlocks.size() == 0) {
// Use the default method if it is not a valid raided or parity file
return defaultPolicy.chooseReplicaToDelete(
- inode, block, replicationFactor, first, second);
+ bc, block, replicationFactor, first, second);
}
// Delete from the first collection first
// This ensures the number of unique rack of this block is not reduced
@@ -174,12 +174,12 @@ public class BlockPlacementPolicyRaid ex
return chosenNode;
}
return defaultPolicy.chooseReplicaToDelete(
- inode, block, replicationFactor, first, second);
+ bc, block, replicationFactor, first, second);
} catch (Exception e) {
LOG.debug("Error happend when choosing replica to delete" +
StringUtils.stringifyException(e));
return defaultPolicy.chooseReplicaToDelete(
- inode, block, replicationFactor, first, second);
+ bc, block, replicationFactor, first, second);
}
}
@@ -425,7 +425,7 @@ public class BlockPlacementPolicyRaid ex
}
/**
- * Cache results for FSInodeInfo.getFullPathName()
+ * Cache results for getFullPathName()
*/
static class CachedFullPathNames {
FSNamesystem namesystem;
@@ -446,25 +446,25 @@ public class BlockPlacementPolicyRaid ex
};
static private class INodeWithHashCode {
- FSInodeInfo inode;
- INodeWithHashCode(FSInodeInfo inode) {
- this.inode = inode;
+ BlockCollection bc;
+ INodeWithHashCode(BlockCollection bc) {
+ this.bc= bc;
}
@Override
public boolean equals(Object obj) {
- return inode == obj;
+ return bc== obj;
}
@Override
public int hashCode() {
- return System.identityHashCode(inode);
+ return System.identityHashCode(bc);
}
String getFullPathName() {
- return inode.getFullPathName();
+ return bc.getName();
}
}
- public String get(FSInodeInfo inode) throws IOException {
- return cacheInternal.get(new INodeWithHashCode(inode));
+ public String get(BlockCollection bc) throws IOException {
+ return cacheInternal.get(new INodeWithHashCode(bc));
}
}
Modified: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyRaid.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyRaid.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyRaid.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyRaid.java Sat May 12 20:52:34 2012
@@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.protocol.L
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRaid.CachedFullPathNames;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRaid.CachedLocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRaid.FileType;
-import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.NameNodeRaidTestUtil;
@@ -241,19 +240,19 @@ public class TestBlockPlacementPolicyRai
// test full path cache
CachedFullPathNames cachedFullPathNames =
new CachedFullPathNames(namesystem);
- final FSInodeInfo[] inodes = NameNodeRaidTestUtil.getFSInodeInfo(
+ final BlockCollection[] bcs = NameNodeRaidTestUtil.getBlockCollections(
namesystem, file1, file2);
- verifyCachedFullPathNameResult(cachedFullPathNames, inodes[0]);
- verifyCachedFullPathNameResult(cachedFullPathNames, inodes[0]);
- verifyCachedFullPathNameResult(cachedFullPathNames, inodes[1]);
- verifyCachedFullPathNameResult(cachedFullPathNames, inodes[1]);
+ verifyCachedFullPathNameResult(cachedFullPathNames, bcs[0]);
+ verifyCachedFullPathNameResult(cachedFullPathNames, bcs[0]);
+ verifyCachedFullPathNameResult(cachedFullPathNames, bcs[1]);
+ verifyCachedFullPathNameResult(cachedFullPathNames, bcs[1]);
try {
Thread.sleep(1200L);
} catch (InterruptedException e) {
}
- verifyCachedFullPathNameResult(cachedFullPathNames, inodes[1]);
- verifyCachedFullPathNameResult(cachedFullPathNames, inodes[0]);
+ verifyCachedFullPathNameResult(cachedFullPathNames, bcs[1]);
+ verifyCachedFullPathNameResult(cachedFullPathNames, bcs[0]);
} finally {
if (cluster != null) {
cluster.shutdown();
@@ -477,14 +476,14 @@ public class TestBlockPlacementPolicyRai
}
private void verifyCachedFullPathNameResult(
- CachedFullPathNames cachedFullPathNames, FSInodeInfo inode)
+ CachedFullPathNames cachedFullPathNames, BlockCollection bc)
throws IOException {
- String res1 = inode.getFullPathName();
- String res2 = cachedFullPathNames.get(inode);
+ String res1 = bc.getName();
+ String res2 = cachedFullPathNames.get(bc);
LOG.info("Actual path name: " + res1);
LOG.info("Cached path name: " + res2);
- Assert.assertEquals(cachedFullPathNames.get(inode),
- inode.getFullPathName());
+ Assert.assertEquals(cachedFullPathNames.get(bc),
+ bc.getName());
}
private void verifyCachedBlocksResult(CachedLocatedBlocks cachedBlocks,
@@ -503,7 +502,7 @@ public class TestBlockPlacementPolicyRai
private Collection<LocatedBlock> getCompanionBlocks(
FSNamesystem namesystem, BlockPlacementPolicyRaid policy,
ExtendedBlock block) throws IOException {
- INodeFile inode = blockManager.blocksMap.getINode(block
+ INodeFile inode = (INodeFile)blockManager.blocksMap.getBlockCollection(block
.getLocalBlock());
FileType type = policy.getFileType(inode.getFullPathName());
return policy.getCompanionBlocks(inode.getFullPathName(), type,
Modified: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidTestUtil.java?rev=1337645&r1=1337644&r2=1337645&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidTestUtil.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidTestUtil.java Sat May 12 20:52:34 2012
@@ -18,16 +18,17 @@
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
public class NameNodeRaidTestUtil {
- public static FSInodeInfo[] getFSInodeInfo(final FSNamesystem namesystem,
+ public static BlockCollection[] getBlockCollections(final FSNamesystem namesystem,
final String... files) throws UnresolvedLinkException {
- final FSInodeInfo[] inodes = new FSInodeInfo[files.length];
+ final BlockCollection[] inodes = new BlockCollection[files.length];
final FSDirectory dir = namesystem.dir;
dir.readLock();
try {
for(int i = 0; i < files.length; i++) {
- inodes[i] = dir.rootDir.getNode(files[i], true);
+ inodes[i] = (BlockCollection)dir.rootDir.getNode(files[i], true);
}
return inodes;
} finally {
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/contrib/vaidya/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/vaidya:r1333291-1337618
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/examples/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/examples:r1333291-1337618
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/java:r1333291-1337618
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/test/mapred/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred:r1333291-1337618
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs:r1333291-1337618
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/hdfs:r1333291-1337618
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc:r1333291-1337618
Propchange: hadoop/common/branches/HDFS-3042/hadoop-mapreduce-project/src/webapps/job/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-mapreduce-project/src/webapps/job:r1333291-1337618