You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by vi...@apache.org on 2014/11/12 04:47:23 UTC
[01/25] hadoop git commit: YARN-2830. Add backwards compatible
ContainerId.newInstance constructor. Contributed by Jonathan Eagles.
Repository: hadoop
Updated Branches:
refs/heads/HDFS-EC 3a1b3f82d -> 95b3ebaa1
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
index 8f042a8..a7e5d9c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
@@ -139,7 +139,7 @@ public class BuilderUtils {
public static ContainerId newContainerId(ApplicationAttemptId appAttemptId,
long containerId) {
- return ContainerId.newInstance(appAttemptId, containerId);
+ return ContainerId.newContainerId(appAttemptId, containerId);
}
public static ContainerId newContainerId(int appId, int appAttemptId,
@@ -164,7 +164,7 @@ public class BuilderUtils {
public static ContainerId newContainerId(RecordFactory recordFactory,
ApplicationId appId, ApplicationAttemptId appAttemptId,
int containerId) {
- return ContainerId.newInstance(appAttemptId, containerId);
+ return ContainerId.newContainerId(appAttemptId, containerId);
}
public static NodeId newNodeId(String host, int port) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
index da25aa2..20983b6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
@@ -223,7 +223,7 @@ public class TestYarnServerApiClasses {
}
private ContainerId getContainerId(int containerID, int appAttemptId) {
- ContainerId containerId = ContainerIdPBImpl.newInstance(
+ ContainerId containerId = ContainerIdPBImpl.newContainerId(
getApplicationAttemptId(appAttemptId), containerID);
return containerId;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java
index ed902ba..86e49f0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java
@@ -51,7 +51,7 @@ public class TestProtocolRecords {
public void testNMContainerStatus() {
ApplicationId appId = ApplicationId.newInstance(123456789, 1);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
- ContainerId containerId = ContainerId.newInstance(attemptId, 1);
+ ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
Resource resource = Resource.newInstance(1000, 200);
NMContainerStatus report =
@@ -76,7 +76,7 @@ public class TestProtocolRecords {
public void testRegisterNodeManagerRequest() {
ApplicationId appId = ApplicationId.newInstance(123456789, 1);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
- ContainerId containerId = ContainerId.newInstance(attemptId, 1);
+ ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
NMContainerStatus containerReport =
NMContainerStatus.newInstance(containerId,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestRegisterNodeManagerRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestRegisterNodeManagerRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestRegisterNodeManagerRequest.java
index fdacd92..947dec1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestRegisterNodeManagerRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestRegisterNodeManagerRequest.java
@@ -38,7 +38,7 @@ public class TestRegisterNodeManagerRequest {
RegisterNodeManagerRequest.newInstance(
NodeId.newInstance("host", 1234), 1234, Resource.newInstance(0, 0),
"version", Arrays.asList(NMContainerStatus.newInstance(
- ContainerId.newInstance(
+ ContainerId.newContainerId(
ApplicationAttemptId.newInstance(
ApplicationId.newInstance(1234L, 1), 1), 1),
ContainerState.RUNNING, Resource.newInstance(1024, 1), "good", -1,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
index fabb03b..d2caefe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
@@ -139,7 +139,7 @@ public class TestEventFlow {
ApplicationId applicationId = ApplicationId.newInstance(0, 0);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, 0);
- ContainerId cID = ContainerId.newInstance(applicationAttemptId, 0);
+ ContainerId cID = ContainerId.newContainerId(applicationAttemptId, 0);
String user = "testing";
StartContainerRequest scRequest =
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
index c9e09fa..7417f69 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
@@ -431,7 +431,7 @@ public class TestLinuxContainerExecutor {
ApplicationId appId = ApplicationId.newInstance(12345, 67890);
ApplicationAttemptId attemptId =
ApplicationAttemptId.newInstance(appId, 54321);
- ContainerId cid = ContainerId.newInstance(attemptId, 9876);
+ ContainerId cid = ContainerId.newContainerId(attemptId, 9876);
Configuration conf = new YarnConfiguration();
conf.setClass(YarnConfiguration.NM_LINUX_CONTAINER_RESOURCES_HANDLER,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
index e9aea0e..41c16a9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
@@ -290,7 +290,7 @@ public class TestNodeManagerReboot {
ApplicationId appId = ApplicationId.newInstance(0, 0);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
- ContainerId containerId = ContainerId.newInstance(appAttemptId, 0);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, 0);
return containerId;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
index 85bafb3..a58294f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
@@ -592,7 +592,7 @@ public class TestNodeManagerResync {
ApplicationId applicationId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, 1);
- ContainerId containerId = ContainerId.newInstance(applicationAttemptId, id);
+ ContainerId containerId = ContainerId.newContainerId(applicationAttemptId, id);
NMContainerStatus containerReport =
NMContainerStatus.newInstance(containerId, containerState,
Resource.newInstance(1024, 1), "recover container", 0,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java
index c44f7b8..2a887ba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java
@@ -260,7 +260,7 @@ public class TestNodeManagerShutdown {
ApplicationId appId = ApplicationId.newInstance(0, 0);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
- ContainerId containerId = ContainerId.newInstance(appAttemptId, 0);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, 0);
return containerId;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
index 925a249..b34262b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
@@ -224,7 +224,7 @@ public class TestNodeStatusUpdater {
ApplicationAttemptId appAttemptID =
ApplicationAttemptId.newInstance(appId1, 0);
ContainerId firstContainerID =
- ContainerId.newInstance(appAttemptID, heartBeatID);
+ ContainerId.newContainerId(appAttemptID, heartBeatID);
ContainerLaunchContext launchContext = recordFactory
.newRecordInstance(ContainerLaunchContext.class);
Resource resource = BuilderUtils.newResource(2, 1);
@@ -254,7 +254,7 @@ public class TestNodeStatusUpdater {
ApplicationAttemptId appAttemptID =
ApplicationAttemptId.newInstance(appId2, 0);
ContainerId secondContainerID =
- ContainerId.newInstance(appAttemptID, heartBeatID);
+ ContainerId.newContainerId(appAttemptID, heartBeatID);
ContainerLaunchContext launchContext = recordFactory
.newRecordInstance(ContainerLaunchContext.class);
long currentTime = System.currentTimeMillis();
@@ -818,7 +818,7 @@ public class TestNodeStatusUpdater {
ApplicationId appId = ApplicationId.newInstance(0, 0);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 0);
- ContainerId cId = ContainerId.newInstance(appAttemptId, 0);
+ ContainerId cId = ContainerId.newContainerId(appAttemptId, 0);
nm.getNMContext().getApplications().putIfAbsent(appId,
mock(Application.class));
nm.getNMContext().getContainers().putIfAbsent(cId, mock(Container.class));
@@ -855,7 +855,7 @@ public class TestNodeStatusUpdater {
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 0);
- ContainerId cId = ContainerId.newInstance(appAttemptId, 1);
+ ContainerId cId = ContainerId.newContainerId(appAttemptId, 1);
Token containerToken =
BuilderUtils.newContainerToken(cId, "anyHost", 1234, "anyUser",
BuilderUtils.newResource(1024, 1), 0, 123,
@@ -876,7 +876,7 @@ public class TestNodeStatusUpdater {
};
ContainerId runningContainerId =
- ContainerId.newInstance(appAttemptId, 3);
+ ContainerId.newContainerId(appAttemptId, 3);
Token runningContainerToken =
BuilderUtils.newContainerToken(runningContainerId, "anyHost",
1234, "anyUser", BuilderUtils.newResource(1024, 1), 0, 123,
@@ -936,7 +936,7 @@ public class TestNodeStatusUpdater {
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 0);
- ContainerId cId = ContainerId.newInstance(appAttemptId, 1);
+ ContainerId cId = ContainerId.newContainerId(appAttemptId, 1);
Token containerToken =
BuilderUtils.newContainerToken(cId, "anyHost", 1234, "anyUser",
BuilderUtils.newResource(1024, 1), 0, 123,
@@ -1494,7 +1494,7 @@ public class TestNodeStatusUpdater {
ApplicationId applicationId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, 1);
- ContainerId contaierId = ContainerId.newInstance(applicationAttemptId, id);
+ ContainerId contaierId = ContainerId.newContainerId(applicationAttemptId, id);
ContainerStatus containerStatus =
BuilderUtils.newContainerStatus(contaierId, containerState,
"test_containerStatus: id=" + id + ", containerState: "
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java
index 59cc947..757cdc8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java
@@ -189,7 +189,7 @@ public class TestAuxServices {
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId1, 1);
ContainerTokenIdentifier cti = new ContainerTokenIdentifier(
- ContainerId.newInstance(attemptId, 1), "", "",
+ ContainerId.newContainerId(attemptId, 1), "", "",
Resource.newInstance(1, 1), 0,0,0, Priority.newInstance(0), 0);
Container container = new ContainerImpl(null, null, null, null, null,
null, cti);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
index 45d9925..86cc4dc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
@@ -104,7 +104,7 @@ public class TestContainerManager extends BaseContainerManagerTest {
ApplicationId appId = ApplicationId.newInstance(0, 0);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
- ContainerId containerId = ContainerId.newInstance(appAttemptId, id);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, id);
return containerId;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
index 007fc36..a73d583 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
@@ -111,7 +111,7 @@ public class TestContainerManagerRecovery {
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId attemptId =
ApplicationAttemptId.newInstance(appId, 1);
- ContainerId cid = ContainerId.newInstance(attemptId, 1);
+ ContainerId cid = ContainerId.newContainerId(attemptId, 1);
Map<String, LocalResource> localResources = Collections.emptyMap();
Map<String, String> containerEnv = Collections.emptyMap();
List<String> containerCmds = Collections.emptyList();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index 001643b..cbc41c4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -385,7 +385,7 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
- ContainerId cId = ContainerId.newInstance(appAttemptId, 0);
+ ContainerId cId = ContainerId.newContainerId(appAttemptId, 0);
Map<String, String> userSetEnv = new HashMap<String, String>();
userSetEnv.put(Environment.CONTAINER_ID.name(), "user_set_container_id");
userSetEnv.put(Environment.NM_HOST.name(), "user_set_NM_HOST");
@@ -634,7 +634,7 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
ApplicationId appId = ApplicationId.newInstance(1, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
- ContainerId cId = ContainerId.newInstance(appAttemptId, 0);
+ ContainerId cId = ContainerId.newContainerId(appAttemptId, 0);
File processStartFile =
new File(tmpDir, "pid.txt").getAbsoluteFile();
@@ -771,7 +771,7 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
@Test (timeout = 10000)
public void testCallFailureWithNullLocalizedResources() {
Container container = mock(Container.class);
- when(container.getContainerId()).thenReturn(ContainerId.newInstance(
+ when(container.getContainerId()).thenReturn(ContainerId.newContainerId(
ApplicationAttemptId.newInstance(ApplicationId.newInstance(
System.currentTimeMillis(), 1), 1), 1));
ContainerLaunchContext clc = mock(ContainerLaunchContext.class);
@@ -980,7 +980,7 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
ApplicationId appId = ApplicationId.newInstance(2, 2);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
- ContainerId cId = ContainerId.newInstance(appAttemptId, 0);
+ ContainerId cId = ContainerId.newContainerId(appAttemptId, 0);
File processStartFile =
new File(tmpDir, "pid.txt").getAbsoluteFile();
File childProcessStartFile =
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
index 99d722f..1f2d067 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
@@ -206,7 +206,7 @@ public class TestContainersMonitor extends BaseContainerManagerTest {
// ////// Construct the Container-id
ApplicationId appId = ApplicationId.newInstance(0, 0);
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
- ContainerId cId = ContainerId.newInstance(appAttemptId, 0);
+ ContainerId cId = ContainerId.newContainerId(appAttemptId, 0);
int port = 12345;
URL resource_alpha =
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
index db377f5..438cec3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
@@ -226,7 +226,7 @@ public class TestNMLeveldbStateStoreService {
ApplicationId appId = ApplicationId.newInstance(1234, 3);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 4);
- ContainerId containerId = ContainerId.newInstance(appAttemptId, 5);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, 5);
LocalResource lrsrc = LocalResource.newInstance(
URL.newInstance("hdfs", "somehost", 12345, "/some/path/to/rsrc"),
LocalResourceType.FILE, LocalResourceVisibility.APPLICATION, 123L,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
index c07882d..891130f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
@@ -378,7 +378,7 @@ public class TestApplicationCleanup {
// nm1/nm2 register to rm2, and do a heartbeat
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
nm1.registerNode(Arrays.asList(NMContainerStatus.newInstance(
- ContainerId.newInstance(am0.getApplicationAttemptId(), 1),
+ ContainerId.newContainerId(am0.getApplicationAttemptId(), 1),
ContainerState.COMPLETE, Resource.newInstance(1024, 1), "", 0,
Priority.newInstance(0), 1234)), Arrays.asList(app0.getApplicationId()));
nm2.setResourceTrackerService(rm2.getResourceTrackerService());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
index 5652b6e..15aca42 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
@@ -404,7 +404,7 @@ public class TestClientRMService {
.newRecordInstance(GetContainerReportRequest.class);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
ApplicationId.newInstance(123456, 1), 1);
- ContainerId containerId = ContainerId.newInstance(attemptId, 1);
+ ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
request.setContainerId(containerId);
try {
@@ -425,7 +425,7 @@ public class TestClientRMService {
.newRecordInstance(GetContainersRequest.class);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
ApplicationId.newInstance(123456, 1), 1);
- ContainerId containerId = ContainerId.newInstance(attemptId, 1);
+ ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
request.setApplicationAttemptId(attemptId);
try {
GetContainersResponse response = rmService.getContainers(request);
@@ -1213,7 +1213,7 @@ public class TestClientRMService {
RMAppAttemptImpl rmAppAttemptImpl = spy(new RMAppAttemptImpl(attemptId,
rmContext, yarnScheduler, null, asContext, config, false, null));
Container container = Container.newInstance(
- ContainerId.newInstance(attemptId, 1), null, "", null, null, null);
+ ContainerId.newContainerId(attemptId, 1), null, "", null, null, null);
RMContainerImpl containerimpl = spy(new RMContainerImpl(container,
attemptId, null, "", rmContext));
Map<ApplicationAttemptId, RMAppAttempt> attempts =
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java
index c200df4..b9397bf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java
@@ -207,7 +207,7 @@ public class TestContainerResourceUsage {
// usage metrics. This will cause the attempt to fail, and, since the max
// attempt retries is 1, the app will also fail. This is intentional so
// that all containers will complete prior to saving.
- ContainerId cId = ContainerId.newInstance(attempt0.getAppAttemptId(), 1);
+ ContainerId cId = ContainerId.newContainerId(attempt0.getAppAttemptId(), 1);
nm.nodeHeartbeat(attempt0.getAppAttemptId(),
cId.getContainerId(), ContainerState.COMPLETE);
rm0.waitForState(nm, cId, RMContainerState.COMPLETED);
@@ -289,7 +289,7 @@ public class TestContainerResourceUsage {
// launch the 2nd container.
ContainerId containerId2 =
- ContainerId.newInstance(am0.getApplicationAttemptId(), 2);
+ ContainerId.newContainerId(am0.getApplicationAttemptId(), 2);
nm.nodeHeartbeat(am0.getApplicationAttemptId(),
containerId2.getContainerId(), ContainerState.RUNNING);
rm.waitForState(nm, containerId2, RMContainerState.RUNNING);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
index a9683f1..a0f8627 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
@@ -1963,7 +1963,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
public static NMContainerStatus createNMContainerStatus(
ApplicationAttemptId appAttemptId, int id, ContainerState containerState) {
- ContainerId containerId = ContainerId.newInstance(appAttemptId, id);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, id);
NMContainerStatus containerReport =
NMContainerStatus.newInstance(containerId, containerState,
Resource.newInstance(1024, 1), "recover container", 0,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
index 28d1d63..7c12848 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
@@ -510,7 +510,7 @@ public class TestResourceTrackerService {
// Case 1.1: AppAttemptId is null
NMContainerStatus report =
NMContainerStatus.newInstance(
- ContainerId.newInstance(
+ ContainerId.newContainerId(
ApplicationAttemptId.newInstance(app.getApplicationId(), 2), 1),
ContainerState.COMPLETE, Resource.newInstance(1024, 1),
"Dummy Completed", 0, Priority.newInstance(10), 1234);
@@ -522,7 +522,7 @@ public class TestResourceTrackerService {
(RMAppAttemptImpl) app.getCurrentAppAttempt();
currentAttempt.setMasterContainer(null);
report = NMContainerStatus.newInstance(
- ContainerId.newInstance(currentAttempt.getAppAttemptId(), 0),
+ ContainerId.newContainerId(currentAttempt.getAppAttemptId(), 0),
ContainerState.COMPLETE, Resource.newInstance(1024, 1),
"Dummy Completed", 0, Priority.newInstance(10), 1234);
rm.getResourceTrackerService().handleNMContainerStatus(report, null);
@@ -533,7 +533,7 @@ public class TestResourceTrackerService {
// Case 2.1: AppAttemptId is null
report = NMContainerStatus.newInstance(
- ContainerId.newInstance(
+ ContainerId.newContainerId(
ApplicationAttemptId.newInstance(app.getApplicationId(), 2), 1),
ContainerState.COMPLETE, Resource.newInstance(1024, 1),
"Dummy Completed", 0, Priority.newInstance(10), 1234);
@@ -549,7 +549,7 @@ public class TestResourceTrackerService {
(RMAppAttemptImpl) app.getCurrentAppAttempt();
currentAttempt.setMasterContainer(null);
report = NMContainerStatus.newInstance(
- ContainerId.newInstance(currentAttempt.getAppAttemptId(), 0),
+ ContainerId.newContainerId(currentAttempt.getAppAttemptId(), 0),
ContainerState.COMPLETE, Resource.newInstance(1024, 1),
"Dummy Completed", 0, Priority.newInstance(10), 1234);
try {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
index 536dbd7..2f0a839 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
@@ -849,7 +849,7 @@ public class TestWorkPreservingRMRestart {
// try to release a container before the container is actually recovered.
final ContainerId runningContainer =
- ContainerId.newInstance(am1.getApplicationAttemptId(), 2);
+ ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
am1.allocate(null, Arrays.asList(runningContainer));
// send container statuses to recover the containers
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/TestRMApplicationHistoryWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/TestRMApplicationHistoryWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/TestRMApplicationHistoryWriter.java
index 78077d4..f827bf4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/TestRMApplicationHistoryWriter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ahs/TestRMApplicationHistoryWriter.java
@@ -153,7 +153,7 @@ public class TestRMApplicationHistoryWriter {
when(appAttempt.getRpcPort()).thenReturn(-100);
Container container = mock(Container.class);
when(container.getId())
- .thenReturn(ContainerId.newInstance(appAttemptId, 1));
+ .thenReturn(ContainerId.newContainerId(appAttemptId, 1));
when(appAttempt.getMasterContainer()).thenReturn(container);
when(appAttempt.getDiagnostics()).thenReturn("test diagnostics info");
when(appAttempt.getTrackingUrl()).thenReturn("test url");
@@ -254,7 +254,7 @@ public class TestRMApplicationHistoryWriter {
Assert.assertNotNull(appAttemptHD);
Assert.assertEquals("test host", appAttemptHD.getHost());
Assert.assertEquals(-100, appAttemptHD.getRPCPort());
- Assert.assertEquals(ContainerId.newInstance(
+ Assert.assertEquals(ContainerId.newContainerId(
ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1), 1),
appAttemptHD.getMasterContainerId());
@@ -281,14 +281,14 @@ public class TestRMApplicationHistoryWriter {
@Test
public void testWriteContainer() throws Exception {
RMContainer container =
- createRMContainer(ContainerId.newInstance(
+ createRMContainer(ContainerId.newContainerId(
ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1),
1));
writer.containerStarted(container);
ContainerHistoryData containerHD = null;
for (int i = 0; i < MAX_RETRIES; ++i) {
containerHD =
- store.getContainer(ContainerId.newInstance(ApplicationAttemptId
+ store.getContainer(ContainerId.newContainerId(ApplicationAttemptId
.newInstance(ApplicationId.newInstance(0, 1), 1), 1));
if (containerHD != null) {
break;
@@ -307,7 +307,7 @@ public class TestRMApplicationHistoryWriter {
writer.containerFinished(container);
for (int i = 0; i < MAX_RETRIES; ++i) {
containerHD =
- store.getContainer(ContainerId.newInstance(ApplicationAttemptId
+ store.getContainer(ContainerId.newContainerId(ApplicationAttemptId
.newInstance(ApplicationId.newInstance(0, 1), 1), 1));
if (containerHD.getContainerState() != null) {
break;
@@ -337,7 +337,7 @@ public class TestRMApplicationHistoryWriter {
RMAppAttempt appAttempt = createRMAppAttempt(appAttemptId);
writer.applicationAttemptStarted(appAttempt);
for (int k = 1; k <= 10; ++k) {
- ContainerId containerId = ContainerId.newInstance(appAttemptId, k);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, k);
RMContainer container = createRMContainer(containerId);
writer.containerStarted(container);
writer.containerFinished(container);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
index 800f65b..62e3e5c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
@@ -189,7 +189,7 @@ public abstract class MockAsm extends MockApps {
final ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(newAppID(i), 0);
final Container masterContainer = Records.newRecord(Container.class);
- ContainerId containerId = ContainerId.newInstance(appAttemptId, 0);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, 0);
masterContainer.setId(containerId);
masterContainer.setNodeHttpAddress("node:port");
final String user = newUserName();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
index fcb4e45..a93123e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
@@ -101,20 +101,20 @@ public class TestAMRestart {
// launch the 2nd container, for testing running container transferred.
nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 2, ContainerState.RUNNING);
ContainerId containerId2 =
- ContainerId.newInstance(am1.getApplicationAttemptId(), 2);
+ ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
rm1.waitForState(nm1, containerId2, RMContainerState.RUNNING);
// launch the 3rd container, for testing container allocated by previous
// attempt is completed by the next new attempt/
nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 3, ContainerState.RUNNING);
ContainerId containerId3 =
- ContainerId.newInstance(am1.getApplicationAttemptId(), 3);
+ ContainerId.newContainerId(am1.getApplicationAttemptId(), 3);
rm1.waitForState(nm1, containerId3, RMContainerState.RUNNING);
// 4th container still in AQUIRED state. for testing Acquired container is
// always killed.
ContainerId containerId4 =
- ContainerId.newInstance(am1.getApplicationAttemptId(), 4);
+ ContainerId.newContainerId(am1.getApplicationAttemptId(), 4);
rm1.waitForState(nm1, containerId4, RMContainerState.ACQUIRED);
// 5th container is in Allocated state. for testing allocated container is
@@ -122,14 +122,14 @@ public class TestAMRestart {
am1.allocate("127.0.0.1", 1024, 1, new ArrayList<ContainerId>());
nm1.nodeHeartbeat(true);
ContainerId containerId5 =
- ContainerId.newInstance(am1.getApplicationAttemptId(), 5);
+ ContainerId.newContainerId(am1.getApplicationAttemptId(), 5);
rm1.waitForContainerAllocated(nm1, containerId5);
rm1.waitForState(nm1, containerId5, RMContainerState.ALLOCATED);
// 6th container is in Reserved state.
am1.allocate("127.0.0.1", 6000, 1, new ArrayList<ContainerId>());
ContainerId containerId6 =
- ContainerId.newInstance(am1.getApplicationAttemptId(), 6);
+ ContainerId.newContainerId(am1.getApplicationAttemptId(), 6);
nm1.nodeHeartbeat(true);
SchedulerApplicationAttempt schedulerAttempt =
((AbstractYarnScheduler) rm1.getResourceScheduler())
@@ -295,12 +295,12 @@ public class TestAMRestart {
// launch the container-2
nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 2, ContainerState.RUNNING);
ContainerId containerId2 =
- ContainerId.newInstance(am1.getApplicationAttemptId(), 2);
+ ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
rm1.waitForState(nm1, containerId2, RMContainerState.RUNNING);
// launch the container-3
nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 3, ContainerState.RUNNING);
ContainerId containerId3 =
- ContainerId.newInstance(am1.getApplicationAttemptId(), 3);
+ ContainerId.newContainerId(am1.getApplicationAttemptId(), 3);
rm1.waitForState(nm1, containerId3, RMContainerState.RUNNING);
// fail am1
@@ -335,7 +335,7 @@ public class TestAMRestart {
}
nm1.nodeHeartbeat(am2.getApplicationAttemptId(), 2, ContainerState.RUNNING);
ContainerId am2ContainerId2 =
- ContainerId.newInstance(am2.getApplicationAttemptId(), 2);
+ ContainerId.newContainerId(am2.getApplicationAttemptId(), 2);
rm1.waitForState(nm1, am2ContainerId2, RMContainerState.RUNNING);
// fail am2.
@@ -379,7 +379,7 @@ public class TestAMRestart {
CapacityScheduler scheduler =
(CapacityScheduler) rm1.getResourceScheduler();
ContainerId amContainer =
- ContainerId.newInstance(am1.getApplicationAttemptId(), 1);
+ ContainerId.newContainerId(am1.getApplicationAttemptId(), 1);
// Preempt the first attempt;
scheduler.killContainer(scheduler.getRMContainer(amContainer));
@@ -396,7 +396,7 @@ public class TestAMRestart {
// Preempt the second attempt.
ContainerId amContainer2 =
- ContainerId.newInstance(am2.getApplicationAttemptId(), 1);
+ ContainerId.newContainerId(am2.getApplicationAttemptId(), 1);
scheduler.killContainer(scheduler.getRMContainer(amContainer2));
am2.waitForState(RMAppAttemptState.FAILED);
@@ -487,7 +487,7 @@ public class TestAMRestart {
CapacityScheduler scheduler =
(CapacityScheduler) rm1.getResourceScheduler();
ContainerId amContainer =
- ContainerId.newInstance(am1.getApplicationAttemptId(), 1);
+ ContainerId.newContainerId(am1.getApplicationAttemptId(), 1);
// Forcibly preempt the am container;
scheduler.killContainer(scheduler.getRMContainer(amContainer));
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
index bc509a0..65c8547 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
@@ -250,7 +250,7 @@ public class TestSystemMetricsPublisher {
@Test(timeout = 10000)
public void testPublishContainerMetrics() throws Exception {
ContainerId containerId =
- ContainerId.newInstance(ApplicationAttemptId.newInstance(
+ ContainerId.newContainerId(ApplicationAttemptId.newInstance(
ApplicationId.newInstance(0, 1), 1), 1);
RMContainer container = createRMContainer(containerId);
metricsPublisher.containerCreated(container, container.getCreationTime());
@@ -347,7 +347,7 @@ public class TestSystemMetricsPublisher {
when(appAttempt.getRpcPort()).thenReturn(-100);
Container container = mock(Container.class);
when(container.getId())
- .thenReturn(ContainerId.newInstance(appAttemptId, 1));
+ .thenReturn(ContainerId.newContainerId(appAttemptId, 1));
when(appAttempt.getMasterContainer()).thenReturn(container);
when(appAttempt.getDiagnostics()).thenReturn("test diagnostics info");
when(appAttempt.getTrackingUrl()).thenReturn("test tracking url");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
index a0c2b01..24e70bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
@@ -728,7 +728,7 @@ public class TestProportionalCapacityPreemptionPolicy {
RMContainer mockContainer(ApplicationAttemptId appAttId, int id,
Resource r, int priority) {
- ContainerId cId = ContainerId.newInstance(appAttId, id);
+ ContainerId cId = ContainerId.newContainerId(appAttId, id);
Container c = mock(Container.class);
when(c.getResource()).thenReturn(r);
when(c.getPriority()).thenReturn(Priority.create(priority));
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
index e5daf6f..2b5c2b8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
@@ -1395,7 +1395,7 @@ public class TestRMAppAttemptTransitions {
// failed attempt captured the container finished event.
assertEquals(0, applicationAttempt.getJustFinishedContainers().size());
ContainerStatus cs2 =
- ContainerStatus.newInstance(ContainerId.newInstance(appAttemptId, 2),
+ ContainerStatus.newInstance(ContainerId.newContainerId(appAttemptId, 2),
ContainerState.COMPLETE, "", 0);
applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(
appAttemptId, cs2, anyNodeId));
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
index 553587e..76cdcae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
@@ -249,7 +249,7 @@ public class TestRMContainerImpl {
// request a container.
am1.allocate("127.0.0.1", 1024, 1, new ArrayList<ContainerId>());
- ContainerId containerId2 = ContainerId.newInstance(
+ ContainerId containerId2 = ContainerId.newContainerId(
am1.getApplicationAttemptId(), 2);
rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
index c168b95..c648b83 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
@@ -138,7 +138,7 @@ public class TestSchedulerApplicationAttempt {
private RMContainer createRMContainer(ApplicationAttemptId appAttId, int id,
Resource resource) {
- ContainerId containerId = ContainerId.newInstance(appAttId, id);
+ ContainerId containerId = ContainerId.newContainerId(appAttId, id);
RMContainer rmContainer = mock(RMContainer.class);
Container container = mock(Container.class);
when(container.getResource()).thenReturn(resource);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
index c3ae38c..c9e81ee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
@@ -560,7 +560,7 @@ public class TestSchedulerUtils {
@Test
public void testCreateAbnormalContainerStatus() {
ContainerStatus cd = SchedulerUtils.createAbnormalContainerStatus(
- ContainerId.newInstance(ApplicationAttemptId.newInstance(
+ ContainerId.newContainerId(ApplicationAttemptId.newInstance(
ApplicationId.newInstance(System.currentTimeMillis(), 1), 1), 1), "x");
Assert.assertEquals(ContainerExitStatus.ABORTED, cd.getExitStatus());
}
@@ -568,7 +568,7 @@ public class TestSchedulerUtils {
@Test
public void testCreatePreemptedContainerStatus() {
ContainerStatus cd = SchedulerUtils.createPreemptedContainerStatus(
- ContainerId.newInstance(ApplicationAttemptId.newInstance(
+ ContainerId.newContainerId(ApplicationAttemptId.newInstance(
ApplicationId.newInstance(System.currentTimeMillis(), 1), 1), 1), "x");
Assert.assertEquals(ContainerExitStatus.PREEMPTED, cd.getExitStatus());
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 98dc673..2aa57a0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -1085,7 +1085,7 @@ public class TestCapacityScheduler {
// request a container.
am1.allocate("127.0.0.1", 1024, 1, new ArrayList<ContainerId>());
- ContainerId containerId1 = ContainerId.newInstance(
+ ContainerId containerId1 = ContainerId.newContainerId(
am1.getApplicationAttemptId(), 2);
rm1.waitForState(nm1, containerId1, RMContainerState.ALLOCATED);
@@ -1122,7 +1122,7 @@ public class TestCapacityScheduler {
}
// New container will be allocated and will move to ALLOCATED state
- ContainerId containerId2 = ContainerId.newInstance(
+ ContainerId containerId2 = ContainerId.newContainerId(
am1.getApplicationAttemptId(), 3);
rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
index 0c32c0c..ad834ac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
@@ -164,7 +164,7 @@ public class TestContainerAllocation {
// request a container.
am1.allocate("127.0.0.1", 1024, 1, new ArrayList<ContainerId>());
ContainerId containerId2 =
- ContainerId.newInstance(am1.getApplicationAttemptId(), 2);
+ ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED);
RMContainer container =
@@ -194,7 +194,7 @@ public class TestContainerAllocation {
// request a container.
am1.allocate("127.0.0.1", 1024, 1, new ArrayList<ContainerId>());
ContainerId containerId2 =
- ContainerId.newInstance(am1.getApplicationAttemptId(), 2);
+ ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED);
// acquire the container.
@@ -247,7 +247,7 @@ public class TestContainerAllocation {
// request a container.
am2.allocate("127.0.0.1", 512, 1, new ArrayList<ContainerId>());
ContainerId containerId =
- ContainerId.newInstance(am2.getApplicationAttemptId(), 2);
+ ContainerId.newContainerId(am2.getApplicationAttemptId(), 2);
rm1.waitForState(nm1, containerId, RMContainerState.ALLOCATED);
// acquire the container.
@@ -480,13 +480,13 @@ public class TestContainerAllocation {
// A has only 10% of x, so it can only allocate one container in label=empty
ContainerId containerId =
- ContainerId.newInstance(am1.getApplicationAttemptId(), 2);
+ ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "");
Assert.assertTrue(rm1.waitForState(nm3, containerId,
RMContainerState.ALLOCATED, 10 * 1000));
// Cannot allocate 2nd label=empty container
containerId =
- ContainerId.newInstance(am1.getApplicationAttemptId(), 3);
+ ContainerId.newContainerId(am1.getApplicationAttemptId(), 3);
am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "");
Assert.assertFalse(rm1.waitForState(nm3, containerId,
RMContainerState.ALLOCATED, 10 * 1000));
@@ -495,7 +495,7 @@ public class TestContainerAllocation {
// We can allocate floor(8000 / 1024) = 7 containers
for (int id = 3; id <= 8; id++) {
containerId =
- ContainerId.newInstance(am1.getApplicationAttemptId(), id);
+ ContainerId.newContainerId(am1.getApplicationAttemptId(), id);
am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "x");
Assert.assertTrue(rm1.waitForState(nm1, containerId,
RMContainerState.ALLOCATED, 10 * 1000));
@@ -571,7 +571,7 @@ public class TestContainerAllocation {
// request a container (label = x && y). can only allocate on nm2
am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "x && y");
containerId =
- ContainerId.newInstance(am1.getApplicationAttemptId(), 2);
+ ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
Assert.assertFalse(rm1.waitForState(nm1, containerId,
RMContainerState.ALLOCATED, 10 * 1000));
Assert.assertTrue(rm1.waitForState(nm2, containerId,
@@ -588,7 +588,7 @@ public class TestContainerAllocation {
// and now b1's queue capacity will be used, cannot allocate more containers
// (Maximum capacity reached)
am2.allocate("*", 1024, 1, new ArrayList<ContainerId>());
- containerId = ContainerId.newInstance(am2.getApplicationAttemptId(), 2);
+ containerId = ContainerId.newContainerId(am2.getApplicationAttemptId(), 2);
Assert.assertFalse(rm1.waitForState(nm4, containerId,
RMContainerState.ALLOCATED, 10 * 1000));
Assert.assertFalse(rm1.waitForState(nm5, containerId,
@@ -601,7 +601,7 @@ public class TestContainerAllocation {
// request a container. try to allocate on nm1 (label = x) and nm3 (label =
// y,z). Will successfully allocate on nm3
am3.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "y");
- containerId = ContainerId.newInstance(am3.getApplicationAttemptId(), 2);
+ containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 2);
Assert.assertFalse(rm1.waitForState(nm1, containerId,
RMContainerState.ALLOCATED, 10 * 1000));
Assert.assertTrue(rm1.waitForState(nm3, containerId,
@@ -612,7 +612,7 @@ public class TestContainerAllocation {
// try to allocate container (request label = y && z) on nm3 (label = y) and
// nm4 (label = y,z). Will sucessfully allocate on nm4 only.
am3.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "y && z");
- containerId = ContainerId.newInstance(am3.getApplicationAttemptId(), 3);
+ containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 3);
Assert.assertFalse(rm1.waitForState(nm3, containerId,
RMContainerState.ALLOCATED, 10 * 1000));
Assert.assertTrue(rm1.waitForState(nm4, containerId,
@@ -654,7 +654,7 @@ public class TestContainerAllocation {
// request a container.
am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "x");
containerId =
- ContainerId.newInstance(am1.getApplicationAttemptId(), 2);
+ ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
Assert.assertFalse(rm1.waitForState(nm2, containerId,
RMContainerState.ALLOCATED, 10 * 1000));
Assert.assertTrue(rm1.waitForState(nm1, containerId,
@@ -669,7 +669,7 @@ public class TestContainerAllocation {
// request a container.
am2.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "y");
- containerId = ContainerId.newInstance(am2.getApplicationAttemptId(), 2);
+ containerId = ContainerId.newContainerId(am2.getApplicationAttemptId(), 2);
Assert.assertFalse(rm1.waitForState(nm1, containerId,
RMContainerState.ALLOCATED, 10 * 1000));
Assert.assertTrue(rm1.waitForState(nm2, containerId,
@@ -684,7 +684,7 @@ public class TestContainerAllocation {
// request a container.
am3.allocate("*", 1024, 1, new ArrayList<ContainerId>());
- containerId = ContainerId.newInstance(am3.getApplicationAttemptId(), 2);
+ containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 2);
Assert.assertFalse(rm1.waitForState(nm2, containerId,
RMContainerState.ALLOCATED, 10 * 1000));
Assert.assertTrue(rm1.waitForState(nm3, containerId,
@@ -730,7 +730,7 @@ public class TestContainerAllocation {
// request a container.
am1.allocate("*", 1024, 1, new ArrayList<ContainerId>());
containerId =
- ContainerId.newInstance(am1.getApplicationAttemptId(), 2);
+ ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
Assert.assertFalse(rm1.waitForState(nm3, containerId,
RMContainerState.ALLOCATED, 10 * 1000));
Assert.assertTrue(rm1.waitForState(nm1, containerId,
@@ -745,7 +745,7 @@ public class TestContainerAllocation {
// request a container.
am2.allocate("*", 1024, 1, new ArrayList<ContainerId>());
- containerId = ContainerId.newInstance(am2.getApplicationAttemptId(), 2);
+ containerId = ContainerId.newContainerId(am2.getApplicationAttemptId(), 2);
Assert.assertFalse(rm1.waitForState(nm3, containerId,
RMContainerState.ALLOCATED, 10 * 1000));
Assert.assertTrue(rm1.waitForState(nm2, containerId,
@@ -760,7 +760,7 @@ public class TestContainerAllocation {
// request a container.
am3.allocate("*", 1024, 1, new ArrayList<ContainerId>());
- containerId = ContainerId.newInstance(am3.getApplicationAttemptId(), 2);
+ containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 2);
Assert.assertFalse(rm1.waitForState(nm2, containerId,
RMContainerState.ALLOCATED, 10 * 1000));
Assert.assertTrue(rm1.waitForState(nm3, containerId,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 843555f..61cbdc1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -3530,7 +3530,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
// ResourceRequest will be empty once NodeUpdate is completed
Assert.assertNull(app.getResourceRequest(priority, host));
- ContainerId containerId1 = ContainerId.newInstance(appAttemptId, 1);
+ ContainerId containerId1 = ContainerId.newContainerId(appAttemptId, 1);
RMContainer rmContainer = app.getRMContainer(containerId1);
// Create a preempt event and register for preemption
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
index de8d302..f0dcb56 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
@@ -231,7 +231,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
ApplicationAttemptId.newInstance(appId, 1);
ContainerId validContainerId =
- ContainerId.newInstance(validAppAttemptId, 0);
+ ContainerId.newContainerId(validAppAttemptId, 0);
NodeId validNode = yarnCluster.getNodeManager(0).getNMContext().getNodeId();
NodeId invalidNode = NodeId.newInstance("InvalidHost", 1234);
@@ -311,7 +311,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
ApplicationAttemptId.newInstance(appId, 2);
ContainerId validContainerId2 =
- ContainerId.newInstance(validAppAttemptId2, 0);
+ ContainerId.newContainerId(validAppAttemptId2, 0);
org.apache.hadoop.yarn.api.records.Token validContainerToken2 =
containerTokenSecretManager.createContainerToken(validContainerId2,
@@ -401,7 +401,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
.createNMToken(validAppAttemptId, validNode, user);
org.apache.hadoop.yarn.api.records.Token newContainerToken =
containerTokenSecretManager.createContainerToken(
- ContainerId.newInstance(attempt2, 1), validNode, user, r,
+ ContainerId.newContainerId(attempt2, 1), validNode, user, r,
Priority.newInstance(0), 0);
Assert.assertTrue(testStartContainer(rpc, attempt2, validNode,
newContainerToken, attempt1NMToken, false).isEmpty());
@@ -638,7 +638,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
ApplicationId appId = ApplicationId.newInstance(1, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 0);
- ContainerId cId = ContainerId.newInstance(appAttemptId, 0);
+ ContainerId cId = ContainerId.newContainerId(appAttemptId, 0);
NodeManager nm = yarnCluster.getNodeManager(0);
NMTokenSecretManagerInNM nmTokenSecretManagerInNM =
nm.getNMContext().getNMTokenSecretManager();
@@ -691,7 +691,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
} while (containerTokenSecretManager.getCurrentKey().getKeyId()
== tamperedContainerTokenSecretManager.getCurrentKey().getKeyId());
- ContainerId cId2 = ContainerId.newInstance(appAttemptId, 1);
+ ContainerId cId2 = ContainerId.newContainerId(appAttemptId, 1);
// Creating modified containerToken
Token containerToken2 =
tamperedContainerTokenSecretManager.createContainerToken(cId2, nodeId,
@@ -733,7 +733,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
ApplicationId appId = ApplicationId.newInstance(1, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 0);
- ContainerId cId = ContainerId.newInstance(appAttemptId, (5L << 40) | 3L);
+ ContainerId cId = ContainerId.newContainerId(appAttemptId, (5L << 40) | 3L);
NodeManager nm = yarnCluster.getNodeManager(0);
NMTokenSecretManagerInNM nmTokenSecretManagerInNM =
nm.getNMContext().getNMTokenSecretManager();
[17/25] hadoop git commit: YARN-2841: Correct fix version from
branch-2.6 to branch-2.7 in the change log.
Posted by vi...@apache.org.
YARN-2841: Correct fix version from branch-2.6 to branch-2.7 in the
change log.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58e9bf4b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58e9bf4b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58e9bf4b
Branch: refs/heads/HDFS-EC
Commit: 58e9bf4b908e0b21309006eba49899b092f38071
Parents: 5c9a51f
Author: Xuan <xg...@apache.org>
Authored: Mon Nov 10 18:31:25 2014 -0800
Committer: Xuan <xg...@apache.org>
Committed: Mon Nov 10 18:31:25 2014 -0800
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/58e9bf4b/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e134d6b..9bb016d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -72,6 +72,8 @@ Release 2.7.0 - UNRELEASED
YARN-2713. "RM Home" link in NM should point to one of the RMs in an
HA setup. (kasha)
+ YARN-2841. RMProxy should retry EOFException. (Jian He via xgong)
+
Release 2.6.0 - 2014-11-15
INCOMPATIBLE CHANGES
@@ -929,8 +931,6 @@ Release 2.6.0 - 2014-11-15
consistent with the (somewhat incorrect) behaviour in the non-recovery case.
(Jian He via vinodkv)
- YARN-2841. RMProxy should retry EOFException. (Jian He via xgong)
-
Release 2.5.2 - 2014-11-10
INCOMPATIBLE CHANGES
[25/25] hadoop git commit: Merge remote-tracking branch
'origin/trunk' into HDFS-EC
Posted by vi...@apache.org.
Merge remote-tracking branch 'origin/trunk' into HDFS-EC
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95b3ebaa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95b3ebaa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95b3ebaa
Branch: refs/heads/HDFS-EC
Commit: 95b3ebaa1bb8d4a8da4ec4daf2ac3287d0e43b04
Parents: 3a1b3f8 46f6f9d
Author: Vinayakumar B <vi...@apache.org>
Authored: Wed Nov 12 09:15:22 2014 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Wed Nov 12 09:15:22 2014 +0530
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 14 +-
.../src/main/docs/releasenotes.html | 3525 ++++++++++++++++++
.../java/org/apache/hadoop/net/NetUtils.java | 8 +
.../hadoop/security/UserGroupInformation.java | 38 +-
.../org/apache/hadoop/net/TestNetUtils.java | 12 +
.../hadoop/security/TestUGILoginFromKeytab.java | 91 +
.../hadoop/nfs/nfs3/Nfs3FileAttributes.java | 29 +-
.../java/org/apache/hadoop/oncrpc/RpcUtil.java | 2 +-
.../apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java | 25 +-
.../hadoop/hdfs/nfs/nfs3/OpenFileCtx.java | 88 +-
.../apache/hadoop/hdfs/nfs/nfs3/TestWrites.java | 107 +-
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 19 +-
.../server/blockmanagement/BlockIdManager.java | 208 ++
.../server/blockmanagement/BlockManager.java | 14 +-
.../blockmanagement/BlockPlacementPolicy.java | 1 -
.../BlockPlacementPolicyDefault.java | 1 -
.../BlockPlacementPolicyWithNodeGroup.java | 1 -
.../server/blockmanagement/DatanodeManager.java | 40 +-
.../server/blockmanagement/FSClusterStats.java | 60 +
.../SequentialBlockIdGenerator.java | 66 +
.../hadoop/hdfs/server/datanode/DataNode.java | 2 +-
.../hdfs/server/datanode/DatanodeUtil.java | 21 +
.../datanode/fsdataset/impl/FsDatasetCache.java | 4 +-
.../hdfs/server/namenode/FSClusterStats.java | 62 -
.../hdfs/server/namenode/FSEditLogLoader.java | 6 +-
.../hdfs/server/namenode/FSImageFormat.java | 22 +-
.../server/namenode/FSImageFormatProtobuf.java | 23 +-
.../hdfs/server/namenode/FSNamesystem.java | 212 +-
.../server/namenode/FSPermissionChecker.java | 1 +
.../namenode/SequentialBlockIdGenerator.java | 66 -
.../blockmanagement/TestBlockManager.java | 10 +-
.../blockmanagement/TestReplicationPolicy.java | 13 +-
.../TestReplicationPolicyConsiderLoad.java | 6 +-
.../blockmanagement/TestSequentialBlockId.java | 197 +
.../hdfs/server/namenode/FSAclBaseTest.java | 37 +-
.../server/namenode/TestNameNodeMXBean.java | 126 +-
.../namenode/TestNamenodeCapacityReport.java | 72 +-
.../hdfs/server/namenode/TestSaveNamespace.java | 11 +-
.../server/namenode/TestSequentialBlockId.java | 209 --
hadoop-mapreduce-project/CHANGES.txt | 4 +-
.../v2/app/local/LocalContainerAllocator.java | 2 +-
.../jobhistory/TestJobHistoryEventHandler.java | 2 +-
.../apache/hadoop/mapreduce/v2/app/MRApp.java | 6 +-
.../hadoop/mapreduce/v2/app/MRAppBenchmark.java | 4 +-
.../hadoop/mapreduce/v2/app/MockJobs.java | 6 +-
.../v2/app/TestCheckpointPreemptionPolicy.java | 2 +-
.../v2/app/TestKillAMPreemptionPolicy.java | 2 +-
.../mapreduce/v2/app/TestMRAppMaster.java | 2 +-
.../mapreduce/v2/app/TestStagingCleanup.java | 2 +-
.../v2/app/job/impl/TestTaskAttempt.java | 22 +-
.../v2/app/launcher/TestContainerLauncher.java | 8 +-
.../app/launcher/TestContainerLauncherImpl.java | 2 +-
.../v2/app/rm/TestRMContainerAllocator.java | 10 +-
.../mapreduce/v2/hs/webapp/TestBlocks.java | 2 +-
.../v2/TestMRJobsWithHistoryService.java | 2 +-
hadoop-yarn-project/CHANGES.txt | 22 +-
.../hadoop/yarn/api/records/ContainerId.java | 16 +-
.../UnmanagedAMLauncher.java | 2 +-
.../hadoop/yarn/client/cli/RMAdminCLI.java | 14 +-
.../hadoop/yarn/client/ProtocolHATestBase.java | 2 +-
.../api/async/impl/TestAMRMClientAsync.java | 2 +-
.../api/async/impl/TestNMClientAsync.java | 2 +-
.../yarn/client/api/impl/TestAHSClient.java | 16 +-
.../api/impl/TestAMRMClientOnRMRestart.java | 2 +-
.../yarn/client/api/impl/TestYarnClient.java | 16 +-
.../hadoop/yarn/client/cli/TestLogsCLI.java | 6 +-
.../hadoop/yarn/client/cli/TestYarnCLI.java | 14 +-
.../org/apache/hadoop/yarn/client/RMProxy.java | 2 +
.../apache/hadoop/yarn/client/ServerProxy.java | 2 +
.../nodelabels/CommonNodeLabelsManager.java | 20 +-
.../apache/hadoop/yarn/util/ConverterUtils.java | 2 +-
.../org/apache/hadoop/yarn/util/FSDownload.java | 17 +-
.../java/org/apache/hadoop/yarn/util/Times.java | 3 +-
.../resources/webapps/static/yarn.dt.plugins.js | 27 +-
.../hadoop/yarn/TestContainerLaunchRPC.java | 2 +-
.../java/org/apache/hadoop/yarn/TestRPC.java | 2 +-
.../apache/hadoop/yarn/api/TestContainerId.java | 2 +-
.../yarn/api/TestContainerResourceDecrease.java | 2 +-
.../yarn/api/TestContainerResourceIncrease.java | 2 +-
.../TestContainerResourceIncreaseRequest.java | 2 +-
.../logaggregation/TestAggregatedLogFormat.java | 2 +-
.../logaggregation/TestAggregatedLogsBlock.java | 2 +-
.../yarn/nodelabels/NodeLabelTestBase.java | 12 +-
.../nodelabels/TestCommonNodeLabelsManager.java | 23 +
.../yarn/security/TestYARNTokenIdentifier.java | 2 +-
.../ApplicationHistoryStoreTestUtils.java | 2 +-
.../TestApplicationHistoryClientService.java | 6 +-
...pplicationHistoryManagerOnTimelineStore.java | 8 +-
.../TestFileSystemApplicationHistoryStore.java | 10 +-
.../TestMemoryApplicationHistoryStore.java | 12 +-
.../webapp/TestAHSWebApp.java | 4 +-
.../webapp/TestAHSWebServices.java | 2 +-
.../hadoop/yarn/server/utils/BuilderUtils.java | 4 +-
.../hadoop/yarn/TestYarnServerApiClasses.java | 2 +-
.../protocolrecords/TestProtocolRecords.java | 4 +-
.../TestRegisterNodeManagerRequest.java | 2 +-
.../server/nodemanager/DirectoryCollection.java | 2 -
.../yarn/server/nodemanager/TestEventFlow.java | 2 +-
.../nodemanager/TestLinuxContainerExecutor.java | 2 +-
.../nodemanager/TestNodeManagerReboot.java | 2 +-
.../nodemanager/TestNodeManagerResync.java | 2 +-
.../nodemanager/TestNodeManagerShutdown.java | 2 +-
.../nodemanager/TestNodeStatusUpdater.java | 21 +-
.../containermanager/TestAuxServices.java | 2 +-
.../containermanager/TestContainerManager.java | 2 +-
.../TestContainerManagerRecovery.java | 2 +-
.../launcher/TestContainerLaunch.java | 8 +-
.../monitor/TestContainersMonitor.java | 2 +-
.../TestNMLeveldbStateStoreService.java | 2 +-
.../server/resourcemanager/rmapp/RMAppImpl.java | 27 +-
.../rmapp/attempt/RMAppAttemptImpl.java | 4 +-
.../resourcemanager/TestApplicationCleanup.java | 2 +-
.../resourcemanager/TestClientRMService.java | 6 +-
.../TestContainerResourceUsage.java | 4 +-
.../server/resourcemanager/TestRMRestart.java | 2 +-
.../TestResourceTrackerService.java | 8 +-
.../TestWorkPreservingRMRestart.java | 69 +-
.../ahs/TestRMApplicationHistoryWriter.java | 12 +-
.../applicationsmanager/MockAsm.java | 2 +-
.../applicationsmanager/TestAMRestart.java | 22 +-
.../metrics/TestSystemMetricsPublisher.java | 4 +-
...estProportionalCapacityPreemptionPolicy.java | 2 +-
.../rmapp/TestRMAppTransitions.java | 28 -
.../attempt/TestRMAppAttemptTransitions.java | 2 +-
.../rmcontainer/TestRMContainerImpl.java | 2 +-
.../TestSchedulerApplicationAttempt.java | 2 +-
.../scheduler/TestSchedulerUtils.java | 4 +-
.../capacity/TestCapacityScheduler.java | 4 +-
.../capacity/TestContainerAllocation.java | 32 +-
.../scheduler/fair/TestFairScheduler.java | 2 +-
.../server/TestContainerManagerSecurity.java | 12 +-
131 files changed, 5073 insertions(+), 1053 deletions(-)
----------------------------------------------------------------------
[22/25] hadoop git commit: YARN-570. Time strings are formated in
different timezone. (Akira Ajisaka and Peng Zhang via kasha)
Posted by vi...@apache.org.
YARN-570. Time strings are formated in different timezone. (Akira Ajisaka and Peng Zhang via kasha)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/456b9738
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/456b9738
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/456b9738
Branch: refs/heads/HDFS-EC
Commit: 456b973819904e9647dabad292d2d6205dd84399
Parents: 99d9d0c
Author: Karthik Kambatla <ka...@apache.org>
Authored: Tue Nov 11 13:22:48 2014 -0800
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Tue Nov 11 13:22:59 2014 -0800
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../java/org/apache/hadoop/yarn/util/Times.java | 3 ++-
.../resources/webapps/static/yarn.dt.plugins.js | 27 +++++++++++++++++++-
3 files changed, 31 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/456b9738/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3dc6d9f..b40d9b7 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -65,6 +65,9 @@ Release 2.7.0 - UNRELEASED
YARN-2735. diskUtilizationPercentageCutoff and diskUtilizationSpaceCutoff
are initialized twice in DirectoryCollection. (Zhihai Xu via kasha)
+ YARN-570. Time strings are formated in different timezone.
+ (Akira Ajisaka and Peng Zhang via kasha)
+
OPTIMIZATIONS
BUG FIXES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/456b9738/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java
index 92cc72a..8ae3842 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Times.java
@@ -29,10 +29,11 @@ import org.apache.hadoop.classification.InterfaceAudience.Private;
public class Times {
private static final Log LOG = LogFactory.getLog(Times.class);
+ // This format should match the one used in yarn.dt.plugins.js
static final ThreadLocal<SimpleDateFormat> dateFormat =
new ThreadLocal<SimpleDateFormat>() {
@Override protected SimpleDateFormat initialValue() {
- return new SimpleDateFormat("d-MMM-yyyy HH:mm:ss");
+ return new SimpleDateFormat("EEE MMM dd HH:mm:ss Z yyyy");
}
};
http://git-wip-us.apache.org/repos/asf/hadoop/blob/456b9738/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.dt.plugins.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.dt.plugins.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.dt.plugins.js
index 0c683e7..7b069df 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.dt.plugins.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.dt.plugins.js
@@ -78,13 +78,38 @@ function renderHadoopDate(data, type, full) {
if(data === '0'|| data === '-1') {
return "N/A";
}
- return new Date(parseInt(data)).toUTCString();
+ var date = new Date(parseInt(data));
+ var monthList = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
+ "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"];
+ var weekdayList = ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"];
+ var offsetMinutes = date.getTimezoneOffset();
+ var offset
+ if (offsetMinutes <= 0) {
+ offset = "+" + zeroPad(-offsetMinutes / 60 * 100, 4);
+ } else {
+ offset = "-" + zeroPad(offsetMinutes / 60 * 100, 4);
+ }
+
+ // EEE MMM dd HH:mm:ss Z yyyy
+ return weekdayList[date.getDay()] + " " +
+ monthList[date.getMonth()] + " " +
+ date.getDate() + " " +
+ zeroPad(date.getHours(), 2) + ":" +
+ zeroPad(date.getMinutes(), 2) + ":" +
+ zeroPad(date.getSeconds(), 2) + " " +
+ offset + " " +
+ date.getFullYear();
}
// 'sort', 'type' and undefined all just use the number
// If date is 0, then for purposes of sorting it should be consider max_int
return data === '0' ? '9007199254740992' : data;
}
+function zeroPad(n, width) {
+ n = n + '';
+ return n.length >= width ? n : new Array(width - n.length + 1).join('0') + n;
+}
+
function renderHadoopElapsedTime(data, type, full) {
if (type === 'display' || type === 'filter') {
var timeDiff = parseInt(data);
[12/25] hadoop git commit: HADOOP-11289. Fix typo in RpcUtil log
message. Contributed by Charles Lamb.
Posted by vi...@apache.org.
HADOOP-11289. Fix typo in RpcUtil log message. Contributed by Charles Lamb.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eace2184
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eace2184
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eace2184
Branch: refs/heads/HDFS-EC
Commit: eace218411a7733abb8dfca6aaa4eb0557e25e0c
Parents: ab30d51
Author: Haohui Mai <wh...@apache.org>
Authored: Mon Nov 10 11:04:41 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Mon Nov 10 11:06:20 2014 -0800
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
.../hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java | 2 +-
2 files changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/eace2184/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index cf91b30..86d81ad 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -417,6 +417,8 @@ Release 2.7.0 - UNRELEASED
HADOOP-11187 NameNode - KMS communication fails after a long period of
inactivity. (Arun Suresh via atm)
+ HADOOP-11289. Fix typo in RpcUtil log message. (Charles Lamb via wheat9)
+
Release 2.6.0 - 2014-11-15
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/eace2184/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java
index e9878b7..cbc9943 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java
@@ -125,7 +125,7 @@ public final class RpcUtil {
info = new RpcInfo(callHeader, dataBuffer, ctx, e.getChannel(),
e.getRemoteAddress());
} catch (Exception exc) {
- LOG.info("Malfromed RPC request from " + e.getRemoteAddress());
+ LOG.info("Malformed RPC request from " + e.getRemoteAddress());
}
if (info != null) {
[04/25] hadoop git commit: HDFS-7383.
DataNode.requestShortCircuitFdsForRead may throw NullPointerException.
Contributed by Tsz Wo Nicholas Sze.
Posted by vi...@apache.org.
HDFS-7383. DataNode.requestShortCircuitFdsForRead may throw NullPointerException. Contributed by Tsz Wo Nicholas Sze.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4ddc5cad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4ddc5cad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4ddc5cad
Branch: refs/heads/HDFS-EC
Commit: 4ddc5cad0a4175f7f5ef9504a7365601dc7e63b4
Parents: a37a993
Author: Suresh Srinivas <su...@yahoo-inc.com>
Authored: Sun Nov 9 17:55:03 2014 -0800
Committer: Suresh Srinivas <su...@yahoo-inc.com>
Committed: Sun Nov 9 17:55:03 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../hadoop/hdfs/server/datanode/DataNode.java | 2 +-
.../hdfs/server/datanode/DatanodeUtil.java | 21 ++++++++++++++++++++
.../datanode/fsdataset/impl/FsDatasetCache.java | 4 ++--
4 files changed, 27 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ddc5cad/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6bde9bc..af18379 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -407,6 +407,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7366. BlockInfo should take replication as an short in the constructor.
(Li Lu via wheat9)
+ HDFS-7383. DataNode.requestShortCircuitFdsForRead may throw
+ NullPointerException. (szetszwo via suresh)
+
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ddc5cad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 6bd27fa..adfbaf3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1543,7 +1543,7 @@ public class DataNode extends ReconfigurableBase
try {
fis[0] = (FileInputStream)data.getBlockInputStream(blk, 0);
- fis[1] = (FileInputStream)data.getMetaDataInputStream(blk).getWrappedStream();
+ fis[1] = DatanodeUtil.getMetaDataInputStream(blk, data);
} catch (ClassCastException e) {
LOG.debug("requestShortCircuitFdsForRead failed", e);
throw new ShortCircuitFdsUnsupportedException("This DataNode's " +
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ddc5cad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
index bd1ba2f..746c3f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
@@ -18,10 +18,15 @@
package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
/** Provide utility methods for Datanode. */
@InterfaceAudience.Private
@@ -114,4 +119,20 @@ public class DatanodeUtil {
DataStorage.BLOCK_SUBDIR_PREFIX + d2;
return new File(root, path);
}
+
+ /**
+ * @return the FileInputStream for the meta data of the given block.
+ * @throws FileNotFoundException
+ * if the file not found.
+ * @throws ClassCastException
+ * if the underlying input stream is not a FileInputStream.
+ */
+ public static FileInputStream getMetaDataInputStream(
+ ExtendedBlock b, FsDatasetSpi<?> data) throws IOException {
+ final LengthInputStream lin = data.getMetaDataInputStream(b);
+ if (lin == null) {
+ throw new FileNotFoundException("Meta file for " + b + " not found.");
+ }
+ return (FileInputStream)lin.getWrappedStream();
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ddc5cad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
index 4acfc8f..c6408e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
@@ -373,8 +374,7 @@ public class FsDatasetCache {
reservedBytes = true;
try {
blockIn = (FileInputStream)dataset.getBlockInputStream(extBlk, 0);
- metaIn = (FileInputStream)dataset.getMetaDataInputStream(extBlk)
- .getWrappedStream();
+ metaIn = DatanodeUtil.getMetaDataInputStream(extBlk, dataset);
} catch (ClassCastException e) {
LOG.warn("Failed to cache " + key +
": Underlying blocks are not backed by files.", e);
[24/25] hadoop git commit: HDFS-7375. Move FSClusterStats to
o.a.h.h.hdfs.server.blockmanagement. Contributed by Haohui Mai.
Posted by vi...@apache.org.
HDFS-7375. Move FSClusterStats to o.a.h.h.hdfs.server.blockmanagement. Contributed by Haohui Mai.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46f6f9d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46f6f9d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46f6f9d6
Branch: refs/heads/HDFS-EC
Commit: 46f6f9d60d0a2c1f441a0e81a071b08c24dbd6d6
Parents: 163bb55
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Nov 11 18:22:40 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Tue Nov 11 18:22:40 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../server/blockmanagement/BlockManager.java | 14 +--
.../blockmanagement/BlockPlacementPolicy.java | 1 -
.../BlockPlacementPolicyDefault.java | 1 -
.../BlockPlacementPolicyWithNodeGroup.java | 1 -
.../server/blockmanagement/DatanodeManager.java | 40 +++++-
.../server/blockmanagement/FSClusterStats.java | 60 +++++++++
.../hdfs/server/namenode/FSClusterStats.java | 62 ---------
.../hdfs/server/namenode/FSNamesystem.java | 28 +----
.../blockmanagement/TestBlockManager.java | 10 +-
.../blockmanagement/TestReplicationPolicy.java | 13 +-
.../TestReplicationPolicyConsiderLoad.java | 6 +-
.../server/namenode/TestNameNodeMXBean.java | 126 +++++++++++--------
.../namenode/TestNamenodeCapacityReport.java | 72 +++++------
14 files changed, 234 insertions(+), 203 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f6f9d6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 07762bf..ea89344 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -353,6 +353,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7381. Decouple the management of block id and gen stamps from
FSNamesystem. (wheat9)
+ HDFS-7375. Move FSClusterStats to o.a.h.h.hdfs.server.blockmanagement.
+ (wheat9)
+
OPTIMIZATIONS
BUG FIXES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f6f9d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 5531400..b8dcd88 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -66,7 +66,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.CorruptReplicasMap.Reason;
import org.apache.hadoop.hdfs.server.blockmanagement.PendingDataNodeMessages.ReportedBlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.hdfs.server.namenode.Namesystem;
@@ -111,7 +110,7 @@ public class BlockManager {
private final DatanodeManager datanodeManager;
private final HeartbeatManager heartbeatManager;
private final BlockTokenSecretManager blockTokenSecretManager;
-
+
private final PendingDataNodeMessages pendingDNMessages =
new PendingDataNodeMessages();
@@ -264,9 +263,9 @@ public class BlockManager {
/** Check whether name system is running before terminating */
private boolean checkNSRunning = true;
-
- public BlockManager(final Namesystem namesystem, final FSClusterStats stats,
- final Configuration conf) throws IOException {
+
+ public BlockManager(final Namesystem namesystem, final Configuration conf)
+ throws IOException {
this.namesystem = namesystem;
datanodeManager = new DatanodeManager(this, namesystem, conf);
heartbeatManager = datanodeManager.getHeartbeatManager();
@@ -281,8 +280,9 @@ public class BlockManager {
blocksMap = new BlocksMap(
LightWeightGSet.computeCapacity(2.0, "BlocksMap"));
blockplacement = BlockPlacementPolicy.getInstance(
- conf, stats, datanodeManager.getNetworkTopology(),
- datanodeManager.getHost2DatanodeMap());
+ conf, datanodeManager.getFSClusterStats(),
+ datanodeManager.getNetworkTopology(),
+ datanodeManager.getHost2DatanodeMap());
storagePolicySuite = BlockStoragePolicySuite.createDefaultSuite();
pendingReplications = new PendingReplicationBlocks(conf.getInt(
DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f6f9d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
index 26a55a2..caeb6ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.util.ReflectionUtils;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f6f9d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 5b02384..30ab5a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f6f9d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
index 60e192b..19fcb14 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
@@ -23,7 +23,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NetworkTopologyWithNodeGroup;
import org.apache.hadoop.net.Node;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f6f9d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 3b03d1d..d19aad7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -66,6 +66,7 @@ public class DatanodeManager {
private final Namesystem namesystem;
private final BlockManager blockManager;
private final HeartbeatManager heartbeatManager;
+ private final FSClusterStats fsClusterStats;
private Daemon decommissionthread = null;
/**
@@ -169,7 +170,7 @@ public class DatanodeManager {
* directives that we've already sent.
*/
private final long timeBetweenResendingCachingDirectivesMs;
-
+
DatanodeManager(final BlockManager blockManager, final Namesystem namesystem,
final Configuration conf) throws IOException {
this.namesystem = namesystem;
@@ -178,6 +179,7 @@ public class DatanodeManager {
networktopology = NetworkTopology.getInstance(conf);
this.heartbeatManager = new HeartbeatManager(namesystem, blockManager, conf);
+ this.fsClusterStats = newFSClusterStats();
this.defaultXferPort = NetUtils.createSocketAddr(
conf.get(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,
@@ -329,6 +331,11 @@ public class DatanodeManager {
return heartbeatManager;
}
+ @VisibleForTesting
+ public FSClusterStats getFSClusterStats() {
+ return fsClusterStats;
+ }
+
/** @return the datanode statistics. */
public DatanodeStatistics getDatanodeStatistics() {
return heartbeatManager;
@@ -1595,5 +1602,36 @@ public class DatanodeManager {
public void setShouldSendCachingCommands(boolean shouldSendCachingCommands) {
this.shouldSendCachingCommands = shouldSendCachingCommands;
}
+
+ FSClusterStats newFSClusterStats() {
+ return new FSClusterStats() {
+ @Override
+ public int getTotalLoad() {
+ return heartbeatManager.getXceiverCount();
+ }
+
+ @Override
+ public boolean isAvoidingStaleDataNodesForWrite() {
+ return shouldAvoidStaleDataNodesForWrite();
+ }
+
+ @Override
+ public int getNumDatanodesInService() {
+ return heartbeatManager.getNumDatanodesInService();
+ }
+
+ @Override
+ public double getInServiceXceiverAverage() {
+ double avgLoad = 0;
+ final int nodes = getNumDatanodesInService();
+ if (nodes != 0) {
+ final int xceivers = heartbeatManager
+ .getInServiceXceiverCount();
+ avgLoad = (double)xceivers/nodes;
+ }
+ return avgLoad;
+ }
+ };
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f6f9d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/FSClusterStats.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/FSClusterStats.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/FSClusterStats.java
new file mode 100644
index 0000000..556b7fc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/FSClusterStats.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * This interface is used for retrieving the load related statistics of
+ * the cluster.
+ */
+@InterfaceAudience.Private
+public interface FSClusterStats {
+
+ /**
+ * an indication of the total load of the cluster.
+ *
+ * @return a count of the total number of block transfers and block
+ * writes that are currently occuring on the cluster.
+ */
+ public int getTotalLoad();
+
+ /**
+ * Indicate whether or not the cluster is now avoiding
+ * to use stale DataNodes for writing.
+ *
+ * @return True if the cluster is currently avoiding using stale DataNodes
+ * for writing targets, and false otherwise.
+ */
+ public boolean isAvoidingStaleDataNodesForWrite();
+
+ /**
+ * Indicates number of datanodes that are in service.
+ * @return Number of datanodes that are both alive and not decommissioned.
+ */
+ public int getNumDatanodesInService();
+
+ /**
+ * an indication of the average load of non-decommission(ing|ed) nodes
+ * eligible for block placement
+ *
+ * @return average of the in service number of block transfers and block
+ * writes that are currently occurring on the cluster.
+ */
+ public double getInServiceXceiverAverage();
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f6f9d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSClusterStats.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSClusterStats.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSClusterStats.java
deleted file mode 100644
index 1a859a7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSClusterStats.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-/**
- * This interface is used for retrieving the load related statistics of
- * the cluster.
- */
-@InterfaceAudience.Private
-public interface FSClusterStats {
-
- /**
- * an indication of the total load of the cluster.
- *
- * @return a count of the total number of block transfers and block
- * writes that are currently occuring on the cluster.
- */
- public int getTotalLoad();
-
- /**
- * Indicate whether or not the cluster is now avoiding
- * to use stale DataNodes for writing.
- *
- * @return True if the cluster is currently avoiding using stale DataNodes
- * for writing targets, and false otherwise.
- */
- public boolean isAvoidingStaleDataNodesForWrite();
-
- /**
- * Indicates number of datanodes that are in service.
- * @return Number of datanodes that are both alive and not decommissioned.
- */
- public int getNumDatanodesInService();
-
- /**
- * an indication of the average load of non-decommission(ing|ed) nodes
- * eligible for block placement
- *
- * @return average of the in service number of block transfers and block
- * writes that are currently occurring on the cluster.
- */
- public double getInServiceXceiverAverage();
-}
-
-
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f6f9d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index b086390..f1ea818 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -318,8 +318,8 @@ import com.google.common.collect.Lists;
*/
@InterfaceAudience.Private
@Metrics(context="dfs")
-public class FSNamesystem implements Namesystem, FSClusterStats,
- FSNamesystemMBean, NameNodeMXBean {
+public class FSNamesystem implements Namesystem, FSNamesystemMBean,
+ NameNodeMXBean {
public static final Log LOG = LogFactory.getLog(FSNamesystem.class);
private static final ThreadLocal<StringBuilder> auditBuffer =
@@ -765,7 +765,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,
DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT);
- this.blockManager = new BlockManager(this, this, conf);
+ this.blockManager = new BlockManager(this, conf);
this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
this.blockIdManager = new BlockIdManager(blockManager);
@@ -7818,28 +7818,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
this.nnResourceChecker = nnResourceChecker;
}
- @Override
- public boolean isAvoidingStaleDataNodesForWrite() {
- return this.blockManager.getDatanodeManager()
- .shouldAvoidStaleDataNodesForWrite();
- }
-
- @Override // FSClusterStats
- public int getNumDatanodesInService() {
- return datanodeStatistics.getNumDatanodesInService();
- }
-
- @Override // for block placement strategy
- public double getInServiceXceiverAverage() {
- double avgLoad = 0;
- final int nodes = getNumDatanodesInService();
- if (nodes != 0) {
- final int xceivers = datanodeStatistics.getInServiceXceiverCount();
- avgLoad = (double)xceivers/nodes;
- }
- return avgLoad;
- }
-
public SnapshotManager getSnapshotManager() {
return snapshotManager;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f6f9d6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 14f2b59..3df890f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -81,19 +81,17 @@ public class TestBlockManager {
private static final int NUM_TEST_ITERS = 30;
private static final int BLOCK_SIZE = 64*1024;
-
- private Configuration conf;
+
private FSNamesystem fsn;
private BlockManager bm;
@Before
public void setupMockCluster() throws IOException {
- conf = new HdfsConfiguration();
- conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
- "need to set a dummy value here so it assumes a multi-rack cluster");
+ Configuration conf = new HdfsConfiguration();
+ conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, "need to set a dummy value here so it assumes a multi-rack cluster");
fsn = Mockito.mock(FSNamesystem.class);
Mockito.doReturn(true).when(fsn).hasWriteLock();
- bm = new BlockManager(fsn, fsn, conf);
+ bm = new BlockManager(fsn, conf);
final String[] racks = {
"/rackA",
"/rackA",
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f6f9d6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 4febd28..ce2328c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -56,7 +56,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.Namesystem;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
@@ -1145,9 +1144,7 @@ public class TestReplicationPolicy {
Namesystem mockNS = mock(Namesystem.class);
when(mockNS.isPopulatingReplQueues()).thenReturn(true);
when(mockNS.hasWriteLock()).thenReturn(true);
- FSClusterStats mockStats = mock(FSClusterStats.class);
- BlockManager bm =
- new BlockManager(mockNS, mockStats, new HdfsConfiguration());
+ BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
Block block1 = new Block(random.nextLong());
@@ -1193,9 +1190,7 @@ public class TestReplicationPolicy {
throws IOException {
Namesystem mockNS = mock(Namesystem.class);
when(mockNS.isPopulatingReplQueues()).thenReturn(true);
- FSClusterStats mockStats = mock(FSClusterStats.class);
- BlockManager bm =
- new BlockManager(mockNS, mockStats, new HdfsConfiguration());
+ BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
Block block1 = new Block(random.nextLong());
@@ -1248,9 +1243,7 @@ public class TestReplicationPolicy {
throws IOException {
Namesystem mockNS = mock(Namesystem.class);
when(mockNS.isPopulatingReplQueues()).thenReturn(true);
- FSClusterStats mockStats = mock(FSClusterStats.class);
- BlockManager bm =
- new BlockManager(mockNS, mockStats, new HdfsConfiguration());
+ BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
Block block1 = new Block(random.nextLong());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f6f9d6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
index 8a479c1..a1f3e38 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
@@ -130,7 +130,8 @@ public class TestReplicationPolicyConsiderLoad {
final int load = 2 + 4 + 4;
FSNamesystem fsn = namenode.getNamesystem();
- assertEquals((double)load/6, fsn.getInServiceXceiverAverage(), EPSILON);
+ assertEquals((double)load/6, dnManager.getFSClusterStats()
+ .getInServiceXceiverAverage(), EPSILON);
// Decommission DNs so BlockPlacementPolicyDefault.isGoodTarget()
// returns false
@@ -139,7 +140,8 @@ public class TestReplicationPolicyConsiderLoad {
dnManager.startDecommission(d);
d.setDecommissioned();
}
- assertEquals((double)load/3, fsn.getInServiceXceiverAverage(), EPSILON);
+ assertEquals((double)load/3, dnManager.getFSClusterStats()
+ .getInServiceXceiverAverage(), EPSILON);
// update references of writer DN to update the de-commissioned state
List<DatanodeDescriptor> liveNodes = new ArrayList<DatanodeDescriptor>();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f6f9d6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index 4e07854..fa9dca1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -17,18 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.lang.management.ManagementFactory;
-import java.net.URI;
-import java.util.Collection;
-import java.util.Map;
-
-import javax.management.MBeanServer;
-import javax.management.ObjectName;
-
+import com.google.common.util.concurrent.Uninterruptibles;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
@@ -43,6 +32,18 @@ import org.apache.hadoop.util.VersionInfo;
import org.junit.Test;
import org.mortbay.util.ajax.JSON;
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+import java.io.File;
+import java.lang.management.ManagementFactory;
+import java.net.URI;
+import java.util.Collection;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
/**
* Class for testing {@link NameNodeMXBean} implementation
*/
@@ -62,14 +63,11 @@ public class TestNameNodeMXBean {
public void testNameNodeMXBeanInfo() throws Exception {
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
- NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
- conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
- conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
-
+ NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
MiniDFSCluster cluster = null;
try {
- cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+ cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
FSNamesystem fsn = cluster.getNameNode().namesystem;
@@ -77,29 +75,6 @@ public class TestNameNodeMXBean {
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName = new ObjectName(
"Hadoop:service=NameNode,name=NameNodeInfo");
-
- // Define include file to generate deadNodes metrics
- FileSystem localFileSys = FileSystem.getLocal(conf);
- Path workingDir = localFileSys.getWorkingDirectory();
- Path dir = new Path(workingDir,
- "build/test/data/temp/TestNameNodeMXBean");
- Path includeFile = new Path(dir, "include");
- assertTrue(localFileSys.mkdirs(dir));
- StringBuilder includeHosts = new StringBuilder();
- for(DataNode dn : cluster.getDataNodes()) {
- includeHosts.append(dn.getDisplayName()).append("\n");
- }
- DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
- conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
- fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
-
- cluster.stopDataNode(0);
- while (fsn.getNumDatanodesInService() != 2) {
- try {
- Thread.sleep(1000);
- } catch (InterruptedException e) {}
- }
-
// get attribute "ClusterId"
String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
assertEquals(fsn.getClusterId(), clusterId);
@@ -127,8 +102,7 @@ public class TestNameNodeMXBean {
// get attribute percentremaining
Float percentremaining = (Float) (mbs.getAttribute(mxbeanName,
"PercentRemaining"));
- assertEquals(fsn.getPercentRemaining(), percentremaining
- .floatValue(), DELTA);
+ assertEquals(fsn.getPercentRemaining(), percentremaining, DELTA);
// get attribute Totalblocks
Long totalblocks = (Long) (mbs.getAttribute(mxbeanName, "TotalBlocks"));
assertEquals(fsn.getTotalBlocks(), totalblocks.longValue());
@@ -151,15 +125,6 @@ public class TestNameNodeMXBean {
String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
"DeadNodes"));
assertEquals(fsn.getDeadNodes(), deadnodeinfo);
- Map<String, Map<String, Object>> deadNodes =
- (Map<String, Map<String, Object>>) JSON.parse(deadnodeinfo);
- assertTrue(deadNodes.size() > 0);
- for (Map<String, Object> deadNode : deadNodes.values()) {
- assertTrue(deadNode.containsKey("lastContact"));
- assertTrue(deadNode.containsKey("decommissioned"));
- assertTrue(deadNode.containsKey("xferaddr"));
- }
-
// get attribute NodeUsage
String nodeUsage = (String) (mbs.getAttribute(mxbeanName,
"NodeUsage"));
@@ -233,4 +198,63 @@ public class TestNameNodeMXBean {
}
}
}
+
+ @SuppressWarnings({ "unchecked" })
+ @Test
+ public void testLastContactTime() throws Exception {
+ Configuration conf = new Configuration();
+ conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
+ MiniDFSCluster cluster = null;
+
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+ cluster.waitActive();
+
+ FSNamesystem fsn = cluster.getNameNode().namesystem;
+
+ MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+ ObjectName mxbeanName = new ObjectName(
+ "Hadoop:service=NameNode,name=NameNodeInfo");
+
+ // Define include file to generate deadNodes metrics
+ FileSystem localFileSys = FileSystem.getLocal(conf);
+ Path workingDir = localFileSys.getWorkingDirectory();
+ Path dir = new Path(workingDir,
+ "build/test/data/temp/TestNameNodeMXBean");
+ Path includeFile = new Path(dir, "include");
+ assertTrue(localFileSys.mkdirs(dir));
+ StringBuilder includeHosts = new StringBuilder();
+ for(DataNode dn : cluster.getDataNodes()) {
+ includeHosts.append(dn.getDisplayName()).append("\n");
+ }
+ DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
+ conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
+ fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
+
+ cluster.stopDataNode(0);
+ while (fsn.getBlockManager().getDatanodeManager().getNumLiveDataNodes()
+ != 2 ) {
+ Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
+ }
+
+ // get attribute deadnodeinfo
+ String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
+ "DeadNodes"));
+ assertEquals(fsn.getDeadNodes(), deadnodeinfo);
+ Map<String, Map<String, Object>> deadNodes =
+ (Map<String, Map<String, Object>>) JSON.parse(deadnodeinfo);
+ assertTrue(deadNodes.size() > 0);
+ for (Map<String, Object> deadNode : deadNodes.values()) {
+ assertTrue(deadNode.containsKey("lastContact"));
+ assertTrue(deadNode.containsKey("decommissioned"));
+ assertTrue(deadNode.containsKey("xferaddr"));
+ }
+
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f6f9d6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
index 15cad04..426563b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
@@ -193,11 +193,7 @@ public class TestNamenodeCapacityReport {
int expectedTotalLoad = nodes; // xceiver server adds 1 to load
int expectedInServiceNodes = nodes;
int expectedInServiceLoad = nodes;
- assertEquals(nodes, namesystem.getNumLiveDataNodes());
- assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
- assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
- assertEquals((double)expectedInServiceLoad/expectedInServiceLoad,
- namesystem.getInServiceXceiverAverage(), EPSILON);
+ checkClusterHealth(nodes, namesystem, expectedTotalLoad, expectedInServiceNodes, expectedInServiceLoad);
// shutdown half the nodes and force a heartbeat check to ensure
// counts are accurate
@@ -209,7 +205,7 @@ public class TestNamenodeCapacityReport {
BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
expectedInServiceNodes--;
assertEquals(expectedInServiceNodes, namesystem.getNumLiveDataNodes());
- assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
+ assertEquals(expectedInServiceNodes, getNumDNInService(namesystem));
}
// restart the nodes to verify that counts are correct after
@@ -219,11 +215,7 @@ public class TestNamenodeCapacityReport {
datanodes = cluster.getDataNodes();
expectedInServiceNodes = nodes;
assertEquals(nodes, datanodes.size());
- assertEquals(nodes, namesystem.getNumLiveDataNodes());
- assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
- assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
- assertEquals((double)expectedInServiceLoad/expectedInServiceLoad,
- namesystem.getInServiceXceiverAverage(), EPSILON);
+ checkClusterHealth(nodes, namesystem, expectedTotalLoad, expectedInServiceNodes, expectedInServiceLoad);
// create streams and hsync to force datastreamers to start
DFSOutputStream[] streams = new DFSOutputStream[fileCount];
@@ -239,12 +231,7 @@ public class TestNamenodeCapacityReport {
}
// force nodes to send load update
triggerHeartbeats(datanodes);
- assertEquals(nodes, namesystem.getNumLiveDataNodes());
- assertEquals(expectedInServiceNodes,
- namesystem.getNumDatanodesInService());
- assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
- assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
- namesystem.getInServiceXceiverAverage(), EPSILON);
+ checkClusterHealth(nodes, namesystem, expectedTotalLoad, expectedInServiceNodes, expectedInServiceLoad);
// decomm a few nodes, substract their load from the expected load,
// trigger heartbeat to force load update
@@ -256,12 +243,7 @@ public class TestNamenodeCapacityReport {
dnm.startDecommission(dnd);
DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
Thread.sleep(100);
- assertEquals(nodes, namesystem.getNumLiveDataNodes());
- assertEquals(expectedInServiceNodes,
- namesystem.getNumDatanodesInService());
- assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
- assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
- namesystem.getInServiceXceiverAverage(), EPSILON);
+ checkClusterHealth(nodes, namesystem, expectedTotalLoad, expectedInServiceNodes, expectedInServiceLoad);
}
// check expected load while closing each stream. recalc expected
@@ -289,12 +271,7 @@ public class TestNamenodeCapacityReport {
}
triggerHeartbeats(datanodes);
// verify node count and loads
- assertEquals(nodes, namesystem.getNumLiveDataNodes());
- assertEquals(expectedInServiceNodes,
- namesystem.getNumDatanodesInService());
- assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
- assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
- namesystem.getInServiceXceiverAverage(), EPSILON);
+ checkClusterHealth(nodes, namesystem, expectedTotalLoad, expectedInServiceNodes, expectedInServiceLoad);
}
// shutdown each node, verify node counts based on decomm state
@@ -310,26 +287,49 @@ public class TestNamenodeCapacityReport {
if (i >= fileRepl) {
expectedInServiceNodes--;
}
- assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
+ assertEquals(expectedInServiceNodes, getNumDNInService(namesystem));
// live nodes always report load of 1. no nodes is load 0
double expectedXceiverAvg = (i == nodes-1) ? 0.0 : 1.0;
assertEquals((double)expectedXceiverAvg,
- namesystem.getInServiceXceiverAverage(), EPSILON);
+ getInServiceXceiverAverage(namesystem), EPSILON);
}
// final sanity check
- assertEquals(0, namesystem.getNumLiveDataNodes());
- assertEquals(0, namesystem.getNumDatanodesInService());
- assertEquals(0.0, namesystem.getTotalLoad(), EPSILON);
- assertEquals(0.0, namesystem.getInServiceXceiverAverage(), EPSILON);
+ checkClusterHealth(0, namesystem, 0.0, 0, 0.0);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
-
+
+ private static void checkClusterHealth(
+ int numOfLiveNodes,
+ FSNamesystem namesystem, double expectedTotalLoad,
+ int expectedInServiceNodes, double expectedInServiceLoad) {
+
+ assertEquals(numOfLiveNodes, namesystem.getNumLiveDataNodes());
+ assertEquals(expectedInServiceNodes, getNumDNInService(namesystem));
+ assertEquals(expectedTotalLoad, namesystem.getTotalLoad(), EPSILON);
+ if (expectedInServiceNodes != 0) {
+ assertEquals(expectedInServiceLoad / expectedInServiceNodes,
+ getInServiceXceiverAverage(namesystem), EPSILON);
+ } else {
+ assertEquals(0.0, getInServiceXceiverAverage(namesystem), EPSILON);
+ }
+ }
+
+ private static int getNumDNInService(FSNamesystem fsn) {
+ return fsn.getBlockManager().getDatanodeManager().getFSClusterStats()
+ .getNumDatanodesInService();
+ }
+
+ private static double getInServiceXceiverAverage(FSNamesystem fsn) {
+ return fsn.getBlockManager().getDatanodeManager().getFSClusterStats()
+ .getInServiceXceiverAverage();
+ }
+
private void triggerHeartbeats(List<DataNode> datanodes)
throws IOException, InterruptedException {
for (DataNode dn : datanodes) {
[03/25] hadoop git commit: HADOOP-10786. Fix UGI#reloginFromKeytab on
Java 8. Contributed by Stephen Chu.
Posted by vi...@apache.org.
HADOOP-10786. Fix UGI#reloginFromKeytab on Java 8. Contributed by Stephen Chu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a37a9934
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a37a9934
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a37a9934
Branch: refs/heads/HDFS-EC
Commit: a37a993453c02048a618f71b5b9bc63b5a44dbf6
Parents: 43cd07b
Author: Haohui Mai <wh...@apache.org>
Authored: Sun Nov 9 17:48:26 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Sun Nov 9 17:48:26 2014 -0800
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 2 +
.../hadoop/security/UserGroupInformation.java | 38 ++++++--
.../hadoop/security/TestUGILoginFromKeytab.java | 91 ++++++++++++++++++++
3 files changed, 126 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a37a9934/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 7ecee21..99010bd 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -728,6 +728,8 @@ Release 2.6.0 - UNRELEASED
HADOOP-11247. Fix a couple javac warnings in NFS. (Brandon Li via wheat9)
+ HADOOP-10786. Fix UGI#reloginFromKeytab on Java 8. (Stephen Chu via wheat9)
+
BUG FIXES
HADOOP-11182. GraphiteSink emits wrong timestamps (Sascha Coenen via raviprak)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a37a9934/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 45328c7..7a99391 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -88,9 +88,21 @@ public class UserGroupInformation {
* Percentage of the ticket window to use before we renew ticket.
*/
private static final float TICKET_RENEW_WINDOW = 0.80f;
+ private static boolean shouldRenewImmediatelyForTests = false;
static final String HADOOP_USER_NAME = "HADOOP_USER_NAME";
static final String HADOOP_PROXY_USER = "HADOOP_PROXY_USER";
-
+
+ /**
+ * For the purposes of unit tests, we want to test login
+ * from keytab and don't want to wait until the renew
+ * window (controlled by TICKET_RENEW_WINDOW).
+ * @param immediate true if we should login without waiting for ticket window
+ */
+ @VisibleForTesting
+ static void setShouldRenewImmediatelyForTests(boolean immediate) {
+ shouldRenewImmediatelyForTests = immediate;
+ }
+
/**
* UgiMetrics maintains UGI activity statistics
* and publishes them through the metrics interfaces.
@@ -598,6 +610,20 @@ public class UserGroupInformation {
user.setLogin(login);
}
+ private static Class<?> KEY_TAB_CLASS = KerberosKey.class;
+ static {
+ try {
+ // We use KEY_TAB_CLASS to determine if the UGI is logged in from
+ // keytab. In JDK6 and JDK7, if useKeyTab and storeKey are specified
+ // in the Krb5LoginModule, then some number of KerberosKey objects
+ // are added to the Subject's private credentials. However, in JDK8,
+ // a KeyTab object is added instead. More details in HADOOP-10786.
+ KEY_TAB_CLASS = Class.forName("javax.security.auth.kerberos.KeyTab");
+ } catch (ClassNotFoundException cnfe) {
+ // Ignore. javax.security.auth.kerberos.KeyTab does not exist in JDK6.
+ }
+ }
+
/**
* Create a UserGroupInformation for the given subject.
* This does not change the subject or acquire new credentials.
@@ -606,7 +632,7 @@ public class UserGroupInformation {
UserGroupInformation(Subject subject) {
this.subject = subject;
this.user = subject.getPrincipals(User.class).iterator().next();
- this.isKeytab = !subject.getPrivateCredentials(KerberosKey.class).isEmpty();
+ this.isKeytab = !subject.getPrivateCredentials(KEY_TAB_CLASS).isEmpty();
this.isKrbTkt = !subject.getPrivateCredentials(KerberosTicket.class).isEmpty();
}
@@ -962,7 +988,8 @@ public class UserGroupInformation {
|| !isKeytab)
return;
KerberosTicket tgt = getTGT();
- if (tgt != null && Time.now() < getRefreshTime(tgt)) {
+ if (tgt != null && !shouldRenewImmediatelyForTests &&
+ Time.now() < getRefreshTime(tgt)) {
return;
}
reloginFromKeytab();
@@ -987,13 +1014,14 @@ public class UserGroupInformation {
return;
long now = Time.now();
- if (!hasSufficientTimeElapsed(now)) {
+ if (!shouldRenewImmediatelyForTests && !hasSufficientTimeElapsed(now)) {
return;
}
KerberosTicket tgt = getTGT();
//Return if TGT is valid and is not going to expire soon.
- if (tgt != null && now < getRefreshTime(tgt)) {
+ if (tgt != null && !shouldRenewImmediatelyForTests &&
+ now < getRefreshTime(tgt)) {
return;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a37a9934/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
new file mode 100644
index 0000000..61fbf89
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.security;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.io.File;
+
+/**
+ * Verify UGI login from keytab. Check that the UGI is
+ * configured to use keytab to catch regressions like
+ * HADOOP-10786.
+ */
+public class TestUGILoginFromKeytab {
+
+ private MiniKdc kdc;
+ private File workDir;
+
+ @Rule
+ public final TemporaryFolder folder = new TemporaryFolder();
+
+ @Before
+ public void startMiniKdc() throws Exception {
+ // This setting below is required. If not enabled, UGI will abort
+ // any attempt to loginUserFromKeytab.
+ Configuration conf = new Configuration();
+ conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
+ "kerberos");
+ UserGroupInformation.setConfiguration(conf);
+ workDir = folder.getRoot();
+ kdc = new MiniKdc(MiniKdc.createConf(), workDir);
+ kdc.start();
+ }
+
+ @After
+ public void stopMiniKdc() {
+ if (kdc != null) {
+ kdc.stop();
+ }
+ }
+
+ /**
+ * Login from keytab using the MiniKDC and verify the UGI can successfully
+ * relogin from keytab as well. This will catch regressions like HADOOP-10786.
+ */
+ @Test
+ public void testUGILoginFromKeytab() throws Exception {
+ UserGroupInformation.setShouldRenewImmediatelyForTests(true);
+ String principal = "foo";
+ File keytab = new File(workDir, "foo.keytab");
+ kdc.createPrincipal(keytab, principal);
+
+ UserGroupInformation.loginUserFromKeytab(principal, keytab.getPath());
+ UserGroupInformation ugi = UserGroupInformation.getLoginUser();
+ Assert.assertTrue("UGI should be configured to login from keytab",
+ ugi.isFromKeytab());
+
+ // Verify relogin from keytab.
+ User user = ugi.getSubject().getPrincipals(User.class).iterator().next();
+ final long firstLogin = user.getLastLogin();
+ ugi.reloginFromKeytab();
+ final long secondLogin = user.getLastLogin();
+ Assert.assertTrue("User should have been able to relogin from keytab",
+ secondLogin > firstLogin);
+ }
+
+}
[09/25] hadoop git commit: Fix broken download of directories
Posted by vi...@apache.org.
Fix broken download of directories
(cherry picked from commit 745c9a01243b8eefc72d89d1164d7d010b80983b)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b31b4bf0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b31b4bf0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b31b4bf0
Branch: refs/heads/HDFS-EC
Commit: b31b4bf029acdceaf6723e40ee29224bb5d38895
Parents: e76faeb
Author: Karthik Kambatla <ka...@apache.org>
Authored: Wed Nov 5 10:12:56 2014 -0800
Committer: Arun C. Murthy <ac...@apache.org>
Committed: Sun Nov 9 19:03:49 2014 -0800
----------------------------------------------------------------------
.../org/apache/hadoop/yarn/util/FSDownload.java | 17 +++++++++++------
1 file changed, 11 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b31b4bf0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
index fb37701..2737cce 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
@@ -1,4 +1,4 @@
-/**
+ /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -28,6 +28,7 @@ import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.regex.Pattern;
+import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
@@ -389,17 +390,22 @@ public class FSDownload implements Callable<Path> {
*/
private void changePermissions(FileSystem fs, final Path path)
throws IOException, InterruptedException {
- FileStatus fStatus = fs.getFileStatus(path);
+ File f = new File(path.toUri());
+ if (FileUtils.isSymlink(f)) {
+ // avoid following symlinks when changing permissions
+ return;
+ }
+ boolean isDir = f.isDirectory();
FsPermission perm = cachePerms;
// set public perms as 755 or 555 based on dir or file
if (resource.getVisibility() == LocalResourceVisibility.PUBLIC) {
- perm = fStatus.isDirectory() ? PUBLIC_DIR_PERMS : PUBLIC_FILE_PERMS;
+ perm = isDir ? PUBLIC_DIR_PERMS : PUBLIC_FILE_PERMS;
}
// set private perms as 700 or 500
else {
// PRIVATE:
// APPLICATION:
- perm = fStatus.isDirectory() ? PRIVATE_DIR_PERMS : PRIVATE_FILE_PERMS;
+ perm = isDir ? PRIVATE_DIR_PERMS : PRIVATE_FILE_PERMS;
}
LOG.debug("Changing permissions for path " + path
+ " to perm " + perm);
@@ -415,8 +421,7 @@ public class FSDownload implements Callable<Path> {
}
});
}
- if (fStatus.isDirectory()
- && !fStatus.isSymlink()) {
+ if (isDir) {
FileStatus[] statuses = fs.listStatus(path);
for (FileStatus status : statuses) {
changePermissions(fs, status.getPath());
[02/25] hadoop git commit: YARN-2830. Add backwards compatible
ContainerId.newInstance constructor. Contributed by Jonathan Eagles.
Posted by vi...@apache.org.
YARN-2830. Add backwards compatible ContainerId.newInstance constructor. Contributed by Jonathan Eagles.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43cd07b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43cd07b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43cd07b4
Branch: refs/heads/HDFS-EC
Commit: 43cd07b408c6613d2c9aa89203cfa3110d830538
Parents: 9ba8d8c
Author: Arun C. Murthy <ac...@apache.org>
Authored: Sun Nov 9 14:57:37 2014 -0800
Committer: Arun C. Murthy <ac...@apache.org>
Committed: Sun Nov 9 14:57:37 2014 -0800
----------------------------------------------------------------------
.../v2/app/local/LocalContainerAllocator.java | 2 +-
.../jobhistory/TestJobHistoryEventHandler.java | 2 +-
.../apache/hadoop/mapreduce/v2/app/MRApp.java | 6 ++--
.../hadoop/mapreduce/v2/app/MRAppBenchmark.java | 4 +--
.../hadoop/mapreduce/v2/app/MockJobs.java | 6 ++--
.../v2/app/TestCheckpointPreemptionPolicy.java | 2 +-
.../v2/app/TestKillAMPreemptionPolicy.java | 2 +-
.../mapreduce/v2/app/TestMRAppMaster.java | 2 +-
.../mapreduce/v2/app/TestStagingCleanup.java | 2 +-
.../v2/app/job/impl/TestTaskAttempt.java | 22 +++++++-------
.../v2/app/launcher/TestContainerLauncher.java | 8 ++---
.../app/launcher/TestContainerLauncherImpl.java | 2 +-
.../v2/app/rm/TestRMContainerAllocator.java | 10 +++---
.../mapreduce/v2/hs/webapp/TestBlocks.java | 2 +-
.../v2/TestMRJobsWithHistoryService.java | 2 +-
hadoop-yarn-project/CHANGES.txt | 3 ++
.../hadoop/yarn/api/records/ContainerId.java | 16 ++++++++--
.../UnmanagedAMLauncher.java | 2 +-
.../hadoop/yarn/client/ProtocolHATestBase.java | 2 +-
.../api/async/impl/TestAMRMClientAsync.java | 2 +-
.../api/async/impl/TestNMClientAsync.java | 2 +-
.../yarn/client/api/impl/TestAHSClient.java | 16 +++++-----
.../api/impl/TestAMRMClientOnRMRestart.java | 2 +-
.../yarn/client/api/impl/TestYarnClient.java | 16 +++++-----
.../hadoop/yarn/client/cli/TestLogsCLI.java | 6 ++--
.../hadoop/yarn/client/cli/TestYarnCLI.java | 14 ++++-----
.../hadoop/yarn/TestContainerLaunchRPC.java | 2 +-
.../java/org/apache/hadoop/yarn/TestRPC.java | 2 +-
.../apache/hadoop/yarn/api/TestContainerId.java | 2 +-
.../yarn/api/TestContainerResourceDecrease.java | 2 +-
.../yarn/api/TestContainerResourceIncrease.java | 2 +-
.../TestContainerResourceIncreaseRequest.java | 2 +-
.../logaggregation/TestAggregatedLogFormat.java | 2 +-
.../logaggregation/TestAggregatedLogsBlock.java | 2 +-
.../yarn/security/TestYARNTokenIdentifier.java | 2 +-
.../ApplicationHistoryStoreTestUtils.java | 2 +-
.../TestApplicationHistoryClientService.java | 6 ++--
...pplicationHistoryManagerOnTimelineStore.java | 8 ++---
.../TestFileSystemApplicationHistoryStore.java | 10 +++---
.../TestMemoryApplicationHistoryStore.java | 12 ++++----
.../webapp/TestAHSWebApp.java | 4 +--
.../webapp/TestAHSWebServices.java | 2 +-
.../hadoop/yarn/server/utils/BuilderUtils.java | 4 +--
.../hadoop/yarn/TestYarnServerApiClasses.java | 2 +-
.../protocolrecords/TestProtocolRecords.java | 4 +--
.../TestRegisterNodeManagerRequest.java | 2 +-
.../yarn/server/nodemanager/TestEventFlow.java | 2 +-
.../nodemanager/TestLinuxContainerExecutor.java | 2 +-
.../nodemanager/TestNodeManagerReboot.java | 2 +-
.../nodemanager/TestNodeManagerResync.java | 2 +-
.../nodemanager/TestNodeManagerShutdown.java | 2 +-
.../nodemanager/TestNodeStatusUpdater.java | 14 ++++-----
.../containermanager/TestAuxServices.java | 2 +-
.../containermanager/TestContainerManager.java | 2 +-
.../TestContainerManagerRecovery.java | 2 +-
.../launcher/TestContainerLaunch.java | 8 ++---
.../monitor/TestContainersMonitor.java | 2 +-
.../TestNMLeveldbStateStoreService.java | 2 +-
.../resourcemanager/TestApplicationCleanup.java | 2 +-
.../resourcemanager/TestClientRMService.java | 6 ++--
.../TestContainerResourceUsage.java | 4 +--
.../server/resourcemanager/TestRMRestart.java | 2 +-
.../TestResourceTrackerService.java | 8 ++---
.../TestWorkPreservingRMRestart.java | 2 +-
.../ahs/TestRMApplicationHistoryWriter.java | 12 ++++----
.../applicationsmanager/MockAsm.java | 2 +-
.../applicationsmanager/TestAMRestart.java | 22 +++++++-------
.../metrics/TestSystemMetricsPublisher.java | 4 +--
...estProportionalCapacityPreemptionPolicy.java | 2 +-
.../attempt/TestRMAppAttemptTransitions.java | 2 +-
.../rmcontainer/TestRMContainerImpl.java | 2 +-
.../TestSchedulerApplicationAttempt.java | 2 +-
.../scheduler/TestSchedulerUtils.java | 4 +--
.../capacity/TestCapacityScheduler.java | 4 +--
.../capacity/TestContainerAllocation.java | 32 ++++++++++----------
.../scheduler/fair/TestFairScheduler.java | 2 +-
.../server/TestContainerManagerSecurity.java | 12 ++++----
77 files changed, 206 insertions(+), 191 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
index 19efe17..74dfb39 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
@@ -140,7 +140,7 @@ public class LocalContainerAllocator extends RMCommunicator
LOG.info("Processing the event " + event.toString());
// Assign the same container ID as the AM
ContainerId cID =
- ContainerId.newInstance(getContext().getApplicationAttemptId(),
+ ContainerId.newContainerId(getContext().getApplicationAttemptId(),
this.containerId.getContainerId());
Container container = recordFactory.newRecordInstance(Container.class);
container.setId(cID);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
index 1edadb9..de35d84 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
@@ -716,7 +716,7 @@ public class TestJobHistoryEventHandler {
ApplicationId appId = ApplicationId.newInstance(200, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
- ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
TaskID taskID = TaskID.forName("task_200707121733_0003_m_000005");
TaskAttemptID taskAttemptID = new TaskAttemptID(taskID, 0);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
index accd594..58db925 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
@@ -180,7 +180,7 @@ public class MRApp extends MRAppMaster {
ApplicationAttemptId appAttemptId =
getApplicationAttemptId(applicationId, startCount);
ContainerId containerId =
- ContainerId.newInstance(appAttemptId, startCount);
+ ContainerId.newContainerId(appAttemptId, startCount);
return containerId;
}
@@ -567,7 +567,7 @@ public class MRApp extends MRAppMaster {
@Override
public void handle(ContainerAllocatorEvent event) {
ContainerId cId =
- ContainerId.newInstance(getContext().getApplicationAttemptId(),
+ ContainerId.newContainerId(getContext().getApplicationAttemptId(),
containerCount++);
NodeId nodeId = NodeId.newInstance(NM_HOST, NM_PORT);
Resource resource = Resource.newInstance(1234, 2);
@@ -775,7 +775,7 @@ public class MRApp extends MRAppMaster {
ApplicationId applicationId = ApplicationId.newInstance(timestamp, appId);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, appAttemptId);
- return ContainerId.newInstance(applicationAttemptId, containerId);
+ return ContainerId.newContainerId(applicationAttemptId, containerId);
}
public static ContainerTokenIdentifier newContainerTokenIdentifier(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
index 4c63d34..322984e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
@@ -145,7 +145,7 @@ public class MRAppBenchmark {
if (concurrentRunningTasks < maxConcurrentRunningTasks) {
event = eventQueue.take();
ContainerId cId =
- ContainerId.newInstance(getContext()
+ ContainerId.newContainerId(getContext()
.getApplicationAttemptId(), containerCount++);
//System.out.println("Allocating " + containerCount);
@@ -240,7 +240,7 @@ public class MRAppBenchmark {
int numContainers = req.getNumContainers();
for (int i = 0; i < numContainers; i++) {
ContainerId containerId =
- ContainerId.newInstance(
+ ContainerId.newContainerId(
getContext().getApplicationAttemptId(),
request.getResponseId() + i);
containers.add(Container.newInstance(containerId,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
index 19ac0db..fd9c094 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
@@ -183,7 +183,7 @@ public class MockJobs extends MockApps {
public static TaskAttemptReport newTaskAttemptReport(TaskAttemptId id) {
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
id.getTaskId().getJobId().getAppId(), 0);
- ContainerId containerId = ContainerId.newInstance(appAttemptId, 0);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, 0);
TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class);
report.setTaskAttemptId(id);
report
@@ -315,7 +315,7 @@ public class MockJobs extends MockApps {
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(taid.getTaskId().getJobId()
.getAppId(), 0);
- ContainerId id = ContainerId.newInstance(appAttemptId, 0);
+ ContainerId id = ContainerId.newContainerId(appAttemptId, 0);
return id;
}
@@ -640,7 +640,7 @@ public class MockJobs extends MockApps {
private static AMInfo createAMInfo(int attempt) {
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
ApplicationId.newInstance(100, 1), attempt);
- ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
return MRBuilderUtils.newAMInfo(appAttemptId, System.currentTimeMillis(),
containerId, NM_HOST, NM_PORT, NM_HTTP_PORT);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestCheckpointPreemptionPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestCheckpointPreemptionPolicy.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestCheckpointPreemptionPolicy.java
index 5e01596..59ce4a5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestCheckpointPreemptionPolicy.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestCheckpointPreemptionPolicy.java
@@ -88,7 +88,7 @@ public class TestCheckpointPreemptionPolicy {
EventHandler ea = mock(EventHandler.class);
when(mActxt.getEventHandler()).thenReturn(ea);
for (int i = 0; i < 40; ++i) {
- ContainerId cId = ContainerId.newInstance(appAttemptId, i);
+ ContainerId cId = ContainerId.newContainerId(appAttemptId, i);
if (0 == i % 7) {
preemptedContainers.add(cId);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKillAMPreemptionPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKillAMPreemptionPolicy.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKillAMPreemptionPolicy.java
index a4a8ac3..647d527 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKillAMPreemptionPolicy.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKillAMPreemptionPolicy.java
@@ -57,7 +57,7 @@ public class TestKillAMPreemptionPolicy {
public void testKillAMPreemptPolicy() {
ApplicationId appId = ApplicationId.newInstance(123456789, 1);
- ContainerId container = ContainerId.newInstance(
+ ContainerId container = ContainerId.newContainerId(
ApplicationAttemptId.newInstance(appId, 1), 1);
AMPreemptionPolicy.Context mPctxt = mock(AMPreemptionPolicy.Context.class);
when(mPctxt.getTaskAttempt(any(ContainerId.class))).thenReturn(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
index d356eca..70437c1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
@@ -382,7 +382,7 @@ public class TestMRAppMaster {
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
ContainerId containerId =
- ContainerId.newInstance(applicationAttemptId, 546);
+ ContainerId.newContainerId(applicationAttemptId, 546);
String userName = UserGroupInformation.getCurrentUser().getShortUserName();
// Create staging dir, so MRAppMaster doesn't barf.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
index 1037e7c..fc64996 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
@@ -253,7 +253,7 @@ import org.junit.Test;
public TestMRApp(ApplicationAttemptId applicationAttemptId,
ContainerAllocator allocator) {
- super(applicationAttemptId, ContainerId.newInstance(
+ super(applicationAttemptId, ContainerId.newContainerId(
applicationAttemptId, 1), "testhost", 2222, 3333,
System.currentTimeMillis());
this.allocator = allocator;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
index 1330344..1807c1c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
@@ -359,7 +359,7 @@ public class TestTaskAttempt{
new SystemClock(), null);
NodeId nid = NodeId.newInstance("127.0.0.1", 0);
- ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
+ ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
@@ -415,7 +415,7 @@ public class TestTaskAttempt{
new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.2", 0);
- ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
+ ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
@@ -472,7 +472,7 @@ public class TestTaskAttempt{
new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.1", 0);
- ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
+ ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
@@ -532,7 +532,7 @@ public class TestTaskAttempt{
new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.1", 0);
- ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
+ ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
@@ -599,7 +599,7 @@ public class TestTaskAttempt{
new Token(), new Credentials(), new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.1", 0);
- ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
+ ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
@@ -649,7 +649,7 @@ public class TestTaskAttempt{
new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.1", 0);
- ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
+ ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
@@ -714,7 +714,7 @@ public class TestTaskAttempt{
new Token(), new Credentials(), new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.1", 0);
- ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
+ ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
@@ -760,7 +760,7 @@ public class TestTaskAttempt{
new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.1", 0);
- ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
+ ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
@@ -830,7 +830,7 @@ public class TestTaskAttempt{
new Credentials(), new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.2", 0);
- ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
+ ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
@@ -884,7 +884,7 @@ public class TestTaskAttempt{
new Credentials(), new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.2", 0);
- ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
+ ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
@@ -941,7 +941,7 @@ public class TestTaskAttempt{
new Credentials(), new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.2", 0);
- ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
+ ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java
index f2c1841..dc1d72f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java
@@ -115,7 +115,7 @@ public class TestContainerLauncher {
containerLauncher.expectedCorePoolSize = ContainerLauncherImpl.INITIAL_POOL_SIZE;
for (int i = 0; i < 10; i++) {
- ContainerId containerId = ContainerId.newInstance(appAttemptId, i);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, i);
TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, i);
containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
containerId, "host" + i + ":1234", null,
@@ -137,7 +137,7 @@ public class TestContainerLauncher {
Assert.assertEquals(10, containerLauncher.numEventsProcessed.get());
containerLauncher.finishEventHandling = false;
for (int i = 0; i < 10; i++) {
- ContainerId containerId = ContainerId.newInstance(appAttemptId,
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId,
i + 10);
TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId,
i + 10);
@@ -154,7 +154,7 @@ public class TestContainerLauncher {
// Core pool size should be 21 but the live pool size should be only 11.
containerLauncher.expectedCorePoolSize = 11 + ContainerLauncherImpl.INITIAL_POOL_SIZE;
containerLauncher.finishEventHandling = false;
- ContainerId containerId = ContainerId.newInstance(appAttemptId, 21);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, 21);
TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, 21);
containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
containerId, "host11:1234", null,
@@ -174,7 +174,7 @@ public class TestContainerLauncher {
JobId jobId = MRBuilderUtils.newJobId(appId, 8);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 9, TaskType.MAP);
TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
- ContainerId containerId = ContainerId.newInstance(appAttemptId, 10);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, 10);
AppContext context = mock(AppContext.class);
CustomContainerLauncher containerLauncher = new CustomContainerLauncher(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java
index 74e532a..184f1b2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java
@@ -139,7 +139,7 @@ public class TestContainerLauncherImpl {
public static ContainerId makeContainerId(long ts, int appId, int attemptId,
int id) {
- return ContainerId.newInstance(
+ return ContainerId.newContainerId(
ApplicationAttemptId.newInstance(
ApplicationId.newInstance(ts, appId), attemptId), id);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
index fdc456a..3642670 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
@@ -689,7 +689,7 @@ public class TestRMContainerAllocator {
rm.sendAMLaunched(appAttemptId);
rmDispatcher.await();
- MRApp mrApp = new MRApp(appAttemptId, ContainerId.newInstance(
+ MRApp mrApp = new MRApp(appAttemptId, ContainerId.newContainerId(
appAttemptId, 0), 10, 10, false, this.getClass().getName(), true, 1) {
@Override
protected Dispatcher createDispatcher() {
@@ -841,7 +841,7 @@ public class TestRMContainerAllocator {
rm.sendAMLaunched(appAttemptId);
rmDispatcher.await();
- MRApp mrApp = new MRApp(appAttemptId, ContainerId.newInstance(
+ MRApp mrApp = new MRApp(appAttemptId, ContainerId.newContainerId(
appAttemptId, 0), 10, 0, false, this.getClass().getName(), true, 1) {
@Override
protected Dispatcher createDispatcher() {
@@ -2026,7 +2026,7 @@ public class TestRMContainerAllocator {
ApplicationId applicationId = ApplicationId.newInstance(1, 1);
ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(
applicationId, 1);
- ContainerId containerId = ContainerId.newInstance(applicationAttemptId, 1);
+ ContainerId containerId = ContainerId.newContainerId(applicationAttemptId, 1);
ContainerStatus status = ContainerStatus.newInstance(
containerId, ContainerState.RUNNING, "", 0);
@@ -2043,7 +2043,7 @@ public class TestRMContainerAllocator {
abortedStatus, attemptId);
Assert.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent.getType());
- ContainerId containerId2 = ContainerId.newInstance(applicationAttemptId, 2);
+ ContainerId containerId2 = ContainerId.newContainerId(applicationAttemptId, 2);
ContainerStatus status2 = ContainerStatus.newInstance(containerId2,
ContainerState.RUNNING, "", 0);
@@ -2082,7 +2082,7 @@ public class TestRMContainerAllocator {
rmDispatcher.await();
MRApp mrApp =
- new MRApp(appAttemptId, ContainerId.newInstance(appAttemptId, 0), 10,
+ new MRApp(appAttemptId, ContainerId.newContainerId(appAttemptId, 0), 10,
0, false, this.getClass().getName(), true, 1) {
@Override
protected Dispatcher createDispatcher() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestBlocks.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestBlocks.java
index 82d578a..7231367 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestBlocks.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestBlocks.java
@@ -133,7 +133,7 @@ public class TestBlocks {
ApplicationId appId = ApplicationIdPBImpl.newInstance(0, 5);
ApplicationAttemptId appAttemptId = ApplicationAttemptIdPBImpl.newInstance(appId, 1);
- ContainerId containerId = ContainerIdPBImpl.newInstance(appAttemptId, 1);
+ ContainerId containerId = ContainerIdPBImpl.newContainerId(appAttemptId, 1);
when(attempt.getAssignedContainerID()).thenReturn(containerId);
when(attempt.getAssignedContainerMgrAddress()).thenReturn(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java
index 9fba91d..f9236a9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java
@@ -169,7 +169,7 @@ public class TestMRJobsWithHistoryService {
Assert.assertEquals(1, amInfos.size());
AMInfo amInfo = amInfos.get(0);
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(jobId.getAppId(), 1);
- ContainerId amContainerId = ContainerId.newInstance(appAttemptId, 1);
+ ContainerId amContainerId = ContainerId.newContainerId(appAttemptId, 1);
Assert.assertEquals(appAttemptId, amInfo.getAppAttemptId());
Assert.assertEquals(amContainerId, amInfo.getContainerId());
Assert.assertTrue(jobReport.getSubmitTime() > 0);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 9adfb8c..4ea0726 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -922,6 +922,9 @@ Release 2.6.0 - UNRELEASED
YARN-2607. Fixed issues in TestDistributedShell. (Wangda Tan via vinodkv)
+ YARN-2830. Add backwards compatible ContainerId.newInstance constructor.
+ (jeagles via acmurthy)
+
Release 2.5.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java
index 5499a19..5d0d65a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java
@@ -42,7 +42,7 @@ public abstract class ContainerId implements Comparable<ContainerId>{
@Private
@Unstable
- public static ContainerId newInstance(ApplicationAttemptId appAttemptId,
+ public static ContainerId newContainerId(ApplicationAttemptId appAttemptId,
long containerId) {
ContainerId id = Records.newRecord(ContainerId.class);
id.setContainerId(containerId);
@@ -51,6 +51,18 @@ public abstract class ContainerId implements Comparable<ContainerId>{
return id;
}
+ @Private
+ @Deprecated
+ @Unstable
+ public static ContainerId newInstance(ApplicationAttemptId appAttemptId,
+ int containerId) {
+ ContainerId id = Records.newRecord(ContainerId.class);
+ id.setContainerId(containerId);
+ id.setApplicationAttemptId(appAttemptId);
+ id.build();
+ return id;
+ }
+
/**
* Get the <code>ApplicationAttemptId</code> of the application to which the
* <code>Container</code> was assigned.
@@ -214,7 +226,7 @@ public abstract class ContainerId implements Comparable<ContainerId>{
}
long id = Long.parseLong(it.next());
long cid = (epoch << 40) | id;
- ContainerId containerId = ContainerId.newInstance(appAttemptID, cid);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptID, cid);
return containerId;
} catch (NumberFormatException n) {
throw new IllegalArgumentException("Invalid ContainerId: "
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java
index 2414a67..d41434e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java
@@ -214,7 +214,7 @@ public class UnmanagedAMLauncher {
if(!setClasspath && classpath!=null) {
envAMList.add("CLASSPATH="+classpath);
}
- ContainerId containerId = ContainerId.newInstance(attemptId, 0);
+ ContainerId containerId = ContainerId.newContainerId(attemptId, 0);
String hostname = InetAddress.getLocalHost().getHostName();
envAMList.add(Environment.CONTAINER_ID.name() + "=" + containerId);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
index ec00d45..da7d505 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
@@ -667,7 +667,7 @@ public abstract class ProtocolHATestBase extends ClientBaseWithFixes {
}
public ContainerId createFakeContainerId() {
- return ContainerId.newInstance(createFakeApplicationAttemptId(), 0);
+ return ContainerId.newContainerId(createFakeApplicationAttemptId(), 0);
}
public YarnClusterMetrics createFakeYarnClusterMetrics() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java
index b00598a..74d4aa4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java
@@ -402,7 +402,7 @@ public class TestAMRMClientAsync {
ApplicationId applicationId = ApplicationId.newInstance(timestamp, appId);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, appAttemptId);
- return ContainerId.newInstance(applicationAttemptId, containerId);
+ return ContainerId.newContainerId(applicationAttemptId, containerId);
}
private class TestCallbackHandler implements AMRMClientAsync.CallbackHandler {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java
index 0e059d7..6f9d41d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java
@@ -547,7 +547,7 @@ public class TestNMClientAsync {
ApplicationId.newInstance(System.currentTimeMillis(), 1);
ApplicationAttemptId attemptId =
ApplicationAttemptId.newInstance(appId, 1);
- ContainerId containerId = ContainerId.newInstance(attemptId, i);
+ ContainerId containerId = ContainerId.newContainerId(attemptId, i);
nodeId = NodeId.newInstance("localhost", 0);
// Create an empty record
containerToken = recordFactory.newRecordInstance(Token.class);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java
index d3c182b..a88189e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java
@@ -157,9 +157,9 @@ public class TestAHSClient {
List<ContainerReport> reports = client.getContainers(appAttemptId);
Assert.assertNotNull(reports);
Assert.assertEquals(reports.get(0).getContainerId(),
- (ContainerId.newInstance(appAttemptId, 1)));
+ (ContainerId.newContainerId(appAttemptId, 1)));
Assert.assertEquals(reports.get(1).getContainerId(),
- (ContainerId.newInstance(appAttemptId, 2)));
+ (ContainerId.newContainerId(appAttemptId, 2)));
client.stop();
}
@@ -176,11 +176,11 @@ public class TestAHSClient {
ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(applicationId, 1);
- ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
ContainerReport report = client.getContainerReport(containerId);
Assert.assertNotNull(report);
Assert.assertEquals(report.getContainerId().toString(), (ContainerId
- .newInstance(expectedReports.get(0).getCurrentApplicationAttemptId(), 1))
+ .newContainerId(expectedReports.get(0).getCurrentApplicationAttemptId(), 1))
.toString());
client.stop();
}
@@ -349,7 +349,7 @@ public class TestAHSClient {
"oUrl",
"diagnostics",
YarnApplicationAttemptState.FINISHED,
- ContainerId.newInstance(
+ ContainerId.newContainerId(
newApplicationReport.getCurrentApplicationAttemptId(), 1));
appAttempts.add(attempt);
ApplicationAttemptReport attempt1 =
@@ -361,7 +361,7 @@ public class TestAHSClient {
"oUrl",
"diagnostics",
YarnApplicationAttemptState.FINISHED,
- ContainerId.newInstance(
+ ContainerId.newContainerId(
newApplicationReport.getCurrentApplicationAttemptId(), 2));
appAttempts.add(attempt1);
attempts.put(applicationId, appAttempts);
@@ -369,14 +369,14 @@ public class TestAHSClient {
List<ContainerReport> containerReports = new ArrayList<ContainerReport>();
ContainerReport container =
ContainerReport.newInstance(
- ContainerId.newInstance(attempt.getApplicationAttemptId(), 1),
+ ContainerId.newContainerId(attempt.getApplicationAttemptId(), 1),
null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234,
5678, "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE);
containerReports.add(container);
ContainerReport container1 =
ContainerReport.newInstance(
- ContainerId.newInstance(attempt.getApplicationAttemptId(), 2),
+ ContainerId.newContainerId(attempt.getApplicationAttemptId(), 2),
null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234,
5678, "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE);
containerReports.add(container1);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
index ce3086f..108ad37 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
@@ -352,7 +352,7 @@ public class TestAMRMClientOnRMRestart {
// new NM to represent NM re-register
nm1 = new MockNM("h1:1234", 10240, rm2.getResourceTrackerService());
- ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
NMContainerStatus containerReport =
NMContainerStatus.newInstance(containerId, ContainerState.RUNNING,
Resource.newInstance(1024, 1), "recover container", 0,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index ca7c50a..02f2882 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -348,9 +348,9 @@ public class TestYarnClient {
List<ContainerReport> reports = client.getContainers(appAttemptId);
Assert.assertNotNull(reports);
Assert.assertEquals(reports.get(0).getContainerId(),
- (ContainerId.newInstance(appAttemptId, 1)));
+ (ContainerId.newContainerId(appAttemptId, 1)));
Assert.assertEquals(reports.get(1).getContainerId(),
- (ContainerId.newInstance(appAttemptId, 2)));
+ (ContainerId.newContainerId(appAttemptId, 2)));
client.stop();
}
@@ -367,11 +367,11 @@ public class TestYarnClient {
ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
applicationId, 1);
- ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
ContainerReport report = client.getContainerReport(containerId);
Assert.assertNotNull(report);
Assert.assertEquals(report.getContainerId().toString(),
- (ContainerId.newInstance(expectedReports.get(0)
+ (ContainerId.newContainerId(expectedReports.get(0)
.getCurrentApplicationAttemptId(), 1)).toString());
client.stop();
}
@@ -481,7 +481,7 @@ public class TestYarnClient {
"oUrl",
"diagnostics",
YarnApplicationAttemptState.FINISHED,
- ContainerId.newInstance(
+ ContainerId.newContainerId(
newApplicationReport.getCurrentApplicationAttemptId(), 1));
appAttempts.add(attempt);
ApplicationAttemptReport attempt1 = ApplicationAttemptReport.newInstance(
@@ -492,20 +492,20 @@ public class TestYarnClient {
"oUrl",
"diagnostics",
YarnApplicationAttemptState.FINISHED,
- ContainerId.newInstance(
+ ContainerId.newContainerId(
newApplicationReport.getCurrentApplicationAttemptId(), 2));
appAttempts.add(attempt1);
attempts.put(applicationId, appAttempts);
List<ContainerReport> containerReports = new ArrayList<ContainerReport>();
ContainerReport container = ContainerReport.newInstance(
- ContainerId.newInstance(attempt.getApplicationAttemptId(), 1), null,
+ ContainerId.newContainerId(attempt.getApplicationAttemptId(), 1), null,
NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678,
"diagnosticInfo", "logURL", 0, ContainerState.COMPLETE);
containerReports.add(container);
ContainerReport container1 = ContainerReport.newInstance(
- ContainerId.newInstance(attempt.getApplicationAttemptId(), 2), null,
+ ContainerId.newContainerId(attempt.getApplicationAttemptId(), 2), null,
NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678,
"diagnosticInfo", "logURL", 0, ContainerState.COMPLETE);
containerReports.add(container1);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
index 5ed8398..ef9439d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
@@ -172,9 +172,9 @@ public class TestLogsCLI {
ApplicationId appId = ApplicationIdPBImpl.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptIdPBImpl.newInstance(appId, 1);
- ContainerId containerId0 = ContainerIdPBImpl.newInstance(appAttemptId, 0);
- ContainerId containerId1 = ContainerIdPBImpl.newInstance(appAttemptId, 1);
- ContainerId containerId2 = ContainerIdPBImpl.newInstance(appAttemptId, 2);
+ ContainerId containerId0 = ContainerIdPBImpl.newContainerId(appAttemptId, 0);
+ ContainerId containerId1 = ContainerIdPBImpl.newContainerId(appAttemptId, 1);
+ ContainerId containerId2 = ContainerIdPBImpl.newContainerId(appAttemptId, 2);
NodeId nodeId = NodeId.newInstance("localhost", 1234);
// create local logs
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
index d87277a..9d9a86a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
@@ -146,7 +146,7 @@ public class TestYarnCLI {
applicationId, 1);
ApplicationAttemptReport attemptReport = ApplicationAttemptReport
.newInstance(attemptId, "host", 124, "url", "oUrl", "diagnostics",
- YarnApplicationAttemptState.FINISHED, ContainerId.newInstance(
+ YarnApplicationAttemptState.FINISHED, ContainerId.newContainerId(
attemptId, 1));
when(
client
@@ -182,11 +182,11 @@ public class TestYarnCLI {
applicationId, 2);
ApplicationAttemptReport attemptReport = ApplicationAttemptReport
.newInstance(attemptId, "host", 124, "url", "oUrl", "diagnostics",
- YarnApplicationAttemptState.FINISHED, ContainerId.newInstance(
+ YarnApplicationAttemptState.FINISHED, ContainerId.newContainerId(
attemptId, 1));
ApplicationAttemptReport attemptReport1 = ApplicationAttemptReport
.newInstance(attemptId1, "host", 124, "url", "oUrl", "diagnostics",
- YarnApplicationAttemptState.FINISHED, ContainerId.newInstance(
+ YarnApplicationAttemptState.FINISHED, ContainerId.newContainerId(
attemptId1, 1));
List<ApplicationAttemptReport> reports = new ArrayList<ApplicationAttemptReport>();
reports.add(attemptReport);
@@ -223,7 +223,7 @@ public class TestYarnCLI {
ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
applicationId, 1);
- ContainerId containerId = ContainerId.newInstance(attemptId, 1);
+ ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
ContainerReport container = ContainerReport.newInstance(containerId, null,
NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678,
"diagnosticInfo", "logURL", 0, ContainerState.COMPLETE);
@@ -255,8 +255,8 @@ public class TestYarnCLI {
ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
applicationId, 1);
- ContainerId containerId = ContainerId.newInstance(attemptId, 1);
- ContainerId containerId1 = ContainerId.newInstance(attemptId, 2);
+ ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
+ ContainerId containerId1 = ContainerId.newContainerId(attemptId, 2);
ContainerReport container = ContainerReport.newInstance(containerId, null,
NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678,
"diagnosticInfo", "logURL", 0, ContainerState.COMPLETE);
@@ -766,7 +766,7 @@ public class TestYarnCLI {
sysOutStream.toString());
sysOutStream.reset();
- ContainerId containerId = ContainerId.newInstance(appAttemptId, 7);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, 7);
result = cli.run(
new String[] { "container", "-status", containerId.toString(), "args" });
verify(spyCli).printUsage(any(String.class), any(Options.class));
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java
index 45b2a06..e2071dd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java
@@ -97,7 +97,7 @@ public class TestContainerLaunchRPC {
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, 0);
ContainerId containerId =
- ContainerId.newInstance(applicationAttemptId, 100);
+ ContainerId.newContainerId(applicationAttemptId, 100);
NodeId nodeId = NodeId.newInstance("localhost", 1234);
Resource resource = Resource.newInstance(1234, 2);
ContainerTokenIdentifier containerTokenIdentifier =
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
index 8271713..39e6162 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
@@ -124,7 +124,7 @@ public class TestRPC {
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, 0);
ContainerId containerId =
- ContainerId.newInstance(applicationAttemptId, 100);
+ ContainerId.newContainerId(applicationAttemptId, 100);
NodeId nodeId = NodeId.newInstance("localhost", 1234);
Resource resource = Resource.newInstance(1234, 2);
ContainerTokenIdentifier containerTokenIdentifier =
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java
index 2259294..1643301 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java
@@ -79,6 +79,6 @@ public class TestContainerId {
ApplicationId applicationId = ApplicationId.newInstance(timestamp, appId);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, appAttemptId);
- return ContainerId.newInstance(applicationAttemptId, containerId);
+ return ContainerId.newContainerId(applicationAttemptId, containerId);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceDecrease.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceDecrease.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceDecrease.java
index f497d27..29b0ffe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceDecrease.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceDecrease.java
@@ -33,7 +33,7 @@ public class TestContainerResourceDecrease {
@Test
public void testResourceDecreaseContext() {
ContainerId containerId = ContainerId
- .newInstance(ApplicationAttemptId.newInstance(
+ .newContainerId(ApplicationAttemptId.newInstance(
ApplicationId.newInstance(1234, 3), 3), 7);
Resource resource = Resource.newInstance(1023, 3);
ContainerResourceDecrease ctx = ContainerResourceDecrease.newInstance(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncrease.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncrease.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncrease.java
index d307e39..932d5a7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncrease.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncrease.java
@@ -38,7 +38,7 @@ public class TestContainerResourceIncrease {
byte[] identifier = new byte[] { 1, 2, 3, 4 };
Token token = Token.newInstance(identifier, "", "".getBytes(), "");
ContainerId containerId = ContainerId
- .newInstance(ApplicationAttemptId.newInstance(
+ .newContainerId(ApplicationAttemptId.newInstance(
ApplicationId.newInstance(1234, 3), 3), 7);
Resource resource = Resource.newInstance(1023, 3);
ContainerResourceIncrease ctx = ContainerResourceIncrease.newInstance(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncreaseRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncreaseRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncreaseRequest.java
index 0acad00..cf4dabf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncreaseRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncreaseRequest.java
@@ -33,7 +33,7 @@ public class TestContainerResourceIncreaseRequest {
@Test
public void ContainerResourceIncreaseRequest() {
ContainerId containerId = ContainerId
- .newInstance(ApplicationAttemptId.newInstance(
+ .newContainerId(ApplicationAttemptId.newInstance(
ApplicationId.newInstance(1234, 3), 3), 7);
Resource resource = Resource.newInstance(1023, 3);
ContainerResourceIncreaseRequest context = ContainerResourceIncreaseRequest
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
index 405cb3d..4301bc9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
@@ -295,7 +295,7 @@ public class TestAggregatedLogFormat {
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, 1);
ContainerId testContainerId1 =
- ContainerId.newInstance(applicationAttemptId, 1);
+ ContainerId.newContainerId(applicationAttemptId, 1);
Path appDir =
new Path(srcFileRoot, testContainerId1.getApplicationAttemptId()
.getApplicationId().toString());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
index 0a17433..2a5762c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
@@ -207,7 +207,7 @@ public class TestAggregatedLogsBlock {
throws Exception {
ApplicationId appId = ApplicationIdPBImpl.newInstance(0, 1);
ApplicationAttemptId appAttemptId = ApplicationAttemptIdPBImpl.newInstance(appId, 1);
- ContainerId containerId = ContainerIdPBImpl.newInstance(appAttemptId, 1);
+ ContainerId containerId = ContainerIdPBImpl.newContainerId(appAttemptId, 1);
String path = "target/logs/" + user
+ "/logs/application_0_0001/localhost_1234";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
index dc4f9e2..834dcf1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
@@ -134,7 +134,7 @@ public class TestYARNTokenIdentifier {
@Test
public void testContainerTokenIdentifier() throws IOException {
- ContainerId containerID = ContainerId.newInstance(
+ ContainerId containerID = ContainerId.newContainerId(
ApplicationAttemptId.newInstance(ApplicationId.newInstance(
1, 1), 1), 1);
String hostName = "host0";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java
index 1708da2..de4051a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryStoreTestUtils.java
@@ -58,7 +58,7 @@ public class ApplicationHistoryStoreTestUtils {
ApplicationAttemptId appAttemptId) throws IOException {
store.applicationAttemptStarted(ApplicationAttemptStartData.newInstance(
appAttemptId, appAttemptId.toString(), 0,
- ContainerId.newInstance(appAttemptId, 1)));
+ ContainerId.newContainerId(appAttemptId, 1)));
}
protected void writeApplicationAttemptFinishData(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
index 60027e9..7c2593d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
@@ -142,7 +142,7 @@ public class TestApplicationHistoryClientService {
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
- ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
GetContainerReportRequest request =
GetContainerReportRequest.newInstance(containerId);
GetContainerReportResponse response =
@@ -160,8 +160,8 @@ public class TestApplicationHistoryClientService {
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
- ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
- ContainerId containerId1 = ContainerId.newInstance(appAttemptId, 2);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
+ ContainerId containerId1 = ContainerId.newContainerId(appAttemptId, 2);
GetContainersRequest request =
GetContainersRequest.newInstance(appAttemptId);
GetContainersResponse response =
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
index 856b88d..a093f19 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
@@ -141,7 +141,7 @@ public class TestApplicationHistoryManagerOnTimelineStore {
store.put(entities);
for (int k = 1; k <= scale; ++k) {
entities = new TimelineEntities();
- ContainerId containerId = ContainerId.newInstance(appAttemptId, k);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, k);
entities.addEntity(createContainerEntity(containerId));
store.put(entities);
}
@@ -238,7 +238,7 @@ public class TestApplicationHistoryManagerOnTimelineStore {
}
Assert.assertNotNull(appAttempt);
Assert.assertEquals(appAttemptId, appAttempt.getApplicationAttemptId());
- Assert.assertEquals(ContainerId.newInstance(appAttemptId, 1),
+ Assert.assertEquals(ContainerId.newContainerId(appAttemptId, 1),
appAttempt.getAMContainerId());
Assert.assertEquals("test host", appAttempt.getHost());
Assert.assertEquals(100, appAttempt.getRpcPort());
@@ -253,7 +253,7 @@ public class TestApplicationHistoryManagerOnTimelineStore {
@Test
public void testGetContainerReport() throws Exception {
final ContainerId containerId =
- ContainerId.newInstance(ApplicationAttemptId.newInstance(
+ ContainerId.newContainerId(ApplicationAttemptId.newInstance(
ApplicationId.newInstance(0, 1), 1), 1);
ContainerReport container;
if (callerUGI == null) {
@@ -466,7 +466,7 @@ public class TestApplicationHistoryManagerOnTimelineStore {
eventInfo.put(AppAttemptMetricsConstants.HOST_EVENT_INFO, "test host");
eventInfo.put(AppAttemptMetricsConstants.RPC_PORT_EVENT_INFO, 100);
eventInfo.put(AppAttemptMetricsConstants.MASTER_CONTAINER_EVENT_INFO,
- ContainerId.newInstance(appAttemptId, 1));
+ ContainerId.newContainerId(appAttemptId, 1));
tEvent.setEventInfo(eventInfo);
entity.addEvent(tEvent);
tEvent = new TimelineEvent();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
index 3a75d9e..c91d9f5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
@@ -121,7 +121,7 @@ public class TestFileSystemApplicationHistoryStore extends
}
// write container history data
for (int k = 1; k <= num; ++k) {
- ContainerId containerId = ContainerId.newInstance(appAttemptId, k);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, k);
writeContainerStartData(containerId);
if (missingContainer && k == num) {
continue;
@@ -172,7 +172,7 @@ public class TestFileSystemApplicationHistoryStore extends
// read container history data
Assert.assertEquals(num, store.getContainers(appAttemptId).size());
for (int k = 1; k <= num; ++k) {
- ContainerId containerId = ContainerId.newInstance(appAttemptId, k);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, k);
ContainerHistoryData containerData = store.getContainer(containerId);
Assert.assertNotNull(containerData);
Assert.assertEquals(Priority.newInstance(containerId.getId()),
@@ -187,7 +187,7 @@ public class TestFileSystemApplicationHistoryStore extends
ContainerHistoryData masterContainer =
store.getAMContainer(appAttemptId);
Assert.assertNotNull(masterContainer);
- Assert.assertEquals(ContainerId.newInstance(appAttemptId, 1),
+ Assert.assertEquals(ContainerId.newContainerId(appAttemptId, 1),
masterContainer.getContainerId());
}
}
@@ -215,7 +215,7 @@ public class TestFileSystemApplicationHistoryStore extends
Assert.assertTrue(e.getMessage().contains("is not opened"));
}
// write container history data
- ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
try {
writeContainerStartData(containerId);
Assert.fail();
@@ -240,7 +240,7 @@ public class TestFileSystemApplicationHistoryStore extends
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
for (int i = 1; i <= 100000; ++i) {
- ContainerId containerId = ContainerId.newInstance(appAttemptId, i);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, i);
writeContainerStartData(containerId);
writeContainerFinishData(containerId);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java
index 6e9e242..556db2b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java
@@ -137,7 +137,7 @@ public class TestMemoryApplicationHistoryStore extends
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
- ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
try {
writeContainerFinishData(containerId);
Assert.fail();
@@ -149,14 +149,14 @@ public class TestMemoryApplicationHistoryStore extends
writeApplicationAttemptStartData(appAttemptId);
int numContainers = 5;
for (int i = 1; i <= numContainers; ++i) {
- containerId = ContainerId.newInstance(appAttemptId, i);
+ containerId = ContainerId.newContainerId(appAttemptId, i);
writeContainerStartData(containerId);
writeContainerFinishData(containerId);
}
Assert
.assertEquals(numContainers, store.getContainers(appAttemptId).size());
for (int i = 1; i <= numContainers; ++i) {
- containerId = ContainerId.newInstance(appAttemptId, i);
+ containerId = ContainerId.newContainerId(appAttemptId, i);
ContainerHistoryData data = store.getContainer(containerId);
Assert.assertNotNull(data);
Assert.assertEquals(Priority.newInstance(containerId.getId()),
@@ -165,11 +165,11 @@ public class TestMemoryApplicationHistoryStore extends
}
ContainerHistoryData masterContainer = store.getAMContainer(appAttemptId);
Assert.assertNotNull(masterContainer);
- Assert.assertEquals(ContainerId.newInstance(appAttemptId, 1),
+ Assert.assertEquals(ContainerId.newContainerId(appAttemptId, 1),
masterContainer.getContainerId());
writeApplicationAttemptFinishData(appAttemptId);
// Write again
- containerId = ContainerId.newInstance(appAttemptId, 1);
+ containerId = ContainerId.newContainerId(appAttemptId, 1);
try {
writeContainerStartData(containerId);
Assert.fail();
@@ -194,7 +194,7 @@ public class TestMemoryApplicationHistoryStore extends
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
for (int i = 1; i <= numContainers; ++i) {
- ContainerId containerId = ContainerId.newInstance(appAttemptId, i);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, i);
writeContainerStartData(containerId);
writeContainerFinishData(containerId);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
index 82c4276..7bac6f2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
@@ -134,7 +134,7 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
containerPageInstance.set(
YarnWebParams.CONTAINER_ID,
ContainerId
- .newInstance(
+ .newContainerId(
ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1),
1).toString());
containerPageInstance.render();
@@ -153,7 +153,7 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
ApplicationAttemptId.newInstance(appId, j);
writeApplicationAttemptStartData(appAttemptId);
for (int k = 1; k <= numContainers; ++k) {
- ContainerId containerId = ContainerId.newInstance(appAttemptId, k);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, k);
writeContainerStartData(containerId);
writeContainerFinishData(containerId);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cd07b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
index da39ce3..76bf8c3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
@@ -338,7 +338,7 @@ public class TestAHSWebServices extends JerseyTest {
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
- ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
+ ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
WebResource r = resource();
ClientResponse response =
r.path("ws").path("v1").path("applicationhistory").path("apps")
[11/25] hadoop git commit: Release Notes for hadoop-2.6.0.
Posted by vi...@apache.org.
Release Notes for hadoop-2.6.0.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab30d513
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab30d513
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab30d513
Branch: refs/heads/HDFS-EC
Commit: ab30d513b6979195853a1b114062124ba4295e35
Parents: b31b4bf
Author: Arun C. Murthy <ac...@apache.org>
Authored: Sun Nov 9 19:15:03 2014 -0800
Committer: Arun C. Murthy <ac...@apache.org>
Committed: Sun Nov 9 19:15:03 2014 -0800
----------------------------------------------------------------------
.../src/main/docs/releasenotes.html | 3525 ++++++++++++++++++
1 file changed, 3525 insertions(+)
----------------------------------------------------------------------
[13/25] hadoop git commit: Set the release date for 2.5.2
Posted by vi...@apache.org.
Set the release date for 2.5.2
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68a05087
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68a05087
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68a05087
Branch: refs/heads/HDFS-EC
Commit: 68a050872896f93582f95420dd6e2bdefa4fd7cc
Parents: eace218
Author: Karthik Kambatla <ka...@apache.org>
Authored: Mon Nov 10 15:22:53 2014 -0800
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Mon Nov 10 15:22:53 2014 -0800
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 2 +-
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +-
hadoop-mapreduce-project/CHANGES.txt | 2 +-
hadoop-yarn-project/CHANGES.txt | 2 +-
4 files changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68a05087/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 86d81ad..d3c4c00 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1097,7 +1097,7 @@ Release 2.6.0 - 2014-11-15
HADOOP-11282. Skip NFS TestShellBasedIdMapping tests that are irrelevant on
Windows. (cnauroth)
-Release 2.5.2 - UNRELEASED
+Release 2.5.2 - 2014-11-10
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68a05087/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index beac1c3..067868c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1418,7 +1418,7 @@ Release 2.6.0 - 2014-11-15
HDFS-7383. DataNode.requestShortCircuitFdsForRead may throw
NullPointerException. (szetszwo via suresh)
-Release 2.5.2 - UNRELEASED
+Release 2.5.2 - 2014-11-10
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68a05087/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 44dc557..96bb690 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -465,7 +465,7 @@ Release 2.6.0 - 2014-11-15
MAPREDUCE-5958. Wrong reduce task progress if map output is compressed
(Emilio Coppa and jlowe via kihwal)
-Release 2.5.2 - UNRELEASED
+Release 2.5.2 - 2014-11-10
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68a05087/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b76e28f..259f4e2 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -929,7 +929,7 @@ Release 2.6.0 - 2014-11-15
consistent with the (somewhat incorrect) behaviour in the non-recovery case.
(Jian He via vinodkv)
-Release 2.5.2 - UNRELEASED
+Release 2.5.2 - 2014-11-10
INCOMPATIBLE CHANGES
[19/25] hadoop git commit: YARN-2843. Fixed NodeLabelsManager to trim
inputs for hosts and labels so as to make them work correctly. Contributed by
Wangda Tan.
Posted by vi...@apache.org.
YARN-2843. Fixed NodeLabelsManager to trim inputs for hosts and labels so as to make them work correctly. Contributed by Wangda Tan.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0fd97f9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0fd97f9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0fd97f9c
Branch: refs/heads/HDFS-EC
Commit: 0fd97f9c1989a793b882e6678285607472a3f75a
Parents: 061bc29
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Tue Nov 11 12:33:10 2014 -0800
Committer: Vinod Kumar Vavilapalli <vi...@apache.org>
Committed: Tue Nov 11 12:33:10 2014 -0800
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../hadoop/yarn/client/cli/RMAdminCLI.java | 14 ++----------
.../nodelabels/CommonNodeLabelsManager.java | 20 ++++++++++++++++-
.../apache/hadoop/yarn/util/ConverterUtils.java | 2 +-
.../yarn/nodelabels/NodeLabelTestBase.java | 12 +++++-----
.../nodelabels/TestCommonNodeLabelsManager.java | 23 ++++++++++++++++++++
6 files changed, 53 insertions(+), 21 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fd97f9c/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c6a063c..3dc6d9f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -77,6 +77,9 @@ Release 2.7.0 - UNRELEASED
YARN-2841. RMProxy should retry EOFException. (Jian He via xgong)
+ YARN-2843. Fixed NodeLabelsManager to trim inputs for hosts and labels so
+ as to make them work correctly. (Wangda Tan via vinodkv)
+
Release 2.6.0 - 2014-11-15
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fd97f9c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
index 65978c7..89d87cf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsC
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest;
+import org.apache.hadoop.yarn.util.ConverterUtils;
import com.google.common.collect.ImmutableMap;
@@ -393,18 +394,7 @@ public class RMAdminCLI extends HAAdmin {
throw new IOException("node name cannot be empty");
}
- String nodeName;
- int port;
- if (nodeIdStr.contains(":")) {
- nodeName = nodeIdStr.substring(0, nodeIdStr.indexOf(":"));
- port = Integer.valueOf(nodeIdStr.substring(nodeIdStr.indexOf(":") + 1));
- } else {
- nodeName = nodeIdStr;
- port = 0;
- }
-
- NodeId nodeId = NodeId.newInstance(nodeName, port);
-
+ NodeId nodeId = ConverterUtils.toNodeIdWithDefaultPort(nodeIdStr);
map.put(nodeId, new HashSet<String>());
for (int i = 1; i < splits.length; i++) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fd97f9c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
index 1d86211..daefe8d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
@@ -344,6 +344,7 @@ public class CommonNodeLabelsManager extends AbstractService {
*/
public void addLabelsToNode(Map<NodeId, Set<String>> addedLabelsToNode)
throws IOException {
+ addedLabelsToNode = normalizeNodeIdToLabels(addedLabelsToNode);
checkAddLabelsToNode(addedLabelsToNode);
internalAddLabelsToNode(addedLabelsToNode);
}
@@ -409,6 +410,8 @@ public class CommonNodeLabelsManager extends AbstractService {
*/
public void removeFromClusterNodeLabels(Collection<String> labelsToRemove)
throws IOException {
+ labelsToRemove = normalizeLabels(labelsToRemove);
+
checkRemoveFromClusterNodeLabels(labelsToRemove);
internalRemoveFromClusterNodeLabels(labelsToRemove);
@@ -518,6 +521,8 @@ public class CommonNodeLabelsManager extends AbstractService {
public void
removeLabelsFromNode(Map<NodeId, Set<String>> removeLabelsFromNode)
throws IOException {
+ removeLabelsFromNode = normalizeNodeIdToLabels(removeLabelsFromNode);
+
checkRemoveLabelsFromNode(removeLabelsFromNode);
internalRemoveLabelsFromNode(removeLabelsFromNode);
@@ -590,6 +595,8 @@ public class CommonNodeLabelsManager extends AbstractService {
*/
public void replaceLabelsOnNode(Map<NodeId, Set<String>> replaceLabelsToNode)
throws IOException {
+ replaceLabelsToNode = normalizeNodeIdToLabels(replaceLabelsToNode);
+
checkReplaceLabelsOnNode(replaceLabelsToNode);
internalReplaceLabelsOnNode(replaceLabelsToNode);
@@ -665,7 +672,7 @@ public class CommonNodeLabelsManager extends AbstractService {
return NO_LABEL;
}
- private Set<String> normalizeLabels(Set<String> labels) {
+ private Set<String> normalizeLabels(Collection<String> labels) {
Set<String> newLabels = new HashSet<String>();
for (String label : labels) {
newLabels.add(normalizeLabel(label));
@@ -732,4 +739,15 @@ public class CommonNodeLabelsManager extends AbstractService {
nodeCollections.put(hostName, host);
}
}
+
+ protected Map<NodeId, Set<String>> normalizeNodeIdToLabels(
+ Map<NodeId, Set<String>> nodeIdToLabels) {
+ Map<NodeId, Set<String>> newMap = new HashMap<NodeId, Set<String>>();
+ for (Entry<NodeId, Set<String>> entry : nodeIdToLabels.entrySet()) {
+ NodeId id = entry.getKey();
+ Set<String> labels = entry.getValue();
+ newMap.put(id, normalizeLabels(labels));
+ }
+ return newMap;
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fd97f9c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
index 012d799..73ec906 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
@@ -167,7 +167,7 @@ public class ConverterUtils {
}
try {
NodeId nodeId =
- NodeId.newInstance(parts[0], Integer.parseInt(parts[1]));
+ NodeId.newInstance(parts[0].trim(), Integer.parseInt(parts[1]));
return nodeId;
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Invalid port: " + parts[1], e);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fd97f9c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java
index 9749299..ff0e101 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java
@@ -19,7 +19,7 @@
package org.apache.hadoop.yarn.nodelabels;
import java.util.Collection;
-import java.util.Iterator;
+import java.util.HashSet;
import java.util.Map;
import java.util.Set;
@@ -49,12 +49,10 @@ public class NodeLabelTestBase {
public static void assertCollectionEquals(Collection<String> c1,
Collection<String> c2) {
- Assert.assertEquals(c1.size(), c2.size());
- Iterator<String> i1 = c1.iterator();
- Iterator<String> i2 = c2.iterator();
- while (i1.hasNext()) {
- Assert.assertEquals(i1.next(), i2.next());
- }
+ Set<String> s1 = new HashSet<String>(c1);
+ Set<String> s2 = new HashSet<String>(c2);
+ Assert.assertEquals(s1, s2);
+ Assert.assertTrue(s1.containsAll(s2));
}
public static <E> Set<E> toSet(E... elements) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fd97f9c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java
index ea29f3a..a56a595 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestCommonNodeLabelsManager.java
@@ -258,4 +258,27 @@ public class TestCommonNodeLabelsManager extends NodeLabelTestBase {
Assert.assertTrue(mgr.getClusterNodeLabels().isEmpty());
assertCollectionEquals(mgr.lastRemovedlabels, Arrays.asList("p2", "p3"));
}
+
+ @Test(timeout = 5000)
+ public void testTrimLabelsWhenAddRemoveNodeLabels() throws IOException {
+ mgr.addToCluserNodeLabels(toSet(" p1"));
+ assertCollectionEquals(mgr.getClusterNodeLabels(), toSet("p1"));
+ mgr.removeFromClusterNodeLabels(toSet("p1 "));
+ Assert.assertTrue(mgr.getClusterNodeLabels().isEmpty());
+ }
+
+ @Test(timeout = 5000)
+ public void testTrimLabelsWhenModifyLabelsOnNodes() throws IOException {
+ mgr.addToCluserNodeLabels(toSet(" p1", "p2"));
+ mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p1 ", "p2")));
+ assertMapEquals(
+ mgr.getNodeLabels(),
+ ImmutableMap.of(toNodeId("n1"), toSet("p1", "p2")));
+ mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet(" p2")));
+ assertMapEquals(
+ mgr.getNodeLabels(),
+ ImmutableMap.of(toNodeId("n1"), toSet("p2")));
+ mgr.removeLabelsFromNode(ImmutableMap.of(toNodeId("n1"), toSet(" p2 ")));
+ Assert.assertTrue(mgr.getNodeLabels().isEmpty());
+ }
}
\ No newline at end of file
[05/25] hadoop git commit: HADOOP-10786. Moved to hadoop-2.7.X.
Posted by vi...@apache.org.
HADOOP-10786. Moved to hadoop-2.7.X.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14b87b70
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14b87b70
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14b87b70
Branch: refs/heads/HDFS-EC
Commit: 14b87b70a8dfc03801dcf5f33caa7fd2cc589840
Parents: 4ddc5ca
Author: Arun C. Murthy <ac...@apache.org>
Authored: Sun Nov 9 18:18:22 2014 -0800
Committer: Arun C. Murthy <ac...@apache.org>
Committed: Sun Nov 9 18:18:22 2014 -0800
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b87b70/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 99010bd..ef487bb 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -369,6 +369,8 @@ Release 2.7.0 - UNRELEASED
HADOOP-10563. Remove the dependency of jsp in trunk. (wheat9)
+ HADOOP-10786. Fix UGI#reloginFromKeytab on Java 8. (Stephen Chu via wheat9)
+
OPTIMIZATIONS
BUG FIXES
@@ -728,8 +730,6 @@ Release 2.6.0 - UNRELEASED
HADOOP-11247. Fix a couple javac warnings in NFS. (Brandon Li via wheat9)
- HADOOP-10786. Fix UGI#reloginFromKeytab on Java 8. (Stephen Chu via wheat9)
-
BUG FIXES
HADOOP-11182. GraphiteSink emits wrong timestamps (Sascha Coenen via raviprak)
[20/25] hadoop git commit: HDFS-7381. Decouple the management of
block id and gen stamps from FSNamesystem. Contributed by Haohui Mai.
Posted by vi...@apache.org.
HDFS-7381. Decouple the management of block id and gen stamps from FSNamesystem. Contributed by Haohui Mai.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/571e9c62
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/571e9c62
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/571e9c62
Branch: refs/heads/HDFS-EC
Commit: 571e9c623241106dad5521a870fb8daef3f2b00a
Parents: 0fd97f9
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Nov 11 12:41:51 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Tue Nov 11 12:42:12 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../server/blockmanagement/BlockIdManager.java | 208 ++++++++++++++++++
.../SequentialBlockIdGenerator.java | 66 ++++++
.../hdfs/server/namenode/FSEditLogLoader.java | 6 +-
.../hdfs/server/namenode/FSImageFormat.java | 22 +-
.../server/namenode/FSImageFormatProtobuf.java | 23 +-
.../hdfs/server/namenode/FSNamesystem.java | 184 ++--------------
.../namenode/SequentialBlockIdGenerator.java | 66 ------
.../blockmanagement/TestSequentialBlockId.java | 197 +++++++++++++++++
.../hdfs/server/namenode/TestSaveNamespace.java | 11 +-
.../server/namenode/TestSequentialBlockId.java | 209 -------------------
11 files changed, 523 insertions(+), 472 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/571e9c62/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 067868c..8922a0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -350,6 +350,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7365. Remove hdfs.server.blockmanagement.MutableBlockCollection.
(Li Lu via wheat9)
+ HDFS-7381. Decouple the management of block id and gen stamps from
+ FSNamesystem. (wheat9)
+
OPTIMIZATIONS
BUG FIXES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/571e9c62/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
new file mode 100644
index 0000000..8f547f1
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -0,0 +1,208 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+
+import java.io.IOException;
+
+/**
+ * BlockIdManager allocates the generation stamps and the block ID. The
+ * {@see FSNamesystem} is responsible for persisting the allocations in the
+ * {@see EditLog}.
+ */
+public class BlockIdManager {
+ /**
+ * The global generation stamp for legacy blocks with randomly
+ * generated block IDs.
+ */
+ private final GenerationStamp generationStampV1 = new GenerationStamp();
+ /**
+ * The global generation stamp for this file system.
+ */
+ private final GenerationStamp generationStampV2 = new GenerationStamp();
+ /**
+ * The value of the generation stamp when the first switch to sequential
+ * block IDs was made. Blocks with generation stamps below this value
+ * have randomly allocated block IDs. Blocks with generation stamps above
+ * this value had sequentially allocated block IDs. Read from the fsImage
+ * (or initialized as an offset from the V1 (legacy) generation stamp on
+ * upgrade).
+ */
+ private long generationStampV1Limit;
+ /**
+ * The global block ID space for this file system.
+ */
+ private final SequentialBlockIdGenerator blockIdGenerator;
+
+ public BlockIdManager(BlockManager blockManager) {
+ this.generationStampV1Limit = GenerationStamp.GRANDFATHER_GENERATION_STAMP;
+ this.blockIdGenerator = new SequentialBlockIdGenerator(blockManager);
+ }
+
+ /**
+ * Upgrades the generation stamp for the filesystem
+ * by reserving a sufficient range for all existing blocks.
+ * Should be invoked only during the first upgrade to
+ * sequential block IDs.
+ */
+ public long upgradeGenerationStampToV2() {
+ Preconditions.checkState(generationStampV2.getCurrentValue() ==
+ GenerationStamp.LAST_RESERVED_STAMP);
+ generationStampV2.skipTo(generationStampV1.getCurrentValue() +
+ HdfsConstants.RESERVED_GENERATION_STAMPS_V1);
+
+ generationStampV1Limit = generationStampV2.getCurrentValue();
+ return generationStampV2.getCurrentValue();
+ }
+
+ /**
+ * Sets the generation stamp that delineates random and sequentially
+ * allocated block IDs.
+ *
+ * @param stamp set generation stamp limit to this value
+ */
+ public void setGenerationStampV1Limit(long stamp) {
+ Preconditions.checkState(generationStampV1Limit == GenerationStamp
+ .GRANDFATHER_GENERATION_STAMP);
+ generationStampV1Limit = stamp;
+ }
+
+ /**
+ * Gets the value of the generation stamp that delineates sequential
+ * and random block IDs.
+ */
+ public long getGenerationStampAtblockIdSwitch() {
+ return generationStampV1Limit;
+ }
+
+ @VisibleForTesting
+ SequentialBlockIdGenerator getBlockIdGenerator() {
+ return blockIdGenerator;
+ }
+
+ /**
+ * Sets the maximum allocated block ID for this filesystem. This is
+ * the basis for allocating new block IDs.
+ */
+ public void setLastAllocatedBlockId(long blockId) {
+ blockIdGenerator.skipTo(blockId);
+ }
+
+ /**
+ * Gets the maximum sequentially allocated block ID for this filesystem
+ */
+ public long getLastAllocatedBlockId() {
+ return blockIdGenerator.getCurrentValue();
+ }
+
+ /**
+ * Sets the current generation stamp for legacy blocks
+ */
+ public void setGenerationStampV1(long stamp) {
+ generationStampV1.setCurrentValue(stamp);
+ }
+
+ /**
+ * Gets the current generation stamp for legacy blocks
+ */
+ public long getGenerationStampV1() {
+ return generationStampV1.getCurrentValue();
+ }
+
+ /**
+ * Gets the current generation stamp for this filesystem
+ */
+ public void setGenerationStampV2(long stamp) {
+ generationStampV2.setCurrentValue(stamp);
+ }
+
+ public long getGenerationStampV2() {
+ return generationStampV2.getCurrentValue();
+ }
+
+ /**
+ * Increments, logs and then returns the stamp
+ */
+ public long nextGenerationStamp(boolean legacyBlock) throws IOException {
+ return legacyBlock ? getNextGenerationStampV1() :
+ getNextGenerationStampV2();
+ }
+
+ @VisibleForTesting
+ long getNextGenerationStampV1() throws IOException {
+ long genStampV1 = generationStampV1.nextValue();
+
+ if (genStampV1 >= generationStampV1Limit) {
+ // We ran out of generation stamps for legacy blocks. In practice, it
+ // is extremely unlikely as we reserved 1T v1 generation stamps. The
+ // result is that we can no longer append to the legacy blocks that
+ // were created before the upgrade to sequential block IDs.
+ throw new OutOfV1GenerationStampsException();
+ }
+
+ return genStampV1;
+ }
+
+ @VisibleForTesting
+ long getNextGenerationStampV2() {
+ return generationStampV2.nextValue();
+ }
+
+ public long getGenerationStampV1Limit() {
+ return generationStampV1Limit;
+ }
+
+ /**
+ * Determine whether the block ID was randomly generated (legacy) or
+ * sequentially generated. The generation stamp value is used to
+ * make the distinction.
+ *
+ * @return true if the block ID was randomly generated, false otherwise.
+ */
+ public boolean isLegacyBlock(Block block) {
+ return block.getGenerationStamp() < getGenerationStampV1Limit();
+ }
+
+ /**
+ * Increments, logs and then returns the block ID
+ */
+ public long nextBlockId() {
+ return blockIdGenerator.nextValue();
+ }
+
+ public boolean isGenStampInFuture(Block block) {
+ if (isLegacyBlock(block)) {
+ return block.getGenerationStamp() > getGenerationStampV1();
+ } else {
+ return block.getGenerationStamp() > getGenerationStampV2();
+ }
+ }
+
+ public void clear() {
+ generationStampV1.setCurrentValue(GenerationStamp.LAST_RESERVED_STAMP);
+ generationStampV2.setCurrentValue(GenerationStamp.LAST_RESERVED_STAMP);
+ getBlockIdGenerator().setCurrentValue(SequentialBlockIdGenerator
+ .LAST_RESERVED_BLOCK_ID);
+ setGenerationStampV1Limit(GenerationStamp.GRANDFATHER_GENERATION_STAMP);
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/571e9c62/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SequentialBlockIdGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SequentialBlockIdGenerator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SequentialBlockIdGenerator.java
new file mode 100644
index 0000000..eef8857
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SequentialBlockIdGenerator.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.util.SequentialNumber;
+
+/**
+ * Generate the next valid block ID by incrementing the maximum block
+ * ID allocated so far, starting at 2^30+1.
+ *
+ * Block IDs used to be allocated randomly in the past. Hence we may
+ * find some conflicts while stepping through the ID space sequentially.
+ * However given the sparsity of the ID space, conflicts should be rare
+ * and can be skipped over when detected.
+ */
+@InterfaceAudience.Private
+public class SequentialBlockIdGenerator extends SequentialNumber {
+ /**
+ * The last reserved block ID.
+ */
+ public static final long LAST_RESERVED_BLOCK_ID = 1024L * 1024 * 1024;
+
+ private final BlockManager blockManager;
+
+ SequentialBlockIdGenerator(BlockManager blockManagerRef) {
+ super(LAST_RESERVED_BLOCK_ID);
+ this.blockManager = blockManagerRef;
+ }
+
+ @Override // NumberGenerator
+ public long nextValue() {
+ Block b = new Block(super.nextValue());
+
+ // There may be an occasional conflict with randomly generated
+ // block IDs. Skip over the conflicts.
+ while(isValidBlock(b)) {
+ b.setBlockId(super.nextValue());
+ }
+ return b.getBlockId();
+ }
+
+ /**
+ * Returns whether the given block is one pointed-to by a file.
+ */
+ private boolean isValidBlock(Block b) {
+ return (blockManager.getBlockCollection(b) != null);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/571e9c62/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 492a5ac..716768e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -532,7 +532,7 @@ public class FSEditLogLoader {
}
case OP_SET_GENSTAMP_V1: {
SetGenstampV1Op setGenstampV1Op = (SetGenstampV1Op)op;
- fsNamesys.setGenerationStampV1(setGenstampV1Op.genStampV1);
+ fsNamesys.getBlockIdManager().setGenerationStampV1(setGenstampV1Op.genStampV1);
break;
}
case OP_SET_PERMISSIONS: {
@@ -722,12 +722,12 @@ public class FSEditLogLoader {
}
case OP_SET_GENSTAMP_V2: {
SetGenstampV2Op setGenstampV2Op = (SetGenstampV2Op) op;
- fsNamesys.setGenerationStampV2(setGenstampV2Op.genStampV2);
+ fsNamesys.getBlockIdManager().setGenerationStampV2(setGenstampV2Op.genStampV2);
break;
}
case OP_ALLOCATE_BLOCK_ID: {
AllocateBlockIdOp allocateBlockIdOp = (AllocateBlockIdOp) op;
- fsNamesys.setLastAllocatedBlockId(allocateBlockIdOp.blockId);
+ fsNamesys.getBlockIdManager().setLastAllocatedBlockId(allocateBlockIdOp.blockId);
break;
}
case OP_ROLLING_UPGRADE_START: {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/571e9c62/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 7864092..76f51cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -341,24 +341,26 @@ public class FSImageFormat {
// read in the last generation stamp for legacy blocks.
long genstamp = in.readLong();
- namesystem.setGenerationStampV1(genstamp);
-
+ namesystem.getBlockIdManager().setGenerationStampV1(genstamp);
+
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.SEQUENTIAL_BLOCK_ID, imgVersion)) {
// read the starting generation stamp for sequential block IDs
genstamp = in.readLong();
- namesystem.setGenerationStampV2(genstamp);
+ namesystem.getBlockIdManager().setGenerationStampV2(genstamp);
// read the last generation stamp for blocks created after
// the switch to sequential block IDs.
long stampAtIdSwitch = in.readLong();
- namesystem.setGenerationStampV1Limit(stampAtIdSwitch);
+ namesystem.getBlockIdManager().setGenerationStampV1Limit(stampAtIdSwitch);
// read the max sequential block ID.
long maxSequentialBlockId = in.readLong();
- namesystem.setLastAllocatedBlockId(maxSequentialBlockId);
+ namesystem.getBlockIdManager().setLastAllocatedBlockId(maxSequentialBlockId);
} else {
- long startingGenStamp = namesystem.upgradeGenerationStampToV2();
+
+ long startingGenStamp = namesystem.getBlockIdManager()
+ .upgradeGenerationStampToV2();
// This is an upgrade.
LOG.info("Upgrading to sequential block IDs. Generation stamp " +
"for new blocks set to " + startingGenStamp);
@@ -1251,10 +1253,10 @@ public class FSImageFormat {
out.writeInt(sourceNamesystem.unprotectedGetNamespaceInfo()
.getNamespaceID());
out.writeLong(numINodes);
- out.writeLong(sourceNamesystem.getGenerationStampV1());
- out.writeLong(sourceNamesystem.getGenerationStampV2());
- out.writeLong(sourceNamesystem.getGenerationStampAtblockIdSwitch());
- out.writeLong(sourceNamesystem.getLastAllocatedBlockId());
+ out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampV1());
+ out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampV2());
+ out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampAtblockIdSwitch());
+ out.writeLong(sourceNamesystem.getBlockIdManager().getLastAllocatedBlockId());
out.writeLong(context.getTxId());
out.writeLong(sourceNamesystem.getLastInodeId());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/571e9c62/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
index 4387cff..3ee848a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
@@ -46,8 +46,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
-import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
-import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection;
@@ -293,10 +292,11 @@ public final class FSImageFormatProtobuf {
private void loadNameSystemSection(InputStream in) throws IOException {
NameSystemSection s = NameSystemSection.parseDelimitedFrom(in);
- fsn.setGenerationStampV1(s.getGenstampV1());
- fsn.setGenerationStampV2(s.getGenstampV2());
- fsn.setGenerationStampV1Limit(s.getGenstampV1Limit());
- fsn.setLastAllocatedBlockId(s.getLastAllocatedBlockId());
+ BlockIdManager blockIdManager = fsn.getBlockIdManager();
+ blockIdManager.setGenerationStampV1(s.getGenstampV1());
+ blockIdManager.setGenerationStampV2(s.getGenstampV2());
+ blockIdManager.setGenerationStampV1Limit(s.getGenstampV1Limit());
+ blockIdManager.setLastAllocatedBlockId(s.getLastAllocatedBlockId());
imgTxId = s.getTransactionId();
if (s.hasRollingUpgradeStartTime()
&& fsn.getFSImage().hasRollbackFSImage()) {
@@ -407,7 +407,7 @@ public final class FSImageFormatProtobuf {
FileOutputStream fout = new FileOutputStream(file);
fileChannel = fout.getChannel();
try {
- saveInternal(fout, compression, file.getAbsolutePath().toString());
+ saveInternal(fout, compression, file.getAbsolutePath());
} finally {
fout.close();
}
@@ -531,11 +531,12 @@ public final class FSImageFormatProtobuf {
throws IOException {
final FSNamesystem fsn = context.getSourceNamesystem();
OutputStream out = sectionOutputStream;
+ BlockIdManager blockIdManager = fsn.getBlockIdManager();
NameSystemSection.Builder b = NameSystemSection.newBuilder()
- .setGenstampV1(fsn.getGenerationStampV1())
- .setGenstampV1Limit(fsn.getGenerationStampV1Limit())
- .setGenstampV2(fsn.getGenerationStampV2())
- .setLastAllocatedBlockId(fsn.getLastAllocatedBlockId())
+ .setGenstampV1(blockIdManager.getGenerationStampV1())
+ .setGenstampV1Limit(blockIdManager.getGenerationStampV1Limit())
+ .setGenstampV2(blockIdManager.getGenerationStampV2())
+ .setLastAllocatedBlockId(blockIdManager.getLastAllocatedBlockId())
.setTransactionId(context.getTxId());
// We use the non-locked version of getNamespaceInfo here since
http://git-wip-us.apache.org/repos/asf/hadoop/blob/571e9c62/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 52c12c0..b086390 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -204,6 +204,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager.SecretManagerState;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -211,8 +212,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.OutOfV1GenerationStampsException;
-import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
@@ -331,6 +330,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
};
+ private final BlockIdManager blockIdManager;
+
@VisibleForTesting
public boolean isAuditEnabled() {
return !isDefaultAuditLogger || auditLog.isInfoEnabled();
@@ -490,34 +491,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
private final long minBlockSize; // minimum block size
private final long maxBlocksPerFile; // maximum # of blocks per file
- /**
- * The global generation stamp for legacy blocks with randomly
- * generated block IDs.
- */
- private final GenerationStamp generationStampV1 = new GenerationStamp();
-
- /**
- * The global generation stamp for this file system.
- */
- private final GenerationStamp generationStampV2 = new GenerationStamp();
-
- /**
- * The value of the generation stamp when the first switch to sequential
- * block IDs was made. Blocks with generation stamps below this value
- * have randomly allocated block IDs. Blocks with generation stamps above
- * this value had sequentially allocated block IDs. Read from the fsImage
- * (or initialized as an offset from the V1 (legacy) generation stamp on
- * upgrade).
- */
- private long generationStampV1Limit =
- GenerationStamp.GRANDFATHER_GENERATION_STAMP;
-
- /**
- * The global block ID space for this file system.
- */
- @VisibleForTesting
- private final SequentialBlockIdGenerator blockIdGenerator;
-
// precision of access times.
private final long accessTimePrecision;
@@ -646,11 +619,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
void clear() {
dir.reset();
dtSecretManager.reset();
- generationStampV1.setCurrentValue(GenerationStamp.LAST_RESERVED_STAMP);
- generationStampV2.setCurrentValue(GenerationStamp.LAST_RESERVED_STAMP);
- blockIdGenerator.setCurrentValue(
- SequentialBlockIdGenerator.LAST_RESERVED_BLOCK_ID);
- generationStampV1Limit = GenerationStamp.GRANDFATHER_GENERATION_STAMP;
+ blockIdManager.clear();
leaseManager.removeAllLeases();
inodeId.setCurrentValue(INodeId.LAST_RESERVED_ID);
snapshotManager.clearSnapshottableDirs();
@@ -798,7 +767,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
this.blockManager = new BlockManager(this, this, conf);
this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
- this.blockIdGenerator = new SequentialBlockIdGenerator(this.blockManager);
+ this.blockIdManager = new BlockIdManager(blockManager);
this.isStoragePolicyEnabled =
conf.getBoolean(DFS_STORAGE_POLICY_ENABLED_KEY,
@@ -1360,7 +1329,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* @throws SafeModeException
* Otherwise if NameNode is in SafeMode.
*/
- private void checkNameNodeSafeMode(String errorMsg)
+ void checkNameNodeSafeMode(String errorMsg)
throws RetriableException, SafeModeException {
if (isInSafeMode()) {
SafeModeException se = new SafeModeException(errorMsg, safeMode);
@@ -4591,7 +4560,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return true;
}
// start recovery of the last block for this file
- long blockRecoveryId = nextGenerationStamp(isLegacyBlock(uc));
+ long blockRecoveryId = nextGenerationStamp(blockIdManager.isLegacyBlock(uc));
lease = reassignLease(lease, src, recoveryLeaseHolder, pendingFile);
uc.initializeBlockRecovery(blockRecoveryId);
leaseManager.renewLease(lease);
@@ -6728,91 +6697,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
/**
- * Sets the current generation stamp for legacy blocks
- */
- void setGenerationStampV1(long stamp) {
- generationStampV1.setCurrentValue(stamp);
- }
-
- /**
- * Gets the current generation stamp for legacy blocks
- */
- long getGenerationStampV1() {
- return generationStampV1.getCurrentValue();
- }
-
- /**
- * Gets the current generation stamp for this filesystem
- */
- void setGenerationStampV2(long stamp) {
- generationStampV2.setCurrentValue(stamp);
- }
-
- /**
- * Gets the current generation stamp for this filesystem
- */
- long getGenerationStampV2() {
- return generationStampV2.getCurrentValue();
- }
-
- /**
- * Upgrades the generation stamp for the filesystem
- * by reserving a sufficient range for all existing blocks.
- * Should be invoked only during the first upgrade to
- * sequential block IDs.
- */
- long upgradeGenerationStampToV2() {
- Preconditions.checkState(generationStampV2.getCurrentValue() ==
- GenerationStamp.LAST_RESERVED_STAMP);
-
- generationStampV2.skipTo(
- generationStampV1.getCurrentValue() +
- HdfsConstants.RESERVED_GENERATION_STAMPS_V1);
-
- generationStampV1Limit = generationStampV2.getCurrentValue();
- return generationStampV2.getCurrentValue();
- }
-
- /**
- * Sets the generation stamp that delineates random and sequentially
- * allocated block IDs.
- * @param stamp set generation stamp limit to this value
- */
- void setGenerationStampV1Limit(long stamp) {
- Preconditions.checkState(generationStampV1Limit ==
- GenerationStamp.GRANDFATHER_GENERATION_STAMP);
- generationStampV1Limit = stamp;
- }
-
- /**
- * Gets the value of the generation stamp that delineates sequential
- * and random block IDs.
- */
- long getGenerationStampAtblockIdSwitch() {
- return generationStampV1Limit;
- }
-
- @VisibleForTesting
- SequentialBlockIdGenerator getBlockIdGenerator() {
- return blockIdGenerator;
- }
-
- /**
- * Sets the maximum allocated block ID for this filesystem. This is
- * the basis for allocating new block IDs.
- */
- void setLastAllocatedBlockId(long blockId) {
- blockIdGenerator.skipTo(blockId);
- }
-
- /**
- * Gets the maximum sequentially allocated block ID for this filesystem
- */
- long getLastAllocatedBlockId() {
- return blockIdGenerator.getCurrentValue();
- }
-
- /**
* Increments, logs and then returns the stamp
*/
long nextGenerationStamp(boolean legacyBlock)
@@ -6820,12 +6704,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
assert hasWriteLock();
checkNameNodeSafeMode("Cannot get next generation stamp");
- long gs;
+ long gs = blockIdManager.nextGenerationStamp(legacyBlock);
if (legacyBlock) {
- gs = getNextGenerationStampV1();
getEditLog().logGenerationStampV1(gs);
} else {
- gs = getNextGenerationStampV2();
getEditLog().logGenerationStampV2(gs);
}
@@ -6833,47 +6715,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return gs;
}
- @VisibleForTesting
- long getNextGenerationStampV1() throws IOException {
- long genStampV1 = generationStampV1.nextValue();
-
- if (genStampV1 >= generationStampV1Limit) {
- // We ran out of generation stamps for legacy blocks. In practice, it
- // is extremely unlikely as we reserved 1T v1 generation stamps. The
- // result is that we can no longer append to the legacy blocks that
- // were created before the upgrade to sequential block IDs.
- throw new OutOfV1GenerationStampsException();
- }
-
- return genStampV1;
- }
-
- @VisibleForTesting
- long getNextGenerationStampV2() {
- return generationStampV2.nextValue();
- }
-
- long getGenerationStampV1Limit() {
- return generationStampV1Limit;
- }
-
- /**
- * Determine whether the block ID was randomly generated (legacy) or
- * sequentially generated. The generation stamp value is used to
- * make the distinction.
- * @return true if the block ID was randomly generated, false otherwise.
- */
- boolean isLegacyBlock(Block block) {
- return block.getGenerationStamp() < getGenerationStampV1Limit();
- }
-
/**
* Increments, logs and then returns the block ID
*/
private long nextBlockId() throws IOException {
assert hasWriteLock();
checkNameNodeSafeMode("Cannot get next block ID");
- final long blockId = blockIdGenerator.nextValue();
+ final long blockId = blockIdManager.nextBlockId();
getEditLog().logAllocateBlockId(blockId);
// NB: callers sync the log
return blockId;
@@ -6988,8 +6836,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
checkUCBlock(block, clientName);
// get a new generation stamp and an access token
- block.setGenerationStamp(
- nextGenerationStamp(isLegacyBlock(block.getLocalBlock())));
+ block.setGenerationStamp(nextGenerationStamp(blockIdManager.isLegacyBlock(block.getLocalBlock())));
locatedBlock = new LocatedBlock(block, new DatanodeInfo[0]);
blockManager.setBlockToken(locatedBlock, AccessMode.WRITE);
} finally {
@@ -7861,6 +7708,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
public BlockManager getBlockManager() {
return blockManager;
}
+
+ public BlockIdManager getBlockIdManager() {
+ return blockIdManager;
+ }
+
/** @return the FSDirectory. */
public FSDirectory getFSDirectory() {
return dir;
@@ -7928,11 +7780,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
@Override
public boolean isGenStampInFuture(Block block) {
- if (isLegacyBlock(block)) {
- return block.getGenerationStamp() > getGenerationStampV1();
- } else {
- return block.getGenerationStamp() > getGenerationStampV2();
- }
+ return blockIdManager.isGenStampInFuture(block);
}
@VisibleForTesting
http://git-wip-us.apache.org/repos/asf/hadoop/blob/571e9c62/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SequentialBlockIdGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SequentialBlockIdGenerator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SequentialBlockIdGenerator.java
deleted file mode 100644
index 650a69b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SequentialBlockIdGenerator.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.util.SequentialNumber;
-
-/**
- * Generate the next valid block ID by incrementing the maximum block
- * ID allocated so far, starting at 2^30+1.
- *
- * Block IDs used to be allocated randomly in the past. Hence we may
- * find some conflicts while stepping through the ID space sequentially.
- * However given the sparsity of the ID space, conflicts should be rare
- * and can be skipped over when detected.
- */
-@InterfaceAudience.Private
-public class SequentialBlockIdGenerator extends SequentialNumber {
- /**
- * The last reserved block ID.
- */
- public static final long LAST_RESERVED_BLOCK_ID = 1024L * 1024 * 1024;
-
- private final BlockManager blockManager;
-
- SequentialBlockIdGenerator(BlockManager blockManagerRef) {
- super(LAST_RESERVED_BLOCK_ID);
- this.blockManager = blockManagerRef;
- }
-
- @Override // NumberGenerator
- public long nextValue() {
- Block b = new Block(super.nextValue());
-
- // There may be an occasional conflict with randomly generated
- // block IDs. Skip over the conflicts.
- while(isValidBlock(b)) {
- b.setBlockId(super.nextValue());
- }
- return b.getBlockId();
- }
-
- /**
- * Returns whether the given block is one pointed-to by a file.
- */
- private boolean isValidBlock(Block b) {
- return (blockManager.getBlockCollection(b) != null);
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/571e9c62/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockId.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockId.java
new file mode 100644
index 0000000..8235294
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockId.java
@@ -0,0 +1,197 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.*;
+
+/**
+ * Tests the sequential block ID generation mechanism and block ID
+ * collision handling.
+ */
+public class TestSequentialBlockId {
+ private static final Log LOG = LogFactory.getLog("TestSequentialBlockId");
+
+ final int BLOCK_SIZE = 1024;
+ final int IO_SIZE = BLOCK_SIZE;
+ final short REPLICATION = 1;
+ final long SEED = 0;
+
+ /**
+ * Test that block IDs are generated sequentially.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testBlockIdGeneration() throws IOException {
+ Configuration conf = new HdfsConfiguration();
+ conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+ MiniDFSCluster cluster =
+ new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+
+ try {
+ cluster.waitActive();
+ FileSystem fs = cluster.getFileSystem();
+
+ // Create a file that is 10 blocks long.
+ Path path = new Path("testBlockIdGeneration.dat");
+ DFSTestUtil.createFile(
+ fs, path, IO_SIZE, BLOCK_SIZE * 10, BLOCK_SIZE, REPLICATION, SEED);
+ List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs, path);
+ LOG.info("Block0 id is " + blocks.get(0).getBlock().getBlockId());
+ long nextBlockExpectedId = blocks.get(0).getBlock().getBlockId() + 1;
+
+ // Ensure that the block IDs are sequentially increasing.
+ for (int i = 1; i < blocks.size(); ++i) {
+ long nextBlockId = blocks.get(i).getBlock().getBlockId();
+ LOG.info("Block" + i + " id is " + nextBlockId);
+ assertThat(nextBlockId, is(nextBlockExpectedId));
+ ++nextBlockExpectedId;
+ }
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
+ /**
+ * Test that collisions in the block ID space are handled gracefully.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testTriggerBlockIdCollision() throws IOException {
+ Configuration conf = new HdfsConfiguration();
+ conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+ MiniDFSCluster cluster =
+ new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+
+ try {
+ cluster.waitActive();
+ FileSystem fs = cluster.getFileSystem();
+ FSNamesystem fsn = cluster.getNamesystem();
+ final int blockCount = 10;
+
+
+ // Create a file with a few blocks to rev up the global block ID
+ // counter.
+ Path path1 = new Path("testBlockIdCollisionDetection_file1.dat");
+ DFSTestUtil.createFile(
+ fs, path1, IO_SIZE, BLOCK_SIZE * blockCount,
+ BLOCK_SIZE, REPLICATION, SEED);
+ List<LocatedBlock> blocks1 = DFSTestUtil.getAllBlocks(fs, path1);
+
+
+ // Rewind the block ID counter in the name system object. This will result
+ // in block ID collisions when we try to allocate new blocks.
+ SequentialBlockIdGenerator blockIdGenerator = fsn.getBlockIdManager()
+ .getBlockIdGenerator();
+ blockIdGenerator.setCurrentValue(blockIdGenerator.getCurrentValue() - 5);
+
+ // Trigger collisions by creating a new file.
+ Path path2 = new Path("testBlockIdCollisionDetection_file2.dat");
+ DFSTestUtil.createFile(
+ fs, path2, IO_SIZE, BLOCK_SIZE * blockCount,
+ BLOCK_SIZE, REPLICATION, SEED);
+ List<LocatedBlock> blocks2 = DFSTestUtil.getAllBlocks(fs, path2);
+ assertThat(blocks2.size(), is(blockCount));
+
+ // Make sure that file2 block IDs start immediately after file1
+ assertThat(blocks2.get(0).getBlock().getBlockId(),
+ is(blocks1.get(9).getBlock().getBlockId() + 1));
+
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
+ /**
+ * Test that the block type (legacy or not) can be correctly detected
+ * based on its generation stamp.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testBlockTypeDetection() throws IOException {
+
+ // Setup a mock object and stub out a few routines to
+ // retrieve the generation stamp counters.
+ BlockIdManager bid = mock(BlockIdManager.class);
+ final long maxGenStampForLegacyBlocks = 10000;
+
+ when(bid.getGenerationStampV1Limit())
+ .thenReturn(maxGenStampForLegacyBlocks);
+
+ Block legacyBlock = spy(new Block());
+ when(legacyBlock.getGenerationStamp())
+ .thenReturn(maxGenStampForLegacyBlocks/2);
+
+ Block newBlock = spy(new Block());
+ when(newBlock.getGenerationStamp())
+ .thenReturn(maxGenStampForLegacyBlocks+1);
+
+ // Make sure that isLegacyBlock() can correctly detect
+ // legacy and new blocks.
+ when(bid.isLegacyBlock(any(Block.class))).thenCallRealMethod();
+ assertThat(bid.isLegacyBlock(legacyBlock), is(true));
+ assertThat(bid.isLegacyBlock(newBlock), is(false));
+ }
+
+ /**
+ * Test that the generation stamp for legacy and new blocks is updated
+ * as expected.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testGenerationStampUpdate() throws IOException {
+ // Setup a mock object and stub out a few routines to
+ // retrieve the generation stamp counters.
+ BlockIdManager bid = mock(BlockIdManager.class);
+ final long nextGenerationStampV1 = 5000;
+ final long nextGenerationStampV2 = 20000;
+
+ when(bid.getNextGenerationStampV1())
+ .thenReturn(nextGenerationStampV1);
+ when(bid.getNextGenerationStampV2())
+ .thenReturn(nextGenerationStampV2);
+
+ // Make sure that the generation stamp is set correctly for both
+ // kinds of blocks.
+ when(bid.nextGenerationStamp(anyBoolean())).thenCallRealMethod();
+ assertThat(bid.nextGenerationStamp(true), is(nextGenerationStampV1));
+ assertThat(bid.nextGenerationStamp(false), is(nextGenerationStampV2));
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/571e9c62/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
index 4ef8798..1821e98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
@@ -516,8 +517,10 @@ public class TestSaveNamespace {
FSNamesystem spyFsn = spy(fsn);
final FSNamesystem finalFsn = spyFsn;
DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
- doAnswer(delayer).when(spyFsn).getGenerationStampV2();
-
+ BlockIdManager bid = spy(spyFsn.getBlockIdManager());
+ Whitebox.setInternalState(finalFsn, "blockIdManager", bid);
+ doAnswer(delayer).when(bid).getGenerationStampV2();
+
ExecutorService pool = Executors.newFixedThreadPool(2);
try {
@@ -572,9 +575,7 @@ public class TestSaveNamespace {
NNStorage.getImageFileName(0) + MD5FileUtils.MD5_SUFFIX);
}
} finally {
- if (fsn != null) {
- fsn.close();
- }
+ fsn.close();
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/571e9c62/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSequentialBlockId.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSequentialBlockId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSequentialBlockId.java
deleted file mode 100644
index e180d75..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSequentialBlockId.java
+++ /dev/null
@@ -1,209 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.util.DataChecksum;
-import org.junit.Test;
-
-import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.*;
-
-/**
- * Tests the sequential block ID generation mechanism and block ID
- * collision handling.
- */
-public class TestSequentialBlockId {
-
- private static final Log LOG = LogFactory.getLog("TestSequentialBlockId");
-
- private static final DataChecksum DEFAULT_CHECKSUM =
- DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);
-
- final int BLOCK_SIZE = 1024;
- final int IO_SIZE = BLOCK_SIZE;
- final short REPLICATION = 1;
- final long SEED = 0;
-
- DatanodeID datanode;
- InetSocketAddress dnAddr;
-
- /**
- * Test that block IDs are generated sequentially.
- *
- * @throws IOException
- */
- @Test
- public void testBlockIdGeneration() throws IOException {
- Configuration conf = new HdfsConfiguration();
- conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
- MiniDFSCluster cluster =
- new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
-
- try {
- cluster.waitActive();
- FileSystem fs = cluster.getFileSystem();
-
- // Create a file that is 10 blocks long.
- Path path = new Path("testBlockIdGeneration.dat");
- DFSTestUtil.createFile(
- fs, path, IO_SIZE, BLOCK_SIZE * 10, BLOCK_SIZE, REPLICATION, SEED);
- List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs, path);
- LOG.info("Block0 id is " + blocks.get(0).getBlock().getBlockId());
- long nextBlockExpectedId = blocks.get(0).getBlock().getBlockId() + 1;
-
- // Ensure that the block IDs are sequentially increasing.
- for (int i = 1; i < blocks.size(); ++i) {
- long nextBlockId = blocks.get(i).getBlock().getBlockId();
- LOG.info("Block" + i + " id is " + nextBlockId);
- assertThat(nextBlockId, is(nextBlockExpectedId));
- ++nextBlockExpectedId;
- }
- } finally {
- cluster.shutdown();
- }
- }
-
- /**
- * Test that collisions in the block ID space are handled gracefully.
- *
- * @throws IOException
- */
- @Test
- public void testTriggerBlockIdCollision() throws IOException {
- Configuration conf = new HdfsConfiguration();
- conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
- MiniDFSCluster cluster =
- new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
-
- try {
- cluster.waitActive();
- FileSystem fs = cluster.getFileSystem();
- FSNamesystem fsn = cluster.getNamesystem();
- final int blockCount = 10;
-
-
- // Create a file with a few blocks to rev up the global block ID
- // counter.
- Path path1 = new Path("testBlockIdCollisionDetection_file1.dat");
- DFSTestUtil.createFile(
- fs, path1, IO_SIZE, BLOCK_SIZE * blockCount,
- BLOCK_SIZE, REPLICATION, SEED);
- List<LocatedBlock> blocks1 = DFSTestUtil.getAllBlocks(fs, path1);
-
-
- // Rewind the block ID counter in the name system object. This will result
- // in block ID collisions when we try to allocate new blocks.
- SequentialBlockIdGenerator blockIdGenerator = fsn.getBlockIdGenerator();
- blockIdGenerator.setCurrentValue(blockIdGenerator.getCurrentValue() - 5);
-
- // Trigger collisions by creating a new file.
- Path path2 = new Path("testBlockIdCollisionDetection_file2.dat");
- DFSTestUtil.createFile(
- fs, path2, IO_SIZE, BLOCK_SIZE * blockCount,
- BLOCK_SIZE, REPLICATION, SEED);
- List<LocatedBlock> blocks2 = DFSTestUtil.getAllBlocks(fs, path2);
- assertThat(blocks2.size(), is(blockCount));
-
- // Make sure that file2 block IDs start immediately after file1
- assertThat(blocks2.get(0).getBlock().getBlockId(),
- is(blocks1.get(9).getBlock().getBlockId() + 1));
-
- } finally {
- cluster.shutdown();
- }
- }
-
- /**
- * Test that the block type (legacy or not) can be correctly detected
- * based on its generation stamp.
- *
- * @throws IOException
- */
- @Test
- public void testBlockTypeDetection() throws IOException {
-
- // Setup a mock object and stub out a few routines to
- // retrieve the generation stamp counters.
- FSNamesystem fsn = mock(FSNamesystem.class);
- final long maxGenStampForLegacyBlocks = 10000;
-
- when(fsn.getGenerationStampV1Limit())
- .thenReturn(maxGenStampForLegacyBlocks);
-
- Block legacyBlock = spy(new Block());
- when(legacyBlock.getGenerationStamp())
- .thenReturn(maxGenStampForLegacyBlocks/2);
-
- Block newBlock = spy(new Block());
- when(newBlock.getGenerationStamp())
- .thenReturn(maxGenStampForLegacyBlocks+1);
-
- // Make sure that isLegacyBlock() can correctly detect
- // legacy and new blocks.
- when(fsn.isLegacyBlock(any(Block.class))).thenCallRealMethod();
- assertThat(fsn.isLegacyBlock(legacyBlock), is(true));
- assertThat(fsn.isLegacyBlock(newBlock), is(false));
- }
-
- /**
- * Test that the generation stamp for legacy and new blocks is updated
- * as expected.
- *
- * @throws IOException
- */
- @Test
- public void testGenerationStampUpdate() throws IOException {
-
- // Setup a mock object and stub out a few routines to
- // retrieve the generation stamp counters.
- FSNamesystem fsn = mock(FSNamesystem.class);
- FSEditLog editLog = mock(FSEditLog.class);
- final long nextGenerationStampV1 = 5000;
- final long nextGenerationStampV2 = 20000;
-
- when(fsn.getNextGenerationStampV1())
- .thenReturn(nextGenerationStampV1);
- when(fsn.getNextGenerationStampV2())
- .thenReturn(nextGenerationStampV2);
-
- // Make sure that the generation stamp is set correctly for both
- // kinds of blocks.
- when(fsn.nextGenerationStamp(anyBoolean())).thenCallRealMethod();
- when(fsn.hasWriteLock()).thenReturn(true);
- when(fsn.getEditLog()).thenReturn(editLog);
- assertThat(fsn.nextGenerationStamp(true), is(nextGenerationStampV1));
- assertThat(fsn.nextGenerationStamp(false), is(nextGenerationStampV2));
- }
-}
[21/25] hadoop git commit: HDFS-7387. NFS may only do partial commit
due to a race between COMMIT and write. Contributed by Brandon Li
Posted by vi...@apache.org.
HDFS-7387. NFS may only do partial commit due to a race between COMMIT and write. Contributed by Brandon Li
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99d9d0c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99d9d0c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99d9d0c2
Branch: refs/heads/HDFS-EC
Commit: 99d9d0c2d19b9f161b765947f3fb64619ea58090
Parents: 571e9c6
Author: Brandon Li <br...@apache.org>
Authored: Tue Nov 11 13:03:31 2014 -0800
Committer: Brandon Li <br...@apache.org>
Committed: Tue Nov 11 13:03:31 2014 -0800
----------------------------------------------------------------------
.../hadoop/hdfs/nfs/nfs3/OpenFileCtx.java | 88 +++++++++++----
.../apache/hadoop/hdfs/nfs/nfs3/TestWrites.java | 107 +++++++++++++------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
3 files changed, 146 insertions(+), 52 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/99d9d0c2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
index b04c21c..b31baf5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
@@ -816,6 +816,42 @@ class OpenFileCtx {
return ret;
}
+ // Check if the to-commit range is sequential
+ @VisibleForTesting
+ synchronized boolean checkSequential(final long commitOffset,
+ final long nextOffset) {
+ Preconditions.checkState(commitOffset >= nextOffset, "commitOffset "
+ + commitOffset + " less than nextOffset " + nextOffset);
+ long offset = nextOffset;
+ Iterator<OffsetRange> it = pendingWrites.descendingKeySet().iterator();
+ while (it.hasNext()) {
+ OffsetRange range = it.next();
+ if (range.getMin() != offset) {
+ // got a hole
+ return false;
+ }
+ offset = range.getMax();
+ if (offset > commitOffset) {
+ return true;
+ }
+ }
+ // there is gap between the last pending write and commitOffset
+ return false;
+ }
+
+ private COMMIT_STATUS handleSpecialWait(boolean fromRead, long commitOffset,
+ Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
+ if (!fromRead) {
+ // let client retry the same request, add pending commit to sync later
+ CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid, preOpAttr);
+ pendingCommits.put(commitOffset, commitCtx);
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("return COMMIT_SPECIAL_WAIT");
+ }
+ return COMMIT_STATUS.COMMIT_SPECIAL_WAIT;
+ }
+
@VisibleForTesting
synchronized COMMIT_STATUS checkCommitInternal(long commitOffset,
Channel channel, int xid, Nfs3FileAttributes preOpAttr, boolean fromRead) {
@@ -827,17 +863,34 @@ class OpenFileCtx {
return COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE;
}
}
- if (pendingWrites.isEmpty()) {
- // Note that, there is no guarantee data is synced. Caller should still
- // do a sync here though the output stream might be closed.
- return COMMIT_STATUS.COMMIT_FINISHED;
- }
long flushed = getFlushedOffset();
if (LOG.isDebugEnabled()) {
- LOG.debug("getFlushedOffset=" + flushed + " commitOffset=" + commitOffset);
+ LOG.debug("getFlushedOffset=" + flushed + " commitOffset=" + commitOffset
+ + "nextOffset=" + nextOffset.get());
}
-
+
+ if (pendingWrites.isEmpty()) {
+ if (aixCompatMode) {
+ // Note that, there is no guarantee data is synced. Caller should still
+ // do a sync here though the output stream might be closed.
+ return COMMIT_STATUS.COMMIT_FINISHED;
+ } else {
+ if (flushed < nextOffset.get()) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("get commit while still writing to the requested offset,"
+ + " with empty queue");
+ }
+ return handleSpecialWait(fromRead, nextOffset.get(), channel, xid,
+ preOpAttr);
+ } else {
+ return COMMIT_STATUS.COMMIT_FINISHED;
+ }
+ }
+ }
+
+ Preconditions.checkState(flushed <= nextOffset.get(), "flushed " + flushed
+ + " is larger than nextOffset " + nextOffset.get());
// Handle large file upload
if (uploadLargeFile && !aixCompatMode) {
long co = (commitOffset > 0) ? commitOffset : pendingWrites.firstEntry()
@@ -846,21 +899,20 @@ class OpenFileCtx {
if (co <= flushed) {
return COMMIT_STATUS.COMMIT_DO_SYNC;
} else if (co < nextOffset.get()) {
- if (!fromRead) {
- // let client retry the same request, add pending commit to sync later
- CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid,
- preOpAttr);
- pendingCommits.put(commitOffset, commitCtx);
- }
if (LOG.isDebugEnabled()) {
- LOG.debug("return COMMIT_SPECIAL_WAIT");
+ LOG.debug("get commit while still writing to the requested offset");
}
- return COMMIT_STATUS.COMMIT_SPECIAL_WAIT;
+ return handleSpecialWait(fromRead, co, channel, xid, preOpAttr);
} else {
- if (LOG.isDebugEnabled()) {
- LOG.debug("return COMMIT_SPECIAL_SUCCESS");
+ // co >= nextOffset
+ if (checkSequential(co, nextOffset.get())) {
+ return handleSpecialWait(fromRead, co, channel, xid, preOpAttr);
+ } else {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("return COMMIT_SPECIAL_SUCCESS");
+ }
+ return COMMIT_STATUS.COMMIT_SPECIAL_SUCCESS;
}
- return COMMIT_STATUS.COMMIT_SPECIAL_SUCCESS;
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/99d9d0c2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
index 96e6393..56603b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
@@ -217,14 +217,14 @@ public class TestWrites {
ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_CTX);
- ctx.getPendingWritesForTest().put(new OffsetRange(5, 10),
+ ctx.getPendingWritesForTest().put(new OffsetRange(10, 15),
new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE);
// Test request with non zero commit offset
ctx.setActiveStatusForTest(true);
- Mockito.when(fos.getPos()).thenReturn((long) 10);
+ Mockito.when(fos.getPos()).thenReturn((long) 8);
ctx.setNextOffsetForTest(10);
COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr, false);
Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
@@ -232,35 +232,40 @@ public class TestWrites {
ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
+ // Test commit sequential writes
status = ctx.checkCommitInternal(10, ch, 1, attr, false);
- Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
+ Assert.assertTrue(status == COMMIT_STATUS.COMMIT_SPECIAL_WAIT);
ret = ctx.checkCommit(dfsClient, 10, ch, 1, attr, false);
- Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
+ Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_WAIT);
+ // Test commit non-sequential writes
ConcurrentNavigableMap<Long, CommitCtx> commits = ctx
.getPendingCommitsForTest();
- Assert.assertTrue(commits.size() == 0);
- ret = ctx.checkCommit(dfsClient, 11, ch, 1, attr, false);
+ Assert.assertTrue(commits.size() == 1);
+ ret = ctx.checkCommit(dfsClient, 16, ch, 1, attr, false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_SUCCESS);
- Assert.assertTrue(commits.size() == 0);
+ Assert.assertTrue(commits.size() == 1);
// Test request with zero commit offset
- commits.remove(new Long(11));
- // There is one pending write [5,10]
+ commits.remove(new Long(10));
+ // There is one pending write [10,15]
ret = ctx.checkCommitInternal(0, ch, 1, attr, false);
- Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_DO_SYNC);
+ Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_WAIT);
- Mockito.when(fos.getPos()).thenReturn((long) 6);
- ret = ctx.checkCommitInternal(8, ch, 1, attr, false);
+ ret = ctx.checkCommitInternal(9, ch, 1, attr, false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_WAIT);
- Assert.assertTrue(commits.size() == 1);
- long key = commits.firstKey();
- Assert.assertTrue(key == 8);
+ Assert.assertTrue(commits.size() == 2);
+ // Empty pending writes. nextOffset=10, flushed pos=8
+ ctx.getPendingWritesForTest().remove(new OffsetRange(10, 15));
+ ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
+ Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_WAIT);
+
// Empty pending writes
- ctx.getPendingWritesForTest().remove(new OffsetRange(5, 10));
+ ctx.setNextOffsetForTest((long) 8); // flushed pos = 8
ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
+
}
@Test
@@ -286,6 +291,7 @@ public class TestWrites {
ctx.getPendingWritesForTest().put(new OffsetRange(0, 10),
new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
Mockito.when(fos.getPos()).thenReturn((long) 10);
+ ctx.setNextOffsetForTest((long)10);
status = ctx.checkCommitInternal(5, null, 1, attr, false);
Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
}
@@ -317,7 +323,7 @@ public class TestWrites {
assertEquals( COMMIT_STATUS.COMMIT_INACTIVE_CTX, ret);
assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0));
- ctx.getPendingWritesForTest().put(new OffsetRange(5, 10),
+ ctx.getPendingWritesForTest().put(new OffsetRange(10, 15),
new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE, ret);
@@ -326,6 +332,7 @@ public class TestWrites {
// Test request with non zero commit offset
ctx.setActiveStatusForTest(true);
Mockito.when(fos.getPos()).thenReturn((long) 10);
+ ctx.setNextOffsetForTest((long)10);
COMMIT_STATUS status = ctx.checkCommitInternal(5, ch, 1, attr, false);
assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC, status);
// Do_SYNC state will be updated to FINISHED after data sync
@@ -355,7 +362,7 @@ public class TestWrites {
assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 0));
// Empty pending writes
- ctx.getPendingWritesForTest().remove(new OffsetRange(5, 10));
+ ctx.getPendingWritesForTest().remove(new OffsetRange(10, 15));
ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0));
@@ -386,7 +393,7 @@ public class TestWrites {
assertEquals( COMMIT_STATUS.COMMIT_INACTIVE_CTX, ret);
assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0));
- ctx.getPendingWritesForTest().put(new OffsetRange(5, 10),
+ ctx.getPendingWritesForTest().put(new OffsetRange(10, 15),
new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE, ret);
@@ -394,7 +401,8 @@ public class TestWrites {
// Test request with non zero commit offset
ctx.setActiveStatusForTest(true);
- Mockito.when(fos.getPos()).thenReturn((long) 10);
+ Mockito.when(fos.getPos()).thenReturn((long) 6);
+ ctx.setNextOffsetForTest((long)10);
COMMIT_STATUS status = ctx.checkCommitInternal(5, ch, 1, attr, false);
assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC, status);
// Do_SYNC state will be updated to FINISHED after data sync
@@ -402,32 +410,34 @@ public class TestWrites {
assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 5));
- status = ctx.checkCommitInternal(10, ch, 1, attr, true);
- assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
- ret = ctx.checkCommit(dfsClient, 10, ch, 1, attr, true);
- assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
- assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 10));
-
+ // Test request with sequential writes
+ status = ctx.checkCommitInternal(9, ch, 1, attr, true);
+ assertTrue(status == COMMIT_STATUS.COMMIT_SPECIAL_WAIT);
+ ret = ctx.checkCommit(dfsClient, 9, ch, 1, attr, true);
+ assertEquals(COMMIT_STATUS.COMMIT_SPECIAL_WAIT, ret);
+ assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 9));
+
+ // Test request with non-sequential writes
ConcurrentNavigableMap<Long, CommitCtx> commits = ctx
.getPendingCommitsForTest();
assertTrue(commits.size() == 0);
- ret = ctx.checkCommit(dfsClient, 11, ch, 1, attr, true);
+ ret = ctx.checkCommit(dfsClient, 16, ch, 1, attr, true);
assertEquals(COMMIT_STATUS.COMMIT_SPECIAL_SUCCESS, ret);
assertEquals(0, commits.size()); // commit triggered by read doesn't wait
- assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 11));
+ assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 16));
// Test request with zero commit offset
- // There is one pending write [5,10]
+ // There is one pending write [10,15]
ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
- assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
+ assertEquals(COMMIT_STATUS.COMMIT_SPECIAL_WAIT, ret);
assertEquals(0, commits.size());
- assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0));
+ assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 0));
// Empty pending writes
- ctx.getPendingWritesForTest().remove(new OffsetRange(5, 10));
+ ctx.getPendingWritesForTest().remove(new OffsetRange(10, 15));
ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
- assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
- assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0));
+ assertEquals(COMMIT_STATUS.COMMIT_SPECIAL_WAIT, ret);
+ assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 0));
}
private void waitWrite(RpcProgramNfs3 nfsd, FileHandle handle, int maxWaitTime)
@@ -629,4 +639,33 @@ public class TestWrites {
}
}
}
+
+ @Test
+ public void testCheckSequential() throws IOException {
+ DFSClient dfsClient = Mockito.mock(DFSClient.class);
+ Nfs3FileAttributes attr = new Nfs3FileAttributes();
+ HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
+ Mockito.when(fos.getPos()).thenReturn((long) 0);
+ NfsConfiguration config = new NfsConfiguration();
+
+ config.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, false);
+ OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
+ new ShellBasedIdMapping(config), false, config);
+
+ ctx.getPendingWritesForTest().put(new OffsetRange(5, 10),
+ new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
+ ctx.getPendingWritesForTest().put(new OffsetRange(10, 15),
+ new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
+ ctx.getPendingWritesForTest().put(new OffsetRange(20, 25),
+ new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
+
+ assertTrue(!ctx.checkSequential(5, 4));
+ assertTrue(ctx.checkSequential(9, 5));
+ assertTrue(ctx.checkSequential(10, 5));
+ assertTrue(ctx.checkSequential(14, 5));
+ assertTrue(!ctx.checkSequential(15, 5));
+ assertTrue(!ctx.checkSequential(20, 5));
+ assertTrue(!ctx.checkSequential(25, 5));
+ assertTrue(!ctx.checkSequential(999, 5));
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/99d9d0c2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8922a0f..b183731 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -410,6 +410,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7366. BlockInfo should take replication as an short in the constructor.
(Li Lu via wheat9)
+ HDFS-7387. NFS may only do partial commit due to a race between COMMIT and write
+ (brandonli)
+
Release 2.6.0 - 2014-11-15
INCOMPATIBLE CHANGES
[10/25] hadoop git commit: Release Notes for hadoop-2.6.0.
Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab30d513/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html b/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
index b3a4a13..85e3ae6 100644
--- a/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
+++ b/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
@@ -1,4 +1,3529 @@
<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<title>Hadoop 2.6.0 Release Notes</title>
+<STYLE type="text/css">
+ H1 {font-family: sans-serif}
+ H2 {font-family: sans-serif; margin-left: 7mm}
+ TABLE {margin-left: 7mm}
+</STYLE>
+</head>
+<body>
+<h1>Hadoop 2.6.0 Release Notes</h1>
+These release notes include new developer and user-facing incompatibilities, features, and major improvements.
+<a name="changes"/>
+<h2>Changes since Hadoop 2.5.1</h2>
+<ul>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2834">YARN-2834</a>.
+ Blocker bug reported by Yesha Vora and fixed by Jian He <br>
+ <b>Resource manager crashed with Null Pointer Exception</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2830">YARN-2830</a>.
+ Blocker bug reported by Jonathan Eagles and fixed by Jonathan Eagles <br>
+ <b>Add backwards compatible ContainerId.newInstance constructor for use within Tez Local Mode</b><br>
+ <blockquote>I just committed this. Thanks [~jeagles] for the patch and [~ozawa] for the reviews!</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2827">YARN-2827</a>.
+ Critical bug reported by Wangda Tan and fixed by Wangda Tan (client)<br>
+ <b>Fix bugs of yarn queue CLI</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2826">YARN-2826</a>.
+ Critical bug reported by sidharta seethana and fixed by Wangda Tan <br>
+ <b>User-Group mappings not updated by RM when a user is removed from a group.</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2825">YARN-2825</a>.
+ Critical bug reported by Jian He and fixed by Jian He <br>
+ <b>Container leak on NM</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2824">YARN-2824</a>.
+ Critical sub-task reported by Wangda Tan and fixed by Wangda Tan (resourcemanager)<br>
+ <b>Capacity of labels should be zero by default</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2823">YARN-2823</a>.
+ Critical bug reported by Gour Saha and fixed by Jian He (resourcemanager)<br>
+ <b>NullPointerException in RM HA enabled 3-node cluster</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2819">YARN-2819</a>.
+ Critical bug reported by Gopal V and fixed by Zhijie Shen (timelineserver)<br>
+ <b>NPE in ATS Timeline Domains when upgrading from 2.4 to 2.6</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2818">YARN-2818</a>.
+ Critical bug reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+ <b>Remove the logic to inject entity owner as the primary filter</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2813">YARN-2813</a>.
+ Major bug reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+ <b>NPE from MemoryTimelineStore.getDomains</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2812">YARN-2812</a>.
+ Major test reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+ <b>TestApplicationHistoryServer is likely to fail on less powerful machine</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2810">YARN-2810</a>.
+ Major test reported by Varun Vasudev and fixed by Varun Vasudev (resourcemanager)<br>
+ <b>TestRMProxyUsersConf fails on Windows VMs</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2805">YARN-2805</a>.
+ Blocker bug reported by Arpit Gupta and fixed by Wangda Tan (resourcemanager)<br>
+ <b>RM2 in HA setup tries to login using the RM1's kerberos principal</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2804">YARN-2804</a>.
+ Critical bug reported by Zhijie Shen and fixed by Zhijie Shen <br>
+ <b>Timeline server .out log have JAXB binding exceptions and warnings.</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2803">YARN-2803</a>.
+ Critical bug reported by Chris Nauroth and fixed by Craig Welch (nodemanager)<br>
+ <b>MR distributed cache not working correctly on Windows after NodeManager privileged account changes.</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2798">YARN-2798</a>.
+ Blocker bug reported by Arpit Gupta and fixed by Zhijie Shen (timelineserver)<br>
+ <b>YarnClient doesn't need to translate Kerberos name of timeline DT renewer</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2795">YARN-2795</a>.
+ Major sub-task reported by Phil D'Amore and fixed by Wangda Tan (resourcemanager)<br>
+ <b>Resource Manager fails startup with HDFS label storage and secure cluster</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2790">YARN-2790</a>.
+ Critical bug reported by Tassapol Athiapinya and fixed by Jian He (nodemanager)<br>
+ <b>NM can't aggregate logs past HDFS delegation token expiry.</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2789">YARN-2789</a>.
+ Critical task reported by Siddharth Seth and fixed by Wangda Tan <br>
+ <b>Re-instate the NodeReport.newInstance API modified in YARN-2698</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2788">YARN-2788</a>.
+ Blocker bug reported by Gopal V and fixed by Xuan Gong (log-aggregation)<br>
+ <b>yarn logs -applicationId on 2.6.0 should support logs written by 2.4.0</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2785">YARN-2785</a>.
+ Major test reported by Varun Vasudev and fixed by Varun Vasudev <br>
+ <b>TestContainerResourceUsage fails intermittently</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2779">YARN-2779</a>.
+ Critical bug reported by Zhijie Shen and fixed by Zhijie Shen (resourcemanager , timelineserver)<br>
+ <b>SystemMetricsPublisher can use Kerberos directly instead of timeline DT</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2778">YARN-2778</a>.
+ Major sub-task reported by Wangda Tan and fixed by Wangda Tan (client)<br>
+ <b>YARN node CLI should display labels on returned node reports</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2770">YARN-2770</a>.
+ Critical sub-task reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+ <b>Timeline delegation tokens need to be automatically renewed by the RM</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2769">YARN-2769</a>.
+ Major bug reported by Varun Vasudev and fixed by Varun Vasudev (applications/distributed-shell)<br>
+ <b>Timeline server domain not set correctly when using shell_command on Windows</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2767">YARN-2767</a>.
+ Major test reported by Varun Vasudev and fixed by Varun Vasudev (resourcemanager)<br>
+ <b>RM web services - add test case to ensure the http static user cannot kill or submit apps in secure mode</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2760">YARN-2760</a>.
+ Trivial bug reported by Harsh J and fixed by Harsh J (documentation)<br>
+ <b>Completely remove word 'experimental' from FairScheduler docs</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2758">YARN-2758</a>.
+ Major test reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+ <b>Update TestApplicationHistoryClientService to use the new generic history store</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2755">YARN-2755</a>.
+ Critical bug reported by Siqi Li and fixed by Siqi Li <br>
+ <b>NM fails to clean up usercache_DEL_&lt;timestamp&gt; dirs after YARN-661</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2753">YARN-2753</a>.
+ Major sub-task reported by zhihai xu and fixed by zhihai xu <br>
+ <b>Fix potential issues and code clean up for *NodeLabelsManager</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2752">YARN-2752</a>.
+ Critical bug reported by Xuan Gong and fixed by Xuan Gong <br>
+ <b>ContainerExecutor always append "nice -n" in command on branch-2</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2747">YARN-2747</a>.
+ Major test reported by Xuan Gong and fixed by Xuan Gong <br>
+ <b>TestAggregatedLogFormat fails in trunk</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2744">YARN-2744</a>.
+ Critical sub-task reported by Sumit Mohanty and fixed by Wangda Tan (capacityscheduler)<br>
+ <b>Under some scenario, it is possible to end up with capacity scheduler configuration that uses labels that no longer exist</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2743">YARN-2743</a>.
+ Blocker bug reported by Arpit Gupta and fixed by Jian He (resourcemanager)<br>
+ <b>Yarn jobs via oozie fail with failed to renew token (secure) or digest mismatch (unsecure) errors when RM is being killed</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2741">YARN-2741</a>.
+ Major bug reported by Craig Welch and fixed by Craig Welch (nodemanager)<br>
+ <b>Windows: Node manager cannot serve up log files via the web user interface when yarn.nodemanager.log-dirs to any drive letter other than C: (or, the drive that nodemanager is running on)</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2734">YARN-2734</a>.
+ Major bug reported by Sumit Mohanty and fixed by Xuan Gong (log-aggregation)<br>
+ <b>If a sub-folder is encountered by log aggregator it results in invalid aggregated file</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2732">YARN-2732</a>.
+ Major bug reported by Jian He and fixed by Jian He <br>
+ <b>Fix syntax error in SecureContainer.apt.vm</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2730">YARN-2730</a>.
+ Critical bug reported by Siqi Li and fixed by Siqi Li <br>
+ <b>DefaultContainerExecutor runs only one localizer at a time</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2726">YARN-2726</a>.
+ Minor sub-task reported by Phil D'Amore and fixed by Wangda Tan (capacityscheduler)<br>
+ <b>CapacityScheduler should explicitly log when an accessible label has no capacity</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2724">YARN-2724</a>.
+ Major bug reported by Sumit Mohanty and fixed by Xuan Gong (log-aggregation)<br>
+ <b>If an unreadable file is encountered during log aggregation then aggregated file in HDFS badly formed</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2723">YARN-2723</a>.
+ Major sub-task reported by Phil D'Amore and fixed by Naganarasimha G R (client)<br>
+ <b>rmadmin -replaceLabelsOnNode does not correctly parse port</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2721">YARN-2721</a>.
+ Blocker bug reported by Jian He and fixed by Jian He <br>
+ <b>Race condition: ZKRMStateStore retry logic may throw NodeExist exception </b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2720">YARN-2720</a>.
+ Major bug reported by Craig Welch and fixed by Craig Welch (nodemanager)<br>
+ <b>Windows: Wildcard classpath variables not expanded against resources contained in archives</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2717">YARN-2717</a>.
+ Major sub-task reported by Xuan Gong and fixed by Xuan Gong (log-aggregation)<br>
+ <b>containerLogNotFound log shows multiple time for the same container</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2715">YARN-2715</a>.
+ Blocker bug reported by Zhijie Shen and fixed by Zhijie Shen (resourcemanager)<br>
+ <b>Proxy user is problem for RPC interface if yarn.resourcemanager.webapp.proxyuser is not set.</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2711">YARN-2711</a>.
+ Major test reported by Varun Vasudev and fixed by Varun Vasudev <br>
+ <b>TestDefaultContainerExecutor#testContainerLaunchError fails on Windows</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2709">YARN-2709</a>.
+ Major sub-task reported by Li Lu and fixed by Li Lu <br>
+ <b>Add retry for timeline client getDelegationToken method</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2707">YARN-2707</a>.
+ Minor bug reported by Ted Yu and fixed by Gera Shegalov <br>
+ <b>Potential null dereference in FSDownload</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2705">YARN-2705</a>.
+ Critical sub-task reported by Wangda Tan and fixed by Wangda Tan (api , client , resourcemanager)<br>
+ <b>Changes of RM node label manager default configuration</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2704">YARN-2704</a>.
+ Critical sub-task reported by Jian He and fixed by Jian He <br>
+ <b> Localization and log-aggregation will fail if hdfs delegation token expired after token-max-life-time</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2703">YARN-2703</a>.
+ Major sub-task reported by Xuan Gong and fixed by Xuan Gong (nodemanager , resourcemanager)<br>
+ <b>Add logUploadedTime into LogValue for better display</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2701">YARN-2701</a>.
+ Blocker bug reported by Xuan Gong and fixed by Xuan Gong <br>
+ <b>Potential race condition in startLocalizer when using LinuxContainerExecutor </b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2700">YARN-2700</a>.
+ Major sub-task reported by Steve Loughran and fixed by Steve Loughran (api , resourcemanager)<br>
+ <b>TestSecureRMRegistryOperations failing on windows: auth problems</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2699">YARN-2699</a>.
+ Blocker sub-task reported by Wangda Tan and fixed by Wangda Tan (client)<br>
+ <b>Fix test timeout in TestResourceTrackerOnHA#testResourceTrackerOnHA</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2698">YARN-2698</a>.
+ Critical sub-task reported by Wangda Tan and fixed by Wangda Tan (api , client , resourcemanager)<br>
+ <b>Move getClusterNodeLabels and getNodeToLabels to YarnClient instead of AdminService</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2692">YARN-2692</a>.
+ Major sub-task reported by Steve Loughran and fixed by Steve Loughran <br>
+ <b>ktutil test hanging on some machines/ktutil versions</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2689">YARN-2689</a>.
+ Major sub-task reported by Steve Loughran and fixed by Steve Loughran (api , resourcemanager)<br>
+ <b>TestSecureRMRegistryOperations failing on windows: secure ZK won't start</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2685">YARN-2685</a>.
+ Major sub-task reported by Wangda Tan and fixed by Wangda Tan (api , client , resourcemanager)<br>
+ <b>Resource on each label not correct when multiple NMs in a same host and some has label some not</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2682">YARN-2682</a>.
+ Minor bug reported by zhihai xu and fixed by zhihai xu (nodemanager)<br>
+ <b>WindowsSecureContainerExecutor should not depend on DefaultContainerExecutor#getFirstApplicationDir. </b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2678">YARN-2678</a>.
+ Major sub-task reported by Gour Saha and fixed by Steve Loughran (api , resourcemanager)<br>
+ <b>Improved Yarn Registry service record structure</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2677">YARN-2677</a>.
+ Major sub-task reported by Steve Loughran and fixed by Steve Loughran (api , resourcemanager)<br>
+ <b>registry punycoding of usernames doesn't fix all usernames to be DNS-valid</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2676">YARN-2676</a>.
+ Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+ <b>Timeline authentication filter should add support for proxy user</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2673">YARN-2673</a>.
+ Major sub-task reported by Li Lu and fixed by Li Lu <br>
+ <b>Add retry for timeline client put APIs</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2671">YARN-2671</a>.
+ Blocker bug reported by Zhijie Shen and fixed by Wangda Tan (resourcemanager)<br>
+ <b>ApplicationSubmissionContext change breaks the existing app submission</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2668">YARN-2668</a>.
+ Major sub-task reported by Steve Loughran and fixed by Steve Loughran (client)<br>
+ <b>yarn-registry JAR won't link against ZK 3.4.5</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2667">YARN-2667</a>.
+ Minor bug reported by Yi Liu and fixed by Yi Liu <br>
+ <b>Fix the release audit warning caused by hadoop-yarn-registry</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2662">YARN-2662</a>.
+ Minor bug reported by Chris Nauroth and fixed by Chris Nauroth (test)<br>
+ <b>TestCgroupsLCEResourcesHandler leaks file descriptors.</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2656">YARN-2656</a>.
+ Major bug reported by Varun Vasudev and fixed by Zhijie Shen (resourcemanager)<br>
+ <b>RM web services authentication filter should add support for proxy user</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2652">YARN-2652</a>.
+ Major sub-task reported by Steve Loughran and fixed by Steve Loughran (api)<br>
+ <b>add hadoop-yarn-registry package under hadoop-yarn</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2651">YARN-2651</a>.
+ Major sub-task reported by Xuan Gong and fixed by Xuan Gong (nodemanager , resourcemanager)<br>
+ <b>Spin off the LogRollingInterval from LogAggregationContext</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2649">YARN-2649</a>.
+ Major bug reported by Ming Ma and fixed by Ming Ma <br>
+ <b>Flaky test TestAMRMRPCNodeUpdates</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2647">YARN-2647</a>.
+ Major sub-task reported by Wangda Tan and fixed by Sunil G (client)<br>
+ <b>Add yarn queue CLI to get queue infos</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2644">YARN-2644</a>.
+ Major sub-task reported by Craig Welch and fixed by Craig Welch <br>
+ <b>Recalculate headroom more frequently to keep it accurate</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2632">YARN-2632</a>.
+ Blocker sub-task reported by Junping Du and fixed by Junping Du (nodemanager)<br>
+ <b>Document NM Restart feature</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2630">YARN-2630</a>.
+ Major bug reported by Jian He and fixed by Jian He <br>
+ <b>TestDistributedShell#testDSRestartWithPreviousRunningContainers fails</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2629">YARN-2629</a>.
+ Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+ <b>Make distributed shell use the domain-based timeline ACLs</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2628">YARN-2628</a>.
+ Major bug reported by Varun Vasudev and fixed by Varun Vasudev (capacityscheduler)<br>
+ <b>Capacity scheduler with DominantResourceCalculator carries out reservation even though slots are free</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2627">YARN-2627</a>.
+ Major improvement reported by Xuan Gong and fixed by Xuan Gong <br>
+ <b>Add logs when attemptFailuresValidityInterval is enabled</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2624">YARN-2624</a>.
+ Blocker bug reported by Anubhav Dhoot and fixed by Anubhav Dhoot (nodemanager)<br>
+ <b>Resource Localization fails on a cluster due to existing cache directories</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2621">YARN-2621</a>.
+ Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+ <b>Simplify the output when the user doesn't have the access for getDomain(s) </b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2617">YARN-2617</a>.
+ Major bug reported by Jun Gong and fixed by Jun Gong (nodemanager)<br>
+ <b>NM does not need to send finished container whose APP is not running to RM</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2615">YARN-2615</a>.
+ Blocker sub-task reported by Junping Du and fixed by Junping Du <br>
+ <b>ClientToAMTokenIdentifier and DelegationTokenIdentifier should allow extended fields</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2611">YARN-2611</a>.
+ Major sub-task reported by Subru Krishnan and fixed by Subru Krishnan (capacityscheduler , resourcemanager , scheduler)<br>
+ <b>Fix jenkins findbugs warning and test case failures for trunk merge patch</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2610">YARN-2610</a>.
+ Major bug reported by Ray Chiang and fixed by Ray Chiang <br>
+ <b>Hamlet should close table tags</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2608">YARN-2608</a>.
+ Major bug reported by Wei Yan and fixed by Wei Yan <br>
+ <b>FairScheduler: Potential deadlocks in loading alloc files and clock access</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2607">YARN-2607</a>.
+ Major test reported by Ted Yu and fixed by Wangda Tan <br>
+ <b>TestDistributedShell fails in trunk</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2606">YARN-2606</a>.
+ Major bug reported by Mit Desai and fixed by Mit Desai (timelineserver)<br>
+ <b>Application History Server tries to access hdfs before doing secure login</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2602">YARN-2602</a>.
+ Major bug reported by Karam Singh and fixed by Zhijie Shen (timelineserver)<br>
+ <b>Generic History Service of TimelineServer sometimes not able to handle NPE</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2596">YARN-2596</a>.
+ Major test reported by Junping Du and fixed by Karthik Kambatla <br>
+ <b>TestWorkPreservingRMRestart fails with FairScheduler</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2594">YARN-2594</a>.
+ Blocker bug reported by Karam Singh and fixed by Wangda Tan (resourcemanager)<br>
+ <b>Potential deadlock in RM when querying ApplicationResourceUsageReport</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2591">YARN-2591</a>.
+ Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+ <b>AHSWebServices should return FORBIDDEN(403) if the request user doesn't have access to the history data</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2588">YARN-2588</a>.
+ Major bug reported by Rohith and fixed by Rohith (resourcemanager)<br>
+ <b>Standby RM does not transitionToActive if previous transitionToActive is failed with ZK exception.</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2584">YARN-2584</a>.
+ Major test reported by Zhijie Shen and fixed by Jian He <br>
+ <b>TestContainerManagerSecurity fails on trunk</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2583">YARN-2583</a>.
+ Major sub-task reported by Xuan Gong and fixed by Xuan Gong (nodemanager , resourcemanager)<br>
+ <b>Modify the LogDeletionService to support Log aggregation for LRS</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2582">YARN-2582</a>.
+ Major sub-task reported by Xuan Gong and fixed by Xuan Gong (nodemanager , resourcemanager)<br>
+ <b>Log related CLI and Web UI changes for Aggregated Logs in LRS</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2581">YARN-2581</a>.
+ Major sub-task reported by Xuan Gong and fixed by Xuan Gong (nodemanager , resourcemanager)<br>
+ <b>NMs need to find a way to get LogAggregationContext</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2579">YARN-2579</a>.
+ Blocker bug reported by Rohith and fixed by Rohith (resourcemanager)<br>
+ <b>Deadlock when EmbeddedElectorService and FatalEventDispatcher try to transition RM to StandBy at the same time</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2577">YARN-2577</a>.
+ Trivial improvement reported by Miklos Christine and fixed by Miklos Christine (documentation , fairscheduler)<br>
+ <b>Clarify ACL delimiter and how to configure ACL groups only</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2576">YARN-2576</a>.
+ Major sub-task reported by Subru Krishnan and fixed by Subru Krishnan (capacityscheduler , resourcemanager , scheduler)<br>
+ <b>Prepare yarn-1051 branch for merging with trunk</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2569">YARN-2569</a>.
+ Major sub-task reported by Xuan Gong and fixed by Xuan Gong (nodemanager , resourcemanager)<br>
+ <b>API changes for handling logs of long-running services</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2568">YARN-2568</a>.
+ Major bug reported by Jian He and fixed by Jian He <br>
+ <b>TestAMRMClientOnRMRestart test fails</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2566">YARN-2566</a>.
+ Critical sub-task reported by zhihai xu and fixed by zhihai xu (nodemanager)<br>
+ <b>DefaultContainerExecutor should pick a working directory randomly</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2565">YARN-2565</a>.
+ Major bug reported by Karam Singh and fixed by Zhijie Shen (resourcemanager , timelineserver)<br>
+ <b>RM shouldn't use the old RMApplicationHistoryWriter unless explicitly setting FileSystemApplicationHistoryStore</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2563">YARN-2563</a>.
+ Blocker bug reported by Arpit Gupta and fixed by Zhijie Shen (timelineserver)<br>
+ <b>On secure clusters call to timeline server fails with authentication errors when running a job via oozie</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2562">YARN-2562</a>.
+ Critical bug reported by Vinod Kumar Vavilapalli and fixed by Tsuyoshi OZAWA <br>
+ <b>ContainerId@toString() is unreadable for epoch >0 after YARN-2182</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2561">YARN-2561</a>.
+ Blocker sub-task reported by Tassapol Athiapinya and fixed by Junping Du <br>
+ <b>MR job client cannot reconnect to AM after NM restart.</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2559">YARN-2559</a>.
+ Major bug reported by Karam Singh and fixed by Zhijie Shen (resourcemanager , timelineserver)<br>
+ <b>ResourceManager sometime become un-responsive due to NPE in SystemMetricsPublisher</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2558">YARN-2558</a>.
+ Blocker sub-task reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA <br>
+ <b>Updating ContainerTokenIdentifier#read/write to use ContainerId#getContainerId</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2557">YARN-2557</a>.
+ Major bug reported by Xuan Gong and fixed by Xuan Gong (applications/distributed-shell)<br>
+ <b>Add a parameter "attempt_Failures_Validity_Interval" in DistributedShell </b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2549">YARN-2549</a>.
+ Minor test reported by Chris Nauroth and fixed by Chris Nauroth (nodemanager , test)<br>
+ <b>TestContainerLaunch fails due to classpath problem with hamcrest classes.</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2547">YARN-2547</a>.
+ Major sub-task reported by Jonathan Eagles and fixed by Mit Desai (timelineserver)<br>
+ <b>Cross Origin Filter throws UnsupportedOperationException upon destroy</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2546">YARN-2546</a>.
+ Major bug reported by Doug Haigh and fixed by Varun Vasudev (api)<br>
+ <b>REST API for application creation/submission is using strings for numeric &amp; boolean values</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2544">YARN-2544</a>.
+ Major sub-task reported by Wangda Tan and fixed by Wangda Tan (api , client , resourcemanager)<br>
+ <b>Common server side PB changes (not include user API PB changes)</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2542">YARN-2542</a>.
+ Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+ <b>"yarn application -status &lt;appId&gt;" throws NPE when retrieving the app from the timelineserver</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2541">YARN-2541</a>.
+ Major bug reported by Jian He and fixed by Jian He <br>
+ <b>Fix ResourceManagerRest.apt.vm syntax error</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2540">YARN-2540</a>.
+ Major bug reported by Ashwin Shankar and fixed by Ashwin Shankar (scheduler)<br>
+ <b>FairScheduler: Queue filters not working on scheduler page in RM UI</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2539">YARN-2539</a>.
+ Minor improvement reported by Wei Yan and fixed by Wei Yan <br>
+ <b>FairScheduler: Set the default value for maxAMShare to 0.5</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2538">YARN-2538</a>.
+ Major sub-task reported by Xuan Gong and fixed by Xuan Gong (resourcemanager)<br>
+ <b>Add logs when RM send new AMRMToken to ApplicationMaster</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2534">YARN-2534</a>.
+ Major bug reported by zhihai xu and fixed by zhihai xu (scheduler)<br>
+ <b>FairScheduler: Potential integer overflow calculating totalMaxShare</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2531">YARN-2531</a>.
+ Major improvement reported by Varun Vasudev and fixed by Varun Vasudev <br>
+ <b>CGroups - Admins should be allowed to enforce strict cpu limits</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2529">YARN-2529</a>.
+ Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+ <b>Generic history service RPC interface doesn't work when service authorization is enabled</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2528">YARN-2528</a>.
+ Major sub-task reported by Jonathan Eagles and fixed by Jonathan Eagles (timelineserver)<br>
+ <b>Cross Origin Filter Http response split vulnerability protection rejects valid origins</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2527">YARN-2527</a>.
+ Major bug reported by Benoy Antony and fixed by Benoy Antony (resourcemanager)<br>
+ <b>NPE in ApplicationACLsManager</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2526">YARN-2526</a>.
+ Critical bug reported by Wei Yan and fixed by Wei Yan (scheduler-load-simulator)<br>
+ <b>SLS can deadlock when all the threads are taken by AMSimulators</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2523">YARN-2523</a>.
+ Major bug reported by Nishan Shetty and fixed by Rohith (resourcemanager , webapp)<br>
+ <b>ResourceManager UI showing negative value for "Decommissioned Nodes" field</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2519">YARN-2519</a>.
+ Major test reported by Xiaoyu Yao and fixed by Xiaoyu Yao (webapp)<br>
+ <b>Credential Provider related unit tests failed on Windows</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2515">YARN-2515</a>.
+ Major sub-task reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA (resourcemanager)<br>
+ <b>Update ConverterUtils#toContainerId to parse epoch</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2512">YARN-2512</a>.
+ Major sub-task reported by Jonathan Eagles and fixed by Jonathan Eagles (timelineserver)<br>
+ <b>Allow for origin pattern matching in cross origin filter</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2511">YARN-2511</a>.
+ Major sub-task reported by Jonathan Eagles and fixed by Jonathan Eagles (timelineserver)<br>
+ <b>Allow All Origins by default when Cross Origin Filter is enabled</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2509">YARN-2509</a>.
+ Major sub-task reported by Jonathan Eagles and fixed by Mit Desai (timelineserver)<br>
+ <b>Enable Cross Origin Filter for timeline server only and not all Yarn servers</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2508">YARN-2508</a>.
+ Major sub-task reported by Jonathan Eagles and fixed by Mit Desai (timelineserver)<br>
+ <b>Cross Origin configuration parameters prefix are not honored</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2507">YARN-2507</a>.
+ Major sub-task reported by Jonathan Eagles and fixed by Jonathan Eagles (documentation , timelineserver)<br>
+ <b>Document Cross Origin Filter Configuration for ATS</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2505">YARN-2505</a>.
+ Major sub-task reported by Wangda Tan and fixed by Craig Welch (resourcemanager)<br>
+ <b>Support get/add/remove/change labels in RM REST API</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2504">YARN-2504</a>.
+ Critical sub-task reported by Wangda Tan and fixed by Wangda Tan (resourcemanager)<br>
+ <b>Support get/add/remove/change labels in RM admin CLI </b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2503">YARN-2503</a>.
+ Major sub-task reported by Wangda Tan and fixed by Wangda Tan (resourcemanager)<br>
+ <b>Changes in RM Web UI to better show labels to end users</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2502">YARN-2502</a>.
+ Major sub-task reported by Wangda Tan and fixed by Wangda Tan (resourcemanager)<br>
+ <b>Changes in distributed shell to support specify labels</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2501">YARN-2501</a>.
+ Major sub-task reported by Wangda Tan and fixed by Wangda Tan (resourcemanager)<br>
+ <b>Changes in AMRMClient to support labels</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2500">YARN-2500</a>.
+ Major sub-task reported by Wangda Tan and fixed by Wangda Tan (resourcemanager)<br>
+ <b>Miscellaneous changes in ResourceManager to support labels</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2496">YARN-2496</a>.
+ Major sub-task reported by Wangda Tan and fixed by Wangda Tan (resourcemanager)<br>
+ <b>Changes for capacity scheduler to support allocate resource respect labels</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2494">YARN-2494</a>.
+ Major sub-task reported by Wangda Tan and fixed by Wangda Tan (resourcemanager)<br>
+ <b>Node label manager API and storage implementations</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2493">YARN-2493</a>.
+ Major sub-task reported by Wangda Tan and fixed by Wangda Tan (api)<br>
+ <b>API changes for users</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2484">YARN-2484</a>.
+ Trivial bug reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA <br>
+ <b>FileSystemRMStateStore#readFile/writeFile should close FSData(In|Out)putStream in final block</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2475">YARN-2475</a>.
+ Major sub-task reported by Carlo Curino and fixed by Carlo Curino (resourcemanager)<br>
+ <b>ReservationSystem: replan upon capacity reduction</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2468">YARN-2468</a>.
+ Major sub-task reported by Xuan Gong and fixed by Xuan Gong (log-aggregation , nodemanager , resourcemanager)<br>
+ <b>Log handling for LRS</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2462">YARN-2462</a>.
+ Major bug reported by Jason Lowe and fixed by Eric Payne <br>
+ <b>TestNodeManagerResync#testBlockNewContainerRequestsOnStartAndResync should have a test timeout</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2460">YARN-2460</a>.
+ Minor bug reported by Ray Chiang and fixed by Ray Chiang <br>
+ <b>Remove obsolete entries from yarn-default.xml</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2459">YARN-2459</a>.
+ Major bug reported by Mayank Bansal and fixed by Mayank Bansal (resourcemanager)<br>
+ <b>RM crashes if App gets rejected for any reason and HA is enabled</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2456">YARN-2456</a>.
+ Major sub-task reported by Jian He and fixed by Jian He (resourcemanager)<br>
+ <b>Possible livelock in CapacityScheduler when RM is recovering apps</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2453">YARN-2453</a>.
+ Major bug reported by zhihai xu and fixed by zhihai xu <br>
+ <b>TestProportionalCapacityPreemptionPolicy fails with FairScheduler</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2452">YARN-2452</a>.
+ Major bug reported by zhihai xu and fixed by zhihai xu <br>
+ <b>TestRMApplicationHistoryWriter fails with FairScheduler</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2450">YARN-2450</a>.
+ Trivial bug reported by Ray Chiang and fixed by Ray Chiang <br>
+ <b>Fix typos in log messages</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2449">YARN-2449</a>.
+ Critical bug reported by Karam Singh and fixed by Varun Vasudev (timelineserver)<br>
+ <b>Timelineserver returns invalid Delegation token in secure kerberos enabled cluster when hadoop.http.filter.initializers are not set</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2448">YARN-2448</a>.
+ Major improvement reported by Varun Vasudev and fixed by Varun Vasudev <br>
+ <b>RM should expose the resource types considered during scheduling when AMs register</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2447">YARN-2447</a>.
+ Major bug reported by Varun Vasudev and fixed by Varun Vasudev <br>
+ <b>RM web services app submission doesn't pass secrets correctly</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2446">YARN-2446</a>.
+ Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+ <b>Using TimelineNamespace to shield the entities of a user</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2440">YARN-2440</a>.
+ Major bug reported by Varun Vasudev and fixed by Varun Vasudev <br>
+ <b>Cgroups should allow YARN containers to be limited to allocated cores</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2434">YARN-2434</a>.
+ Major sub-task reported by Jian He and fixed by Jian He <br>
+ <b>RM should not recover containers from previously failed attempt when AM restart is not enabled</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2431">YARN-2431</a>.
+ Major sub-task reported by Jason Lowe and fixed by Jason Lowe (nodemanager)<br>
+ <b>NM restart: cgroup is not removed for reacquired containers</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2424">YARN-2424</a>.
+ Blocker bug reported by Allen Wittenauer and fixed by Allen Wittenauer (nodemanager)<br>
+ <b>LCE should support non-cgroups, non-secure mode</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2411">YARN-2411</a>.
+ Major improvement reported by Ram Venkatesh and fixed by Ram Venkatesh (capacityscheduler)<br>
+ <b>[Capacity Scheduler] support simple user and group mappings to queues</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2409">YARN-2409</a>.
+ Critical bug reported by Nishan Shetty and fixed by Rohith (resourcemanager)<br>
+ <b>Active to StandBy transition does not stop rmDispatcher that causes 1 AsyncDispatcher thread leak. </b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2406">YARN-2406</a>.
+ Major sub-task reported by Jian He and fixed by Tsuyoshi OZAWA <br>
+ <b>Move RM recovery related proto to yarn_server_resourcemanager_recovery.proto</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2405">YARN-2405</a>.
+ Major bug reported by Maysam Yabandeh and fixed by Tsuyoshi OZAWA <br>
+ <b>NPE in FairSchedulerAppsBlock</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2400">YARN-2400</a>.
+ Major bug reported by Jian He and fixed by Jian He <br>
+ <b>TestAMRestart fails intermittently</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2399">YARN-2399</a>.
+ Major improvement reported by Karthik Kambatla and fixed by Karthik Kambatla (fairscheduler)<br>
+ <b>FairScheduler: Merge AppSchedulable and FSSchedulerApp into FSAppAttempt</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2397">YARN-2397</a>.
+ Critical bug reported by Varun Vasudev and fixed by Varun Vasudev <br>
+ <b>RM and TS web interfaces sometimes return request is a replay error in secure mode</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2395">YARN-2395</a>.
+ Major new feature reported by Ashwin Shankar and fixed by Wei Yan (fairscheduler)<br>
+ <b>FairScheduler: Preemption timeout should be configurable per queue</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2394">YARN-2394</a>.
+ Major new feature reported by Ashwin Shankar and fixed by Wei Yan (fairscheduler)<br>
+ <b>FairScheduler: Configure fairSharePreemptionThreshold per queue</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2393">YARN-2393</a>.
+ Major new feature reported by Ashwin Shankar and fixed by Wei Yan (fairscheduler)<br>
+ <b>FairScheduler: Add the notion of steady fair share</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2389">YARN-2389</a>.
+ Major sub-task reported by Subru Krishnan and fixed by Subru Krishnan (capacityscheduler , fairscheduler)<br>
+ <b>Adding support for draining a queue, i.e. killing all apps in the queue</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2388">YARN-2388</a>.
+ Major test reported by Zhijie Shen and fixed by Zhijie Shen <br>
+ <b>TestTimelineWebServices fails on trunk after HADOOP-10791</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2387">YARN-2387</a>.
+ Blocker bug reported by Mit Desai and fixed by Mit Desai <br>
+ <b>Resource Manager crashes with NPE due to lack of synchronization</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2378">YARN-2378</a>.
+ Major sub-task reported by Subru Krishnan and fixed by Subru Krishnan (capacityscheduler)<br>
+ <b>Adding support for moving apps between queues in Capacity Scheduler</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2377">YARN-2377</a>.
+ Major improvement reported by Gera Shegalov and fixed by Gera Shegalov (nodemanager)<br>
+ <b>Localization exception stack traces are not passed as diagnostic info</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2374">YARN-2374</a>.
+ Major bug reported by Varun Vasudev and fixed by Varun Vasudev <br>
+ <b>YARN trunk build failing TestDistributedShell.testDSShell</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2373">YARN-2373</a>.
+ Major bug reported by Larry McCay and fixed by Larry McCay <br>
+ <b>WebAppUtils Should Use configuration.getPassword for Accessing SSL Passwords</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2372">YARN-2372</a>.
+ Minor improvement reported by Fengdong Yu and fixed by Fengdong Yu (documentation)<br>
+ <b>There are Chinese Characters in the FairScheduler's document</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2370">YARN-2370</a>.
+ Trivial bug reported by Wenwu Peng and fixed by Wenwu Peng (resourcemanager)<br>
+ <b>Fix comment in o.a.h.y.server.resourcemanager.scheduler.AppSchedulingInfo</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2363">YARN-2363</a>.
+ Major bug reported by Jason Lowe and fixed by Jason Lowe (resourcemanager)<br>
+ <b>Submitted applications occasionally lack a tracking URL</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2361">YARN-2361</a>.
+ Trivial improvement reported by zhihai xu and fixed by zhihai xu (resourcemanager)<br>
+ <b>RMAppAttempt state machine entries for KILLED state has duplicate event entries</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2360">YARN-2360</a>.
+ Major new feature reported by Ashwin Shankar and fixed by Ashwin Shankar (fairscheduler)<br>
+ <b>Fair Scheduler: Display dynamic fair share for queues on the scheduler page</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2359">YARN-2359</a>.
+ Critical bug reported by zhihai xu and fixed by zhihai xu (resourcemanager)<br>
+ <b>Application hangs when it fails to launch AM container </b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2354">YARN-2354</a>.
+ Major sub-task reported by Jian He and fixed by Li Lu <br>
+ <b>DistributedShell may allocate more containers than client specified after it restarts</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2352">YARN-2352</a>.
+ Major improvement reported by Karthik Kambatla and fixed by Karthik Kambatla (scheduler)<br>
+ <b>FairScheduler: Collect metrics on duration of critical methods that affect performance</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2347">YARN-2347</a>.
+ Major sub-task reported by Junping Du and fixed by Junping Du <br>
+ <b>Consolidate RMStateVersion and NMDBSchemaVersion into StateVersion in yarn-server-common</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2343">YARN-2343</a>.
+ Trivial improvement reported by Li Lu and fixed by Li Lu <br>
+ <b>Improve error message on token expire exception</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2337">YARN-2337</a>.
+ Trivial improvement reported by zhihai xu and fixed by zhihai xu (resourcemanager)<br>
+ <b>ResourceManager sets ClientRMService in RMContext multiple times</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2323">YARN-2323</a>.
+ Minor improvement reported by Hong Zhiguo and fixed by Hong Zhiguo (fairscheduler)<br>
+ <b>FairShareComparator creates too many Resource objects</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2321">YARN-2321</a>.
+ Major bug reported by Leitao Guo and fixed by Leitao Guo (nodemanager)<br>
+ <b>NodeManager web UI can incorrectly report Pmem enforcement</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2317">YARN-2317</a>.
+ Major sub-task reported by Li Lu and fixed by Li Lu (documentation)<br>
+ <b>Update documentation about how to write YARN applications</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2314">YARN-2314</a>.
+ Critical bug reported by Jason Lowe and fixed by Jason Lowe (client)<br>
+ <b>ContainerManagementProtocolProxy can create thousands of threads for a large cluster</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2313">YARN-2313</a>.
+ Major bug reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA (fairscheduler)<br>
+ <b>Livelock can occur in FairScheduler when there are lots of running apps</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2312">YARN-2312</a>.
+ Major sub-task reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA (resourcemanager)<br>
+ <b>Marking ContainerId#getId as deprecated</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2308">YARN-2308</a>.
+ Critical bug reported by Wangda Tan and fixed by chang li (resourcemanager , scheduler)<br>
+ <b>NPE happened when RM restart after CapacityScheduler queue configuration changed </b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2302">YARN-2302</a>.
+ Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+ <b>Refactor TimelineWebServices</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2298">YARN-2298</a>.
+ Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen (client)<br>
+ <b>Move TimelineClient to yarn-common project</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2295">YARN-2295</a>.
+ Major sub-task reported by Li Lu and fixed by Li Lu <br>
+ <b>Refactor YARN distributed shell with existing public stable API</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2288">YARN-2288</a>.
+ Major sub-task reported by Junping Du and fixed by Junping Du (timelineserver)<br>
+ <b>Data persisted in timelinestore should be versioned</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2279">YARN-2279</a>.
+ Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+ <b>Add UTs to cover timeline server authentication</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2277">YARN-2277</a>.
+ Major sub-task reported by Jonathan Eagles and fixed by Jonathan Eagles (timelineserver)<br>
+ <b>Add Cross-Origin support to the ATS REST API</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2274">YARN-2274</a>.
+ Trivial improvement reported by Karthik Kambatla and fixed by Karthik Kambatla (fairscheduler)<br>
+ <b>FairScheduler: Add debug information about cluster capacity, availability and reservations</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2273">YARN-2273</a>.
+ Major bug reported by Andy Skelton and fixed by Wei Yan (fairscheduler , resourcemanager)<br>
+ <b>NPE in ContinuousScheduling thread when we lose a node</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2269">YARN-2269</a>.
+ Major bug reported by Yesha Vora and fixed by Craig Welch <br>
+ <b>External links need to be removed from YARN UI</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2264">YARN-2264</a>.
+ Major bug reported by Siddharth Seth and fixed by Li Lu <br>
+ <b>Race in DrainDispatcher can cause random test failures</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2260">YARN-2260</a>.
+ Major sub-task reported by Jian He and fixed by Jian He (resourcemanager)<br>
+ <b>Add containers to launchedContainers list in RMNode on container recovery</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2252">YARN-2252</a>.
+ Major bug reported by Ratandeep Ratti and fixed by (scheduler)<br>
+ <b>Intermittent failure of TestFairScheduler.testContinuousScheduling</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2251">YARN-2251</a>.
+ Major bug reported by Zhijie Shen and fixed by Zhijie Shen <br>
+ <b>Avoid negative elapsed time in JHS/MRAM web UI and services</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2249">YARN-2249</a>.
+ Major sub-task reported by Jian He and fixed by Jian He (resourcemanager)<br>
+ <b>AM release request may be lost on RM restart</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2248">YARN-2248</a>.
+ Major sub-task reported by Janos Matyas and fixed by Janos Matyas (capacityscheduler)<br>
+ <b>Capacity Scheduler changes for moving apps between queues</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2244">YARN-2244</a>.
+ Critical bug reported by Anubhav Dhoot and fixed by Anubhav Dhoot (fairscheduler)<br>
+ <b>FairScheduler missing handling of containers for unknown application attempts </b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2242">YARN-2242</a>.
+ Major sub-task reported by Li Lu and fixed by Li Lu <br>
+ <b>Improve exception information on AM launch crashes</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2237">YARN-2237</a>.
+ Major sub-task reported by Xuan Gong and fixed by Xuan Gong (resourcemanager)<br>
+ <b>MRAppMaster changes for AMRMToken roll-up</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2229">YARN-2229</a>.
+ Major sub-task reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA (resourcemanager)<br>
+ <b>ContainerId can overflow with RM restart</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2228">YARN-2228</a>.
+ Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+ <b>TimelineServer should load pseudo authentication filter when authentication = simple</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2219">YARN-2219</a>.
+ Major bug reported by Ashwin Shankar and fixed by Jian He (resourcemanager)<br>
+ <b>AMs and NMs can get exceptions after recovery but before scheduler knows apps and app-attempts</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2214">YARN-2214</a>.
+ Major improvement reported by Ashwin Shankar and fixed by Ashwin Shankar (scheduler)<br>
+ <b>FairScheduler: preemptContainerPreCheck() in FSParentQueue delays convergence towards fairness</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2212">YARN-2212</a>.
+ Major sub-task reported by Xuan Gong and fixed by Xuan Gong (resourcemanager)<br>
+ <b>ApplicationMaster needs to find a way to update the AMRMToken periodically</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2211">YARN-2211</a>.
+ Major sub-task reported by Xuan Gong and fixed by Xuan Gong (resourcemanager)<br>
+ <b>RMStateStore needs to save AMRMToken master key for recovery when RM restart/failover happens </b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2209">YARN-2209</a>.
+ Major improvement reported by Jian He and fixed by Jian He <br>
+ <b>Replace AM resync/shutdown command with corresponding exceptions</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2208">YARN-2208</a>.
+ Major sub-task reported by Xuan Gong and fixed by Xuan Gong (resourcemanager)<br>
+ <b>AMRMTokenManager need to have a way to roll over AMRMToken</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2207">YARN-2207</a>.
+ Major task reported by Xuan Gong and fixed by Xuan Gong (resourcemanager)<br>
+ <b>Add ability to roll over AMRMToken</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2198">YARN-2198</a>.
+ Major improvement reported by Remus Rusanu and fixed by Remus Rusanu <br>
+ <b>Remove the need to run NodeManager as privileged account for Windows Secure Container Executor</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2197">YARN-2197</a>.
+ Minor improvement reported by Akira AJISAKA and fixed by Akira AJISAKA (documentation)<br>
+ <b>Add a link to YARN CHANGES.txt in the left side of doc</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2182">YARN-2182</a>.
+ Major sub-task reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA <br>
+ <b>Update ContainerId#toString() to avoid conflicts before and after RM restart</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2181">YARN-2181</a>.
+ Major bug reported by Wangda Tan and fixed by Wangda Tan (resourcemanager , webapp)<br>
+ <b>Add preemption info to RM Web UI and add logs when preemption occurs</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2174">YARN-2174</a>.
+ Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+ <b>Enabling HTTPs for the writer REST API of TimelineServer</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2161">YARN-2161</a>.
+ Major bug reported by Binglin Chang and fixed by Binglin Chang <br>
+ <b>Fix build on macosx: YARN parts</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2158">YARN-2158</a>.
+ Minor test reported by Ted Yu and fixed by Varun Vasudev <br>
+ <b>TestRMWebServicesAppsModification sometimes fails in trunk</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2153">YARN-2153</a>.
+ Major sub-task reported by Jian He and fixed by Jian He (resourcemanager)<br>
+ <b>Ensure distributed shell work with RM work-preserving recovery</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2147">YARN-2147</a>.
+ Minor bug reported by Jason Lowe and fixed by Chen He (resourcemanager)<br>
+ <b>client lacks delegation token exception details when application submit fails</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2138">YARN-2138</a>.
+ Major bug reported by Jian He and fixed by Varun Saxena <br>
+ <b>Cleanup notifyDone* methods in RMStateStore</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2131">YARN-2131</a>.
+ Major new feature reported by Karthik Kambatla and fixed by Robert Kanter (resourcemanager)<br>
+ <b>Add a way to format the RMStateStore</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2102">YARN-2102</a>.
+ Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+ <b>More generalized timeline ACLs</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2088">YARN-2088</a>.
+ Major bug reported by Binglin Chang and fixed by Binglin Chang <br>
+ <b>Fix code bug in GetApplicationsRequestPBImpl#mergeLocalToBuilder</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2080">YARN-2080</a>.
+ Major sub-task reported by Subru Krishnan and fixed by Subru Krishnan (resourcemanager)<br>
+ <b>Admission Control: Integrate Reservation subsystem with ResourceManager</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2070">YARN-2070</a>.
+ Minor sub-task reported by Zhijie Shen and fixed by Robert Kanter <br>
+ <b>DistributedShell publishes unfriendly user information to the timeline server</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2045">YARN-2045</a>.
+ Major sub-task reported by Junping Du and fixed by Junping Du (nodemanager)<br>
+ <b>Data persisted in NM should be versioned</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2035">YARN-2035</a>.
+ Major sub-task reported by Jonathan Eagles and fixed by Jonathan Eagles <br>
+ <b>FileSystemApplicationHistoryStore blocks RM and AHS while NN is in safemode</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2034">YARN-2034</a>.
+ Minor bug reported by Jason Lowe and fixed by Chen He (nodemanager)<br>
+ <b>Description for yarn.nodemanager.localizer.cache.target-size-mb is incorrect</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2033">YARN-2033</a>.
+ Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Zhijie Shen <br>
+ <b>Merging generic-history into the Timeline Store</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2026">YARN-2026</a>.
+ Major bug reported by Ashwin Shankar and fixed by Ashwin Shankar (scheduler)<br>
+ <b>Fair scheduler: Consider only active queues for computing fairshare</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2013">YARN-2013</a>.
+ Major sub-task reported by Zhijie Shen and fixed by Tsuyoshi OZAWA (nodemanager)<br>
+ <b>The diagnostics is always the ExitCodeException stack when the container crashes</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2010">YARN-2010</a>.
+ Blocker bug reported by bc Wong and fixed by Karthik Kambatla (resourcemanager)<br>
+ <b>Handle app-recovery failures gracefully</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2008">YARN-2008</a>.
+ Major sub-task reported by Chen He and fixed by Craig Welch <br>
+ <b>CapacityScheduler may report incorrect queueMaxCap if there is hierarchy queue structure </b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2001">YARN-2001</a>.
+ Major sub-task reported by Jian He and fixed by Jian He (resourcemanager)<br>
+ <b>Threshold for RM to accept requests from AM after failover</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1994">YARN-1994</a>.
+ Major improvement reported by Arpit Agarwal and fixed by Craig Welch (nodemanager , resourcemanager , webapp)<br>
+ <b>Expose YARN/MR endpoints on multiple interfaces</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1972">YARN-1972</a>.
+ Major sub-task reported by Remus Rusanu and fixed by Remus Rusanu (nodemanager)<br>
+ <b>Implement secure Windows Container Executor</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1959">YARN-1959</a>.
+ Major bug reported by Sandy Ryza and fixed by Anubhav Dhoot <br>
+ <b>Fix headroom calculation in FairScheduler</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1954">YARN-1954</a>.
+ Major improvement reported by Zhijie Shen and fixed by Tsuyoshi OZAWA (client)<br>
+ <b>Add waitFor to AMRMClient(Async)</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1922">YARN-1922</a>.
+ Major bug reported by Billie Rinaldi and fixed by Billie Rinaldi (nodemanager)<br>
+ <b>Process group remains alive after container process is killed externally</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1919">YARN-1919</a>.
+ Minor bug reported by Devaraj K and fixed by Tsuyoshi OZAWA (resourcemanager)<br>
+ <b>Potential NPE in EmbeddedElectorService#stop</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1918">YARN-1918</a>.
+ Trivial improvement reported by Devaraj K and fixed by Anandha L Ranganathan <br>
+ <b>Typo in description and error message for 'yarn.resourcemanager.cluster-id'</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1915">YARN-1915</a>.
+ Blocker sub-task reported by Hitesh Shah and fixed by Jason Lowe <br>
+ <b>ClientToAMTokenMasterKey should be provided to AM at launch time</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1879">YARN-1879</a>.
+ Critical sub-task reported by Jian He and fixed by Tsuyoshi OZAWA (resourcemanager)<br>
+ <b>Mark Idempotent/AtMostOnce annotations to ApplicationMasterProtocol for RM fail over</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1857">YARN-1857</a>.
+ Critical sub-task reported by Thomas Graves and fixed by Chen He (capacityscheduler)<br>
+ <b>CapacityScheduler headroom doesn't account for other AM's running</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1796">YARN-1796</a>.
+ Minor bug reported by Aaron T. Myers and fixed by Aaron T. Myers (nodemanager)<br>
+ <b>container-executor shouldn't require o-r permissions</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1779">YARN-1779</a>.
+ Blocker sub-task reported by Karthik Kambatla and fixed by Jian He (resourcemanager)<br>
+ <b>Handle AMRMTokens across RM failover</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1769">YARN-1769</a>.
+ Major improvement reported by Thomas Graves and fixed by Thomas Graves (capacityscheduler)<br>
+ <b>CapacityScheduler: Improve reservations</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1712">YARN-1712</a>.
+ Major sub-task reported by Carlo Curino and fixed by Carlo Curino (capacityscheduler , resourcemanager)<br>
+ <b>Admission Control: plan follower</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1711">YARN-1711</a>.
+ Major sub-task reported by Carlo Curino and fixed by Carlo Curino <br>
+ <b>CapacityOverTimePolicy: a policy to enforce quotas over time for YARN-1709</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1710">YARN-1710</a>.
+ Major sub-task reported by Carlo Curino and fixed by Carlo Curino (resourcemanager)<br>
+ <b>Admission Control: agents to allocate reservation</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1709">YARN-1709</a>.
+ Major sub-task reported by Carlo Curino and fixed by Subru Krishnan (resourcemanager)<br>
+ <b>Admission Control: Reservation subsystem</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1708">YARN-1708</a>.
+ Major sub-task reported by Carlo Curino and fixed by Subru Krishnan (resourcemanager)<br>
+ <b>Add a public API to reserve resources (part of YARN-1051)</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1707">YARN-1707</a>.
+ Major sub-task reported by Carlo Curino and fixed by Carlo Curino (capacityscheduler)<br>
+ <b>Making the CapacityScheduler more dynamic</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1372">YARN-1372</a>.
+ Major sub-task reported by Bikas Saha and fixed by Anubhav Dhoot (resourcemanager)<br>
+ <b>Ensure all completed containers are reported to the AMs across RM restart</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1370">YARN-1370</a>.
+ Major sub-task reported by Bikas Saha and fixed by Anubhav Dhoot (resourcemanager)<br>
+ <b>Fair scheduler to re-populate container allocation state</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1367">YARN-1367</a>.
+ Major sub-task reported by Bikas Saha and fixed by Anubhav Dhoot (resourcemanager)<br>
+ <b>After restart NM should resync with the RM without killing containers</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1354">YARN-1354</a>.
+ Major sub-task reported by Jason Lowe and fixed by Jason Lowe (nodemanager)<br>
+ <b>Recover applications upon nodemanager restart</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1342">YARN-1342</a>.
+ Major sub-task reported by Jason Lowe and fixed by Jason Lowe (nodemanager)<br>
+ <b>Recover container tokens upon nodemanager restart</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1341">YARN-1341</a>.
+ Major sub-task reported by Jason Lowe and fixed by Jason Lowe (nodemanager)<br>
+ <b>Recover NMTokens upon nodemanager restart</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1337">YARN-1337</a>.
+ Major sub-task reported by Jason Lowe and fixed by Jason Lowe (nodemanager)<br>
+ <b>Recover containers upon nodemanager restart</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1326">YARN-1326</a>.
+ Major sub-task reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA <br>
+ <b>RM should log using RMStore at startup time</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1250">YARN-1250</a>.
+ Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Zhijie Shen <br>
+ <b>Generic history service should support application-acls</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1063">YARN-1063</a>.
+ Major sub-task reported by Kyle Leckie and fixed by Remus Rusanu (nodemanager)<br>
+ <b>Winutils needs ability to create task as domain user</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1051">YARN-1051</a>.
+ Major improvement reported by Carlo Curino and fixed by Carlo Curino (capacityscheduler , resourcemanager , scheduler)<br>
+ <b>YARN Admission Control/Planner: enhancing the resource allocation model with time.</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-668">YARN-668</a>.
+ Blocker sub-task reported by Siddharth Seth and fixed by Junping Du <br>
+ <b>TokenIdentifier serialization should consider Unknown fields</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-611">YARN-611</a>.
+ Major sub-task reported by Chris Riccomini and fixed by Xuan Gong (resourcemanager)<br>
+ <b>Add an AM retry count reset window to YARN RM</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-415">YARN-415</a>.
+ Major new feature reported by Kendall Thrapp and fixed by Eric Payne (resourcemanager)<br>
+ <b>Capture aggregate memory allocation at the app-level for chargeback</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-90">YARN-90</a>.
+ Major sub-task reported by Ravi Gummadi and fixed by Varun Vasudev (nodemanager)<br>
+ <b>NodeManager should identify failed disks becoming good again</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6142">MAPREDUCE-6142</a>.
+ Critical sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+ <b>Test failure in TestJobHistoryEventHandler and TestMRTimelineEventHandling</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6126">MAPREDUCE-6126</a>.
+ Major bug reported by Junping Du and fixed by Junping Du <br>
+ <b>(Rumen) Rumen tool returns error "java.lang.IllegalArgumentException: JobBuilder.process(HistoryEvent): unknown event type"</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6125">MAPREDUCE-6125</a>.
+ Major bug reported by Mit Desai and fixed by Mit Desai (test)<br>
+ <b>TestContainerLauncherImpl sometimes fails</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6123">MAPREDUCE-6123</a>.
+ Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (test)<br>
+ <b>TestCombineFileInputFormat incorrectly starts 2 MiniDFSCluster instances.</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6122">MAPREDUCE-6122</a>.
+ Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (test)<br>
+ <b>TestLineRecordReader may fail due to test data files checked out of git with incorrect line endings.</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6115">MAPREDUCE-6115</a>.
+ Minor test reported by Ted Yu and fixed by Binglin Chang <br>
+ <b>TestPipeApplication#testSubmitter fails in trunk</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6109">MAPREDUCE-6109</a>.
+ Trivial bug reported by Charles Lamb and fixed by Charles Lamb (distcp)<br>
+ <b>Fix minor typo in distcp -p usage text</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6104">MAPREDUCE-6104</a>.
+ Major bug reported by Mit Desai and fixed by Mit Desai <br>
+ <b>TestJobHistoryParsing.testPartialJob fails in branch-2</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6095">MAPREDUCE-6095</a>.
+ Major bug reported by Gera Shegalov and fixed by Gera Shegalov (applicationmaster , distributed-cache)<br>
+ <b>Enable DistributedCache for uber-mode Jobs</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6094">MAPREDUCE-6094</a>.
+ Minor bug reported by Sangjin Lee and fixed by Akira AJISAKA (test)<br>
+ <b>TestMRCJCFileInputFormat.testAddInputPath() fails on trunk</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6093">MAPREDUCE-6093</a>.
+ Trivial bug reported by Charles Lamb and fixed by Charles Lamb (distcp , documentation)<br>
+ <b>minor distcp doc edits</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6091">MAPREDUCE-6091</a>.
+ Major bug reported by Sangjin Lee and fixed by Sangjin Lee (client)<br>
+ <b>YARNRunner.getJobStatus() fails with ApplicationNotFoundException if the job rolled off the RM view</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6090">MAPREDUCE-6090</a>.
+ Major bug reported by Robert Kanter and fixed by Robert Kanter (client)<br>
+ <b>mapred hsadmin getGroups fails to connect in some cases</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6086">MAPREDUCE-6086</a>.
+ Major improvement reported by zhihai xu and fixed by zhihai xu (security)<br>
+ <b>mapreduce.job.credentials.binary should allow all URIs</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6075">MAPREDUCE-6075</a>.
+ Major bug reported by Jason Lowe and fixed by Jason Lowe (jobhistoryserver)<br>
+ <b>HistoryServerFileSystemStateStore can create zero-length files</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6073">MAPREDUCE-6073</a>.
+ Trivial bug reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA (documentation)<br>
+ <b>Description of mapreduce.job.speculative.slowtaskthreshold in mapred-default should be moved into description tags</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6072">MAPREDUCE-6072</a>.
+ Minor improvement reported by Akira AJISAKA and fixed by Akira AJISAKA (documentation)<br>
+ <b>Remove INSTALL document</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6071">MAPREDUCE-6071</a>.
+ Trivial improvement reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA (client)<br>
+ <b>JobImpl#makeUberDecision doesn't log that Uber mode is disabled because of too much CPUs</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6070">MAPREDUCE-6070</a>.
+ Trivial improvement reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA (documentation)<br>
+ <b>yarn.app.am.resource.mb/cpu-vcores affects uber mode but is not documented</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6063">MAPREDUCE-6063</a>.
+ Major bug reported by zhihai xu and fixed by zhihai xu (mrv1 , mrv2)<br>
+ <b>In sortAndSpill of MapTask.java, size is calculated wrongly when bufend < bufstart.</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6052">MAPREDUCE-6052</a>.
+ Major bug reported by Junping Du and fixed by Junping Du <br>
+ <b>Support overriding log4j.properties per job</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6051">MAPREDUCE-6051</a>.
+ Trivial bug reported by Ray Chiang and fixed by Ray Chiang <br>
+ <b>Fix typos in log messages</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6048">MAPREDUCE-6048</a>.
+ Minor test reported by Ted Yu and fixed by Varun Vasudev <br>
+ <b>TestJavaSerialization fails in trunk build</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6044">MAPREDUCE-6044</a>.
+ Major bug reported by Zhijie Shen and fixed by Zhijie Shen (jobhistoryserver)<br>
+ <b>Fully qualified intermediate done directory will break per-user dir creation on Windows</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6036">MAPREDUCE-6036</a>.
+ Major bug reported by Mit Desai and fixed by chang li <br>
+ <b>TestJobEndNotifier fails intermittently in branch-2</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6032">MAPREDUCE-6032</a>.
+ Major bug reported by Benjamin Zhitomirsky and fixed by Benjamin Zhitomirsky (jobhistoryserver)<br>
+ <b>Unable to check mapreduce job status if submitted using a non-default namenode</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6029">MAPREDUCE-6029</a>.
+ Major bug reported by Ted Yu and fixed by Mit Desai <br>
+ <b>TestCommitterEventHandler fails in trunk</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6024">MAPREDUCE-6024</a>.
+ Critical improvement reported by zhaoyunjiong and fixed by zhaoyunjiong (mr-am , task)<br>
+ <b>java.net.SocketTimeoutException in Fetcher caused jobs stuck for more than 1 hour</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6022">MAPREDUCE-6022</a>.
+ Major bug reported by Jason Lowe and fixed by Jason Lowe <br>
+ <b>map_input_file is missing from streaming job environment</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6021">MAPREDUCE-6021</a>.
+ Major bug reported by Jason Lowe and fixed by Jason Lowe (mr-am)<br>
+ <b>MR AM should have working directory in LD_LIBRARY_PATH</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6019">MAPREDUCE-6019</a>.
+ Major bug reported by Xuan Gong and fixed by Craig Welch <br>
+ <b>MapReduce changes for exposing YARN/MR endpoints on multiple interfaces.</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6018">MAPREDUCE-6018</a>.
+ Major sub-task reported by Jonathan Eagles and fixed by Robert Kanter <br>
+ <b>Create a framework specific config to enable timeline server</b><br>
+ <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/
<TRUNCATED>
[08/25] hadoop git commit: YARN-2834. Fixed ResourceManager to ignore
token-renewal failures on recovery consistent with the (somewhat incorrect)
behaviour in the non-recovery case. Contributed by Jian He.
Posted by vi...@apache.org.
YARN-2834. Fixed ResourceManager to ignore token-renewal failures on recovery consistent with the (somewhat incorrect) behaviour in the non-recovery case. Contributed by Jian He.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e76faebc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e76faebc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e76faebc
Branch: refs/heads/HDFS-EC
Commit: e76faebc9589654e83c8244ef9aff88391e56b80
Parents: 770cc14
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Sun Nov 9 18:56:06 2014 -0800
Committer: Vinod Kumar Vavilapalli <vi...@apache.org>
Committed: Sun Nov 9 18:56:58 2014 -0800
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 4 ++
.../server/resourcemanager/rmapp/RMAppImpl.java | 27 ++------
.../rmapp/attempt/RMAppAttemptImpl.java | 4 +-
.../TestWorkPreservingRMRestart.java | 67 +++++++++++++++++---
.../rmapp/TestRMAppTransitions.java | 28 --------
5 files changed, 70 insertions(+), 60 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e76faebc/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8abdb9c..b76e28f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -925,6 +925,10 @@ Release 2.6.0 - 2014-11-15
YARN-2830. Add backwords compatible ContainerId.newInstance constructor.
(jeagles via acmurthy)
+ YARN-2834. Fixed ResourceManager to ignore token-renewal failures on recovery
+ consistent with the (somewhat incorrect) behaviour in the non-recovery case.
+ (Jian He via vinodkv)
+
Release 2.5.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e76faebc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 9b10872..ad92cc4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -714,7 +714,7 @@ public class RMAppImpl implements RMApp, Recoverable {
}
@Override
- public void recover(RMState state) throws Exception{
+ public void recover(RMState state) {
ApplicationState appState = state.getApplicationState().get(getApplicationId());
this.recoveredFinalState = appState.getState();
LOG.info("Recovering app: " + getApplicationId() + " with " +
@@ -830,14 +830,7 @@ public class RMAppImpl implements RMApp, Recoverable {
public RMAppState transition(RMAppImpl app, RMAppEvent event) {
RMAppRecoverEvent recoverEvent = (RMAppRecoverEvent) event;
- try {
- app.recover(recoverEvent.getRMState());
- } catch (Exception e) {
- String msg = app.applicationId + " failed to recover. " + e.getMessage();
- failToRecoverApp(app, event, msg, e);
- return RMAppState.FINAL_SAVING;
- }
-
+ app.recover(recoverEvent.getRMState());
// The app has completed.
if (app.recoveredFinalState != null) {
app.recoverAppAttempts();
@@ -852,10 +845,10 @@ public class RMAppImpl implements RMApp, Recoverable {
app.getApplicationId(), app.parseCredentials(),
app.submissionContext.getCancelTokensWhenComplete(), app.getUser());
} catch (Exception e) {
- String msg = "Failed to renew delegation token on recovery for "
- + app.applicationId + e.getMessage();
- failToRecoverApp(app, event, msg, e);
- return RMAppState.FINAL_SAVING;
+ String msg = "Failed to renew token for " + app.applicationId
+ + " on recovery : " + e.getMessage();
+ app.diagnostics.append(msg);
+ LOG.error(msg, e);
}
}
@@ -892,14 +885,6 @@ public class RMAppImpl implements RMApp, Recoverable {
// Thus we return ACCECPTED state on recovery.
return RMAppState.ACCEPTED;
}
-
- private void failToRecoverApp(RMAppImpl app, RMAppEvent event, String msg,
- Exception e) {
- app.diagnostics.append(msg);
- LOG.error(msg, e);
- app.rememberTargetTransitionsAndStoreState(event, new FinalTransition(
- RMAppState.FAILED), RMAppState.FAILED, RMAppState.FAILED);
- }
}
private static final class AddApplicationToSchedulerTransition extends
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e76faebc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index d3fe151..0d7e334 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -789,7 +789,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
}
@Override
- public void recover(RMState state) throws Exception {
+ public void recover(RMState state) {
ApplicationState appState =
state.getApplicationState().get(getAppAttemptId().getApplicationId());
ApplicationAttemptState attemptState =
@@ -823,7 +823,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
}
private void recoverAppAttemptCredentials(Credentials appAttemptTokens,
- RMAppAttemptState state) throws IOException {
+ RMAppAttemptState state) {
if (appAttemptTokens == null || state == RMAppAttemptState.FAILED
|| state == RMAppAttemptState.FINISHED
|| state == RMAppAttemptState.KILLED) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e76faebc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
index 2f0a839..1cefcf8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
@@ -18,15 +18,15 @@
package org.apache.hadoop.yarn.server.resourcemanager;
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.PrintWriter;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintWriter;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Arrays;
@@ -35,9 +35,10 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.service.Service;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@@ -50,9 +51,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSParentQueue;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy;
+import org.apache.hadoop.yarn.server.resourcemanager.TestRMRestart.TestSecurityMockRM;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
@@ -71,9 +70,13 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSParentQueue;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer;
import org.apache.hadoop.yarn.util.ControlledClock;
import org.apache.hadoop.yarn.util.SystemClock;
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
@@ -1011,4 +1014,50 @@ public class TestWorkPreservingRMRestart {
am0.unregisterAppAttempt(false);
}
+ @Test (timeout = 30000)
+ public void testAppFailedToRenewTokenOnRecovery() throws Exception {
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+ "kerberos");
+ conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
+ UserGroupInformation.setConfiguration(conf);
+ MemoryRMStateStore memStore = new MemoryRMStateStore();
+ memStore.init(conf);
+ MockRM rm1 = new TestSecurityMockRM(conf, memStore);
+ rm1.start();
+ MockNM nm1 =
+ new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
+ nm1.registerNode();
+ RMApp app1 = rm1.submitApp(200);
+ MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
+
+ MockRM rm2 = new TestSecurityMockRM(conf, memStore) {
+ protected DelegationTokenRenewer createDelegationTokenRenewer() {
+ return new DelegationTokenRenewer() {
+ @Override
+ public void addApplicationSync(ApplicationId applicationId,
+ Credentials ts, boolean shouldCancelAtEnd, String user)
+ throws IOException {
+ throw new IOException("Token renew failed !!");
+ }
+ };
+ }
+ };
+ nm1.setResourceTrackerService(rm2.getResourceTrackerService());
+ rm2.start();
+ NMContainerStatus containerStatus =
+ TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(), 1,
+ ContainerState.RUNNING);
+ nm1.registerNode(Arrays.asList(containerStatus), null);
+
+ // am re-register
+ rm2.waitForState(app1.getApplicationId(), RMAppState.ACCEPTED);
+ am1.setAMRMProtocol(rm2.getApplicationMasterService(), rm2.getRMContext());
+ am1.registerAppAttempt(true);
+ rm2.waitForState(app1.getApplicationId(), RMAppState.RUNNING);
+
+ // Because the token expired, the AM could crash.
+ nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
+ rm2.waitForState(am1.getApplicationAttemptId(), RMAppAttemptState.FAILED);
+ rm2.waitForState(app1.getApplicationId(), RMAppState.FAILED);
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e76faebc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
index ecb6b5c..bbfb0ee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
@@ -540,34 +540,6 @@ public class TestRMAppTransitions {
}
@Test (timeout = 30000)
- public void testAppRecoverToFailed() throws IOException {
- LOG.info("--- START: testAppRecoverToFailed ---");
- ApplicationSubmissionContext sub =
- Records.newRecord(ApplicationSubmissionContext.class);
- ContainerLaunchContext clc =
- Records.newRecord(ContainerLaunchContext.class);
- Credentials credentials = new Credentials();
- DataOutputBuffer dob = new DataOutputBuffer();
- credentials.writeTokenStorageToStream(dob);
- ByteBuffer securityTokens =
- ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
- clc.setTokens(securityTokens);
- sub.setAMContainerSpec(clc);
-
- RMApp application = createNewTestApp(sub);
- // NEW => FINAL_SAVING, event RMAppEventType.RECOVER
- RMState state = new RMState();
- RMAppEvent event =
- new RMAppRecoverEvent(application.getApplicationId(), state);
- // NPE will throw on recovery.
- application.handle(event);
- assertAppState(RMAppState.FINAL_SAVING, application);
- sendAppUpdateSavedEvent(application);
- rmDispatcher.await();
- assertAppState(RMAppState.FAILED, application);
- }
-
- @Test (timeout = 30000)
public void testAppNewKill() throws IOException {
LOG.info("--- START: testAppNewKill ---");
[15/25] hadoop git commit: HADOOP-9576. Changed
NetUtils#wrapException to throw EOFException instead of wrapping it as
IOException. Contributed by Steve Loughran
Posted by vi...@apache.org.
HADOOP-9576. Changed NetUtils#wrapException to throw EOFException instead of wrapping it as IOException. Contributed by Steve Loughran
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86bf8c71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86bf8c71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86bf8c71
Branch: refs/heads/HDFS-EC
Commit: 86bf8c7193013834f67e03bd67a320cc080ef32c
Parents: 2cc868d
Author: Jian He <ji...@apache.org>
Authored: Mon Nov 10 17:17:01 2014 -0800
Committer: Jian He <ji...@apache.org>
Committed: Mon Nov 10 17:25:29 2014 -0800
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../src/main/java/org/apache/hadoop/net/NetUtils.java | 8 ++++++++
.../test/java/org/apache/hadoop/net/TestNetUtils.java | 12 ++++++++++++
3 files changed, 23 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86bf8c71/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 51266a8..26c39cf 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -422,6 +422,9 @@ Release 2.7.0 - UNRELEASED
HADOOP-11294. Nfs3FileAttributes should not change the values of rdev,
nlink and size in the constructor. (Brandon Li via wheat9)
+ HADOOP-9576. Changed NetUtils#wrapException to throw EOFException instead
+ of wrapping it as IOException. (Steve Loughran via jianhe)
+
Release 2.6.0 - 2014-11-15
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86bf8c71/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
index 9ee0f3e..b535dda 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.net;
+import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
@@ -759,6 +760,13 @@ public class NetUtils {
+ " failed on socket timeout exception: " + exception
+ ";"
+ see("NoRouteToHost"));
+ } else if (exception instanceof EOFException) {
+ return wrapWithMessage(exception,
+ "End of File Exception between "
+ + getHostDetailsAsString(destHost, destPort, localHost)
+ + ": " + exception
+ + ";"
+ + see("EOFException"));
}
else {
return (IOException) new IOException("Failed on local exception: "
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86bf8c71/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
index b03afca..319e8a9 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.net;
import static org.junit.Assert.*;
+import java.io.EOFException;
import java.io.IOException;
import java.net.BindException;
import java.net.ConnectException;
@@ -257,6 +258,17 @@ public class TestNetUtils {
}
@Test
+ public void testWrapEOFException() throws Throwable {
+ IOException e = new EOFException("eof");
+ IOException wrapped = verifyExceptionClass(e, EOFException.class);
+ assertInException(wrapped, "eof");
+ assertWikified(wrapped);
+ assertInException(wrapped, "localhost");
+ assertRemoteDetailsIncluded(wrapped);
+ assertInException(wrapped, "/EOFException");
+ }
+
+ @Test
public void testGetConnectAddress() throws IOException {
NetUtils.addStaticResolution("host", "127.0.0.1");
InetSocketAddress addr = NetUtils.createSocketAddrForHost("host", 1);
[23/25] hadoop git commit: HDFS-7389. Named user ACL cannot stop the
user from accessing the FS entity. Contributed by Vinayakumar B.
Posted by vi...@apache.org.
HDFS-7389. Named user ACL cannot stop the user from accessing the FS entity. Contributed by Vinayakumar B.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/163bb550
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/163bb550
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/163bb550
Branch: refs/heads/HDFS-EC
Commit: 163bb55067bde71246b4030a08256ba9a8182dc8
Parents: 456b973
Author: cnauroth <cn...@apache.org>
Authored: Tue Nov 11 13:29:55 2014 -0800
Committer: cnauroth <cn...@apache.org>
Committed: Tue Nov 11 13:29:55 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
.../server/namenode/FSPermissionChecker.java | 1 +
.../hdfs/server/namenode/FSAclBaseTest.java | 37 ++++++++++++++++++--
3 files changed, 39 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/163bb550/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b183731..07762bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -413,6 +413,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7387. NFS may only do partial commit due to a race between COMMIT and write
(brandonli)
+ HDFS-7389. Named user ACL cannot stop the user from accessing the FS entity.
+ (Vinayakumar B via cnauroth)
+
Release 2.6.0 - 2014-11-15
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/163bb550/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index 2c48051..f994f6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -327,6 +327,7 @@ class FSPermissionChecker {
return;
}
foundMatch = true;
+ break;
}
} else if (type == AclEntryType.GROUP) {
// Use group entry (unnamed or named) with mask from permission bits
http://git-wip-us.apache.org/repos/asf/hadoop/blob/163bb550/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
index adca0aa..5066feb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
@@ -65,6 +65,9 @@ public abstract class FSAclBaseTest {
private static final UserGroupInformation SUPERGROUP_MEMBER =
UserGroupInformation.createUserForTesting("super", new String[] {
DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT });
+ // group member
+ private static final UserGroupInformation BOB = UserGroupInformation
+ .createUserForTesting("bob", new String[] { "groupY", "groupZ" });
protected static MiniDFSCluster cluster;
protected static Configuration conf;
@@ -74,7 +77,7 @@ public abstract class FSAclBaseTest {
@Rule
public ExpectedException exception = ExpectedException.none();
- private FileSystem fs, fsAsBruce, fsAsDiana, fsAsSupergroupMember;
+ private FileSystem fs, fsAsBruce, fsAsDiana, fsAsSupergroupMember, fsAsBob;
@AfterClass
public static void shutdown() {
@@ -93,7 +96,7 @@ public abstract class FSAclBaseTest {
@After
public void destroyFileSystems() {
IOUtils.cleanup(null, fs, fsAsBruce, fsAsDiana, fsAsSupergroupMember);
- fs = fsAsBruce = fsAsDiana = fsAsSupergroupMember = null;
+ fs = fsAsBruce = fsAsDiana = fsAsSupergroupMember = fsAsBob = null;
}
@Test
@@ -1283,6 +1286,35 @@ public abstract class FSAclBaseTest {
} catch (FileNotFoundException e) {
// expected
}
+
+ // Add a named group entry with only READ access
+ fsAsBruce.modifyAclEntries(p1, Lists.newArrayList(
+ aclEntry(ACCESS, GROUP, "groupY", READ)));
+ // Now bob should have read access, but not write
+ fsAsBob.access(p1, READ);
+ try {
+ fsAsBob.access(p1, WRITE);
+ fail("The access call should have failed.");
+ } catch (AccessControlException e) {
+ // expected;
+ }
+
+ // Add another named group entry with WRITE access
+ fsAsBruce.modifyAclEntries(p1, Lists.newArrayList(
+ aclEntry(ACCESS, GROUP, "groupZ", WRITE)));
+ // Now bob should have write access
+ fsAsBob.access(p1, WRITE);
+
+ // Add a named user entry to deny bob
+ fsAsBruce.modifyAclEntries(p1,
+ Lists.newArrayList(aclEntry(ACCESS, USER, "bob", NONE)));
+
+ try {
+ fsAsBob.access(p1, READ);
+ fail("The access call should have failed.");
+ } catch (AccessControlException e) {
+ // expected;
+ }
}
/**
@@ -1316,6 +1348,7 @@ public abstract class FSAclBaseTest {
fs = createFileSystem();
fsAsBruce = createFileSystem(BRUCE);
fsAsDiana = createFileSystem(DIANA);
+ fsAsBob = createFileSystem(BOB);
fsAsSupergroupMember = createFileSystem(SUPERGROUP_MEMBER);
}
[07/25] hadoop git commit: Updated CHANGES.txt to reflect
hadoop-2.6.0 release dates.
Posted by vi...@apache.org.
Updated CHANGES.txt to reflect hadoop-2.6.0 release dates.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/770cc144
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/770cc144
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/770cc144
Branch: refs/heads/HDFS-EC
Commit: 770cc144425b6188d9b93178c77a35cd830a60df
Parents: f62ec31
Author: Arun C. Murthy <ac...@apache.org>
Authored: Sun Nov 9 18:47:21 2014 -0800
Committer: Arun C. Murthy <ac...@apache.org>
Committed: Sun Nov 9 18:47:21 2014 -0800
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 2 +-
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +-
hadoop-mapreduce-project/CHANGES.txt | 2 +-
hadoop-yarn-project/CHANGES.txt | 2 +-
4 files changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/770cc144/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index ef487bb..cf91b30 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -417,7 +417,7 @@ Release 2.7.0 - UNRELEASED
HADOOP-11187 NameNode - KMS communication fails after a long period of
inactivity. (Arun Suresh via atm)
-Release 2.6.0 - UNRELEASED
+Release 2.6.0 - 2014-11-15
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/770cc144/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6904686..beac1c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -407,7 +407,7 @@ Release 2.7.0 - UNRELEASED
HDFS-7366. BlockInfo should take replication as a short in the constructor.
(Li Lu via wheat9)
-Release 2.6.0 - UNRELEASED
+Release 2.6.0 - 2014-11-15
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/770cc144/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 573408e..44dc557 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -236,7 +236,7 @@ Release 2.7.0 - UNRELEASED
BUG FIXES
-Release 2.6.0 - UNRELEASED
+Release 2.6.0 - 2014-11-15
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/770cc144/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 4ea0726..8abdb9c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -72,7 +72,7 @@ Release 2.7.0 - UNRELEASED
YARN-2713. "RM Home" link in NM should point to one of the RMs in an
HA setup. (kasha)
-Release 2.6.0 - UNRELEASED
+Release 2.6.0 - 2014-11-15
INCOMPATIBLE CHANGES
[16/25] hadoop git commit: YARN-2841. RMProxy should retry
EOFException. Contributed by Jian He
Posted by vi...@apache.org.
YARN-2841. RMProxy should retry EOFException. Contributed by Jian He
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c9a51f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c9a51f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c9a51f1
Branch: refs/heads/HDFS-EC
Commit: 5c9a51f140ba76ddb25580aeb288db25e3f9653f
Parents: 86bf8c7
Author: Xuan <xg...@apache.org>
Authored: Mon Nov 10 18:25:01 2014 -0800
Committer: Xuan <xg...@apache.org>
Committed: Mon Nov 10 18:25:01 2014 -0800
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 2 ++
.../src/main/java/org/apache/hadoop/yarn/client/RMProxy.java | 2 ++
.../main/java/org/apache/hadoop/yarn/client/ServerProxy.java | 2 ++
.../hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java | 7 +++++++
4 files changed, 13 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c9a51f1/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 259f4e2..e134d6b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -929,6 +929,8 @@ Release 2.6.0 - 2014-11-15
consistent with the (somewhat incorrect) behaviour in the non-recovery case.
(Jian He via vinodkv)
+ YARN-2841. RMProxy should retry EOFException. (Jian He via xgong)
+
Release 2.5.2 - 2014-11-10
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c9a51f1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
index ee09973..fa8d642 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.yarn.client;
+import java.io.EOFException;
import java.io.IOException;
import java.net.ConnectException;
import java.net.InetSocketAddress;
@@ -240,6 +241,7 @@ public class RMProxy<T> {
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
new HashMap<Class<? extends Exception>, RetryPolicy>();
+ exceptionToPolicyMap.put(EOFException.class, retryPolicy);
exceptionToPolicyMap.put(ConnectException.class, retryPolicy);
exceptionToPolicyMap.put(NoRouteToHostException.class, retryPolicy);
exceptionToPolicyMap.put(UnknownHostException.class, retryPolicy);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c9a51f1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
index 6c72dc0..b6fea62 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.yarn.client;
+import java.io.EOFException;
import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.net.NoRouteToHostException;
@@ -67,6 +68,7 @@ public class ServerProxy {
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
new HashMap<Class<? extends Exception>, RetryPolicy>();
+ exceptionToPolicyMap.put(EOFException.class, retryPolicy);
exceptionToPolicyMap.put(ConnectException.class, retryPolicy);
exceptionToPolicyMap.put(NoRouteToHostException.class, retryPolicy);
exceptionToPolicyMap.put(UnknownHostException.class, retryPolicy);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c9a51f1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
index b34262b..e367085 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.nodemanager;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
+import java.io.EOFException;
import java.io.File;
import java.io.IOException;
import java.net.InetAddress;
@@ -737,8 +738,14 @@ public class TestNodeStatusUpdater {
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
throws YarnException, IOException {
heartBeatID++;
+ if(heartBeatID == 1) {
+ // EOFException should be retried as well.
+ throw new EOFException("NodeHeartbeat exception");
+ }
+ else {
throw new java.net.ConnectException(
"NodeHeartbeat exception");
+ }
}
}
[14/25] hadoop git commit: HADOOP-11296. Nfs3FileAttributes should
not change the values of rdev,
nlink and size in the constructor. Contributed by Brandon Li.
Posted by vi...@apache.org.
HADOOP-11296. Nfs3FileAttributes should not change the values of rdev, nlink and size in the constructor. Contributed by Brandon Li.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2cc868de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2cc868de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2cc868de
Branch: refs/heads/HDFS-EC
Commit: 2cc868dede3187ef3e122e878b8ee0882c82dc81
Parents: 68a0508
Author: Haohui Mai <wh...@apache.org>
Authored: Mon Nov 10 15:42:47 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Mon Nov 10 15:42:58 2014 -0800
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 ++
.../hadoop/nfs/nfs3/Nfs3FileAttributes.java | 29 ++++++++------------
.../apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java | 25 ++++++++++++-----
3 files changed, 32 insertions(+), 25 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc868de/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index d3c4c00..51266a8 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -419,6 +419,9 @@ Release 2.7.0 - UNRELEASED
HADOOP-11289. Fix typo in RpcUtil log message. (Charles Lamb via wheat9)
+ HADOOP-11294. Nfs3FileAttributes should not change the values of rdev,
+ nlink and size in the constructor. (Brandon Li via wheat9)
+
Release 2.6.0 - 2014-11-15
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc868de/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java
index 47126d6..2832166 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java
@@ -49,8 +49,6 @@ public class Nfs3FileAttributes {
* values should be agreed upon by the client and server. If the client and
* server do not agree upon the values, the client should treat these fields
* as if they are set to 0.
- * <br>
- * For Hadoop, currently this field is always zero.
*/
public static class Specdata3 {
final int specdata1;
@@ -82,20 +80,17 @@ public class Nfs3FileAttributes {
}
public Nfs3FileAttributes() {
- this(NfsFileType.NFSREG, 0, (short)0, 0, 0, 0, 0, 0, 0, 0);
+ this(NfsFileType.NFSREG, 1, (short)0, 0, 0, 0, 0, 0, 0, 0, new Specdata3());
}
public Nfs3FileAttributes(NfsFileType nfsType, int nlink, short mode, int uid,
- int gid, long size, long fsid, long fileId, long mtime, long atime) {
+ int gid, long size, long fsid, long fileId, long mtime, long atime, Specdata3 rdev) {
this.type = nfsType.toValue();
this.mode = mode;
- this.nlink = (type == NfsFileType.NFSDIR.toValue()) ? (nlink + 2) : 1;
+ this.nlink = nlink;
this.uid = uid;
this.gid = gid;
this.size = size;
- if(type == NfsFileType.NFSDIR.toValue()) {
- this.size = getDirSize(nlink);
- }
this.used = this.size;
this.rdev = new Specdata3();
this.fsid = fsid;
@@ -103,6 +98,7 @@ public class Nfs3FileAttributes {
this.mtime = new NfsTime(mtime);
this.atime = atime != 0 ? new NfsTime(atime) : this.mtime;
this.ctime = this.mtime;
+ this.rdev = rdev;
}
public Nfs3FileAttributes(Nfs3FileAttributes other) {
@@ -147,10 +143,7 @@ public class Nfs3FileAttributes {
attr.gid = xdr.readInt();
attr.size = xdr.readHyper();
attr.used = xdr.readHyper();
- // Ignore rdev
- xdr.readInt();
- xdr.readInt();
- attr.rdev = new Specdata3();
+ attr.rdev = new Specdata3(xdr.readInt(), xdr.readInt());
attr.fsid = xdr.readHyper();
attr.fileId = xdr.readHyper();
attr.atime = NfsTime.deserialize(xdr);
@@ -228,11 +221,11 @@ public class Nfs3FileAttributes {
return this.gid;
}
- /**
- * HDFS directory size is always zero. Try to return something meaningful
- * here. Assume each child take 32bytes.
- */
- public static long getDirSize(int childNum) {
- return (childNum + 2) * 32;
+ public Specdata3 getRdev() {
+ return rdev;
+ }
+
+ public void setRdev(Specdata3 rdev) {
+ this.rdev = rdev;
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc868de/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
index 6c42c84..50e83ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
@@ -67,11 +67,14 @@ public class Nfs3Utils {
*/
NfsFileType fileType = fs.isDir() ? NfsFileType.NFSDIR : NfsFileType.NFSREG;
fileType = fs.isSymlink() ? NfsFileType.NFSLNK : fileType;
-
- return new Nfs3FileAttributes(fileType, fs.getChildrenNum(), fs
- .getPermission().toShort(), iug.getUidAllowingUnknown(fs.getOwner()),
- iug.getGidAllowingUnknown(fs.getGroup()), fs.getLen(), 0 /* fsid */,
- fs.getFileId(), fs.getModificationTime(), fs.getAccessTime());
+ int nlink = (fileType == NfsFileType.NFSDIR) ? fs.getChildrenNum() + 2 : 1;
+ long size = (fileType == NfsFileType.NFSDIR) ? getDirSize(fs
+ .getChildrenNum()) : fs.getLen();
+ return new Nfs3FileAttributes(fileType, nlink,
+ fs.getPermission().toShort(), iug.getUidAllowingUnknown(fs.getOwner()),
+ iug.getGidAllowingUnknown(fs.getGroup()), size, 0 /* fsid */,
+ fs.getFileId(), fs.getModificationTime(), fs.getAccessTime(),
+ new Nfs3FileAttributes.Specdata3());
}
public static Nfs3FileAttributes getFileAttr(DFSClient client,
@@ -80,6 +83,14 @@ public class Nfs3Utils {
return fs == null ? null : getNfs3FileAttrFromFileStatus(fs, iug);
}
+ /**
+ * HDFS directory size is always zero. Try to return something meaningful
+ * here. Assume each child takes 32 bytes.
+ */
+ public static long getDirSize(int childNum) {
+ return (childNum + 2) * 32;
+ }
+
public static WccAttr getWccAttr(DFSClient client, String fileIdPath)
throws IOException {
HdfsFileStatus fstat = getFileStatus(client, fileIdPath);
@@ -87,8 +98,8 @@ public class Nfs3Utils {
return null;
}
- long size = fstat.isDir() ? Nfs3FileAttributes.getDirSize(fstat
- .getChildrenNum()) : fstat.getLen();
+ long size = fstat.isDir() ? getDirSize(fstat.getChildrenNum()) : fstat
+ .getLen();
return new WccAttr(size, new NfsTime(fstat.getModificationTime()),
new NfsTime(fstat.getModificationTime()));
}
[06/25] hadoop git commit: HDFS-7383. Merged to branch-2.6 also.
Posted by vi...@apache.org.
HDFS-7383. Merged to branch-2.6 also.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f62ec317
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f62ec317
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f62ec317
Branch: refs/heads/HDFS-EC
Commit: f62ec31739cc15097107655c6c8265b5d3625817
Parents: 14b87b7
Author: Arun C. Murthy <ac...@apache.org>
Authored: Sun Nov 9 18:22:05 2014 -0800
Committer: Arun C. Murthy <ac...@apache.org>
Committed: Sun Nov 9 18:22:05 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62ec317/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index af18379..6904686 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -407,9 +407,6 @@ Release 2.7.0 - UNRELEASED
HDFS-7366. BlockInfo should take replication as an short in the constructor.
(Li Lu via wheat9)
- HDFS-7383. DataNode.requestShortCircuitFdsForRead may throw
- NullPointerException. (szetszwo via suresh)
-
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -1418,6 +1415,9 @@ Release 2.6.0 - UNRELEASED
HDFS-7382. DataNode in secure mode may throw NullPointerException if client
connects before DataNode registers itself with NameNode. (cnauroth)
+ HDFS-7383. DataNode.requestShortCircuitFdsForRead may throw
+ NullPointerException. (szetszwo via suresh)
+
Release 2.5.2 - UNRELEASED
INCOMPATIBLE CHANGES
[18/25] hadoop git commit: YARN-2735. diskUtilizationPercentageCutoff
and diskUtilizationSpaceCutoff are initialized twice in DirectoryCollection.
(Zhihai Xu via kasha)
Posted by vi...@apache.org.
YARN-2735. diskUtilizationPercentageCutoff and diskUtilizationSpaceCutoff are initialized twice in DirectoryCollection. (Zhihai Xu via kasha)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/061bc293
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/061bc293
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/061bc293
Branch: refs/heads/HDFS-EC
Commit: 061bc293c8dd3e2605cf150568988bde18407af6
Parents: 58e9bf4
Author: Karthik Kambatla <ka...@apache.org>
Authored: Tue Nov 11 10:31:39 2014 -0800
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Tue Nov 11 10:31:39 2014 -0800
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../hadoop/yarn/server/nodemanager/DirectoryCollection.java | 2 --
2 files changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061bc293/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 9bb016d..c6a063c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -62,6 +62,9 @@ Release 2.7.0 - UNRELEASED
YARN-2712. TestWorkPreservingRMRestart: Augment FS tests with
queue and headroom checks. (Tsuyoshi Ozawa via kasha)
+ YARN-2735. diskUtilizationPercentageCutoff and diskUtilizationSpaceCutoff
+ are initialized twice in DirectoryCollection. (Zhihai Xu via kasha)
+
OPTIMIZATIONS
BUG FIXES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061bc293/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
index 279787b..c019aa9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
@@ -146,8 +146,6 @@ class DirectoryCollection {
errorDirs = new CopyOnWriteArrayList<String>();
fullDirs = new CopyOnWriteArrayList<String>();
- diskUtilizationPercentageCutoff = utilizationPercentageCutOff;
- diskUtilizationSpaceCutoff = utilizationSpaceCutOff;
diskUtilizationPercentageCutoff =
utilizationPercentageCutOff < 0.0F ? 0.0F
: (utilizationPercentageCutOff > 100.0F ? 100.0F