Posted to common-commits@hadoop.apache.org by aw...@apache.org on 2015/07/10 17:42:06 UTC
[01/21] hadoop git commit: HDFS-8642. Make TestFileTruncate more reliable. (Contributed by Rakesh R)
Repository: hadoop
Updated Branches:
refs/heads/HADOOP-12111 5e42d11fb -> 34c014d59
HDFS-8642. Make TestFileTruncate more reliable. (Contributed by Rakesh R)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4119ad31
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4119ad31
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4119ad31
Branch: refs/heads/HADOOP-12111
Commit: 4119ad3112dcfb7286ca68288489bbcb6235cf53
Parents: 98e5926
Author: Arpit Agarwal <ar...@apache.org>
Authored: Wed Jul 8 09:31:02 2015 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Wed Jul 8 09:31:02 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../hdfs/server/namenode/TestFileTruncate.java | 18 ++++++------------
2 files changed, 9 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4119ad31/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b88b42a..a7c9e7c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1017,6 +1017,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8686. WebHdfsFileSystem#getXAttr(Path p, final String name) doesn't
work if namespace is in capitals (kanaka kumar avvaru via vinayakumarb)
+ HDFS-8642. Make TestFileTruncate more reliable. (Rakesh R via
+ Arpit Agarwal)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4119ad31/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index e0f9ad2..a91d6c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -62,9 +62,8 @@ import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Level;
-import org.junit.AfterClass;
+import org.junit.After;
import org.junit.Before;
-import org.junit.BeforeClass;
import org.junit.Test;
public class TestFileTruncate {
@@ -90,8 +89,8 @@ public class TestFileTruncate {
private Path parent;
- @BeforeClass
- public static void startUp() throws IOException {
+ @Before
+ public void setUp() throws IOException {
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCK_SIZE);
@@ -105,20 +104,15 @@ public class TestFileTruncate {
.waitSafeMode(true)
.build();
fs = cluster.getFileSystem();
+ parent = new Path("/test");
}
- @AfterClass
- public static void tearDown() throws IOException {
+ @After
+ public void tearDown() throws IOException {
if(fs != null) fs.close();
if(cluster != null) cluster.shutdown();
}
- @Before
- public void setup() throws IOException {
- parent = new Path("/test");
- fs.delete(parent, true);
- }
-
/**
* Truncate files of different sizes byte by byte.
*/
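The switch from @BeforeClass/@AfterClass to @Before/@After means the MiniDFSCluster is rebuilt for every test method rather than shared across the class, so a test that corrupts cluster state can no longer poison later tests. A minimal standalone sketch of that lifecycle pattern (the class and field names below are hypothetical, not from the patch):
----------------------------------------------------------------------
import java.io.IOException;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class PerTestLifecycleExample {
  // Stands in for the shared cluster/fs state TestFileTruncate rebuilds.
  private StringBuilder resource;

  @Before
  public void setUp() throws IOException {
    // Runs before every @Test: each case starts from a known-good state.
    resource = new StringBuilder("fresh");
  }

  @After
  public void tearDown() throws IOException {
    // Runs after every @Test, even when the test fails mid-way.
    resource = null;
  }

  @Test
  public void testSeesFreshState() {
    resource.append("-used"); // cannot observe residue from other tests
  }
}
----------------------------------------------------------------------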
[21/21] hadoop git commit: Merge branch 'trunk' into HADOOP-12111
Posted by aw...@apache.org.
Merge branch 'trunk' into HADOOP-12111
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34c014d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34c014d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34c014d5
Branch: refs/heads/HADOOP-12111
Commit: 34c014d593989f3da5376f762ebbdfd80284d69a
Parents: 5e42d11 0824426
Author: Allen Wittenauer <aw...@apache.org>
Authored: Fri Jul 10 08:41:27 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Fri Jul 10 08:41:27 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 17 +
.../java/org/apache/hadoop/fs/FileContext.java | 3 +
.../java/org/apache/hadoop/fs/FileSystem.java | 20 +-
.../main/java/org/apache/hadoop/fs/Globber.java | 17 +
.../org/apache/hadoop/util/CpuTimeTracker.java | 115 +++
.../java/org/apache/hadoop/util/SysInfo.java | 123 +++
.../org/apache/hadoop/util/SysInfoLinux.java | 535 ++++++++++++
.../org/apache/hadoop/util/SysInfoWindows.java | 196 +++++
...yptoStreamsWithOpensslAesCtrCryptoCodec.java | 3 +
.../apache/hadoop/util/TestSysInfoLinux.java | 361 ++++++++
.../apache/hadoop/util/TestSysInfoWindows.java | 100 +++
.../dev-support/findbugsExcludeFile.xml | 4 +
hadoop-hdfs-project/hadoop-hdfs-client/pom.xml | 42 +
.../src/main/proto/ClientDatanodeProtocol.proto | 247 ++++++
.../src/main/proto/ClientNamenodeProtocol.proto | 863 +++++++++++++++++++
.../hadoop-hdfs-client/src/main/proto/acl.proto | 108 +++
.../src/main/proto/datatransfer.proto | 304 +++++++
.../src/main/proto/encryption.proto | 67 ++
.../src/main/proto/hdfs.proto | 611 +++++++++++++
.../src/main/proto/inotify.proto | 126 +++
.../src/main/proto/xattr.proto | 75 ++
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 15 +
hadoop-hdfs-project/hadoop-hdfs/pom.xml | 10 +-
.../hadoop-hdfs/src/contrib/bkjournal/pom.xml | 2 +-
.../ClientNamenodeProtocolTranslatorPB.java | 2 +-
.../server/blockmanagement/BlockManager.java | 2 -
.../hdfs/server/namenode/FSEditLogOp.java | 4 +-
.../src/main/proto/ClientDatanodeProtocol.proto | 247 ------
.../src/main/proto/ClientNamenodeProtocol.proto | 863 -------------------
.../hadoop-hdfs/src/main/proto/acl.proto | 113 ---
.../src/main/proto/datatransfer.proto | 304 -------
.../hadoop-hdfs/src/main/proto/editlog.proto | 35 +
.../hadoop-hdfs/src/main/proto/encryption.proto | 67 --
.../hadoop-hdfs/src/main/proto/hdfs.proto | 611 -------------
.../hadoop-hdfs/src/main/proto/inotify.proto | 126 ---
.../hadoop-hdfs/src/main/proto/xattr.proto | 80 --
.../hdfs/server/namenode/TestFileTruncate.java | 21 +-
.../hdfs/tools/TestDelegationTokenFetcher.java | 39 +
hadoop-mapreduce-project/CHANGES.txt | 3 +
.../hadoop/mapred/TestShuffleHandler.java | 6 +-
hadoop-maven-plugins/pom.xml | 8 +
.../hadoop/maven/plugin/protoc/ProtocMojo.java | 188 +++-
.../gridmix/DummyResourceCalculatorPlugin.java | 19 +
.../hadoop/yarn/sls/nodemanager/NodeInfo.java | 8 +-
.../yarn/sls/scheduler/RMNodeWrapper.java | 5 +
hadoop-yarn-project/CHANGES.txt | 15 +
.../yarn/client/TestResourceTrackerOnHA.java | 2 +-
.../hadoop/yarn/event/AsyncDispatcher.java | 24 +-
.../apache/hadoop/yarn/util/CpuTimeTracker.java | 100 ---
.../util/LinuxResourceCalculatorPlugin.java | 392 +--------
.../yarn/util/ProcfsBasedProcessTree.java | 34 +-
.../yarn/util/ResourceCalculatorPlugin.java | 84 +-
.../yarn/util/WindowsBasedProcessTree.java | 2 +-
.../util/WindowsResourceCalculatorPlugin.java | 158 +---
.../hadoop/yarn/event/DrainDispatcher.java | 13 +-
.../hadoop/yarn/event/TestAsyncDispatcher.java | 61 ++
.../util/TestLinuxResourceCalculatorPlugin.java | 324 -------
.../util/TestResourceCalculatorProcessTree.java | 2 +-
.../TestWindowsResourceCalculatorPlugin.java | 86 --
.../yarn/server/api/records/NodeStatus.java | 43 +-
.../server/api/records/ResourceUtilization.java | 133 +++
.../api/records/impl/pb/NodeStatusPBImpl.java | 34 +-
.../impl/pb/ResourceUtilizationPBImpl.java | 104 +++
.../yarn/server/api/records/package-info.java | 19 +
.../main/proto/yarn_server_common_protos.proto | 7 +
.../nodemanager/NodeStatusUpdaterImpl.java | 19 +-
.../monitor/ContainersMonitor.java | 3 +-
.../monitor/ContainersMonitorImpl.java | 28 +
.../TestPrivilegedOperationExecutor.java | 6 +-
.../reservation/GreedyReservationAgent.java | 27 +-
.../reservation/InMemoryPlan.java | 9 +-
.../InMemoryReservationAllocation.java | 24 +-
.../RLESparseResourceAllocation.java | 43 +-
.../reservation/ReservationAllocation.java | 3 +-
.../reservation/ReservationSystemUtil.java | 51 ++
.../server/resourcemanager/rmnode/RMNode.java | 2 +
.../resourcemanager/rmnode/RMNodeImpl.java | 43 +-
.../resourcemanager/webapp/RMAppsBlock.java | 10 +-
.../yarn/server/resourcemanager/MockNodes.java | 5 +
.../resourcemanager/TestRMNodeTransitions.java | 36 +-
.../reservation/ReservationSystemTestUtil.java | 11 +-
.../reservation/TestCapacityOverTimePolicy.java | 16 +-
.../reservation/TestGreedyReservationAgent.java | 2 +-
.../reservation/TestInMemoryPlan.java | 37 +-
.../TestInMemoryReservationAllocation.java | 29 +-
.../TestRLESparseResourceAllocation.java | 33 +-
.../TestSimpleCapacityReplanner.java | 11 +-
87 files changed, 5074 insertions(+), 3726 deletions(-)
----------------------------------------------------------------------
[20/21] hadoop git commit: YARN-3445. Cache runningApps in RMNode for getting running apps on given NodeId. (Junping Du via mingma)
Posted by aw...@apache.org.
YARN-3445. Cache runningApps in RMNode for getting running apps on given NodeId. (Junping Du via mingma)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08244264
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08244264
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08244264
Branch: refs/heads/HADOOP-12111
Commit: 08244264c0583472b9c4e16591cfde72c6db62a2
Parents: b489080
Author: Ming Ma <mi...@apache.org>
Authored: Fri Jul 10 08:30:10 2015 -0700
Committer: Ming Ma <mi...@apache.org>
Committed: Fri Jul 10 08:30:10 2015 -0700
----------------------------------------------------------------------
.../hadoop/yarn/sls/nodemanager/NodeInfo.java | 8 +++-
.../yarn/sls/scheduler/RMNodeWrapper.java | 5 +++
hadoop-yarn-project/CHANGES.txt | 3 ++
.../server/resourcemanager/rmnode/RMNode.java | 2 +
.../resourcemanager/rmnode/RMNodeImpl.java | 43 ++++++++++++++++----
.../yarn/server/resourcemanager/MockNodes.java | 5 +++
.../resourcemanager/TestRMNodeTransitions.java | 36 ++++++++++++++--
7 files changed, 91 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08244264/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
index ee6eb7b..440779c 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
@@ -62,7 +62,8 @@ public class NodeInfo {
private NodeState state;
private List<ContainerId> toCleanUpContainers;
private List<ApplicationId> toCleanUpApplications;
-
+ private List<ApplicationId> runningApplications;
+
public FakeRMNodeImpl(NodeId nodeId, String nodeAddr, String httpAddress,
Resource perNode, String rackName, String healthReport,
int cmdPort, String hostName, NodeState state) {
@@ -77,6 +78,7 @@ public class NodeInfo {
this.state = state;
toCleanUpApplications = new ArrayList<ApplicationId>();
toCleanUpContainers = new ArrayList<ContainerId>();
+ runningApplications = new ArrayList<ApplicationId>();
}
public NodeId getNodeID() {
@@ -135,6 +137,10 @@ public class NodeInfo {
return toCleanUpApplications;
}
+ public List<ApplicationId> getRunningApps() {
+ return runningApplications;
+ }
+
public void updateNodeHeartbeatResponseForCleanup(
NodeHeartbeatResponse response) {
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08244264/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index b64be1b..a6633ae 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -119,6 +119,11 @@ public class RMNodeWrapper implements RMNode {
}
@Override
+ public List<ApplicationId> getRunningApps() {
+ return node.getRunningApps();
+ }
+
+ @Override
public void updateNodeHeartbeatResponseForCleanup(
NodeHeartbeatResponse nodeHeartbeatResponse) {
node.updateNodeHeartbeatResponseForCleanup(nodeHeartbeatResponse);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08244264/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2a9ff98..db000d7 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1678,6 +1678,9 @@ Release 2.6.0 - 2014-11-18
YARN-2811. In Fair Scheduler, reservation fulfillments shouldn't ignore max
share (Siqi Li via Sandy Ryza)
+ YARN-3445. Cache runningApps in RMNode for getting running apps on given
+ NodeId. (Junping Du via mingma)
+
IMPROVEMENTS
YARN-2197. Add a link to YARN CHANGES.txt in the left side of doc
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08244264/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
index 95eeaf6..0386be6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
@@ -119,6 +119,8 @@ public interface RMNode {
public List<ApplicationId> getAppsToCleanup();
+ List<ApplicationId> getRunningApps();
+
/**
* Update a {@link NodeHeartbeatResponse} with the list of containers and
* applications to clean up for this node.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08244264/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index d1e6190..9bc91c7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -123,11 +123,16 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
new HashSet<ContainerId>();
/* the list of applications that have finished and need to be purged */
- private final List<ApplicationId> finishedApplications = new ArrayList<ApplicationId>();
+ private final List<ApplicationId> finishedApplications =
+ new ArrayList<ApplicationId>();
+
+ /* the list of applications that are running on this node */
+ private final List<ApplicationId> runningApplications =
+ new ArrayList<ApplicationId>();
private NodeHeartbeatResponse latestNodeHeartBeatResponse = recordFactory
.newRecordInstance(NodeHeartbeatResponse.class);
-
+
private static final StateMachineFactory<RMNodeImpl,
NodeState,
RMNodeEventType,
@@ -136,7 +141,7 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
NodeState,
RMNodeEventType,
RMNodeEvent>(NodeState.NEW)
-
+
//Transitions from NEW state
.addTransition(NodeState.NEW, NodeState.RUNNING,
RMNodeEventType.STARTED, new AddNodeTransition())
@@ -383,6 +388,16 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
}
@Override
+ public List<ApplicationId> getRunningApps() {
+ this.readLock.lock();
+ try {
+ return new ArrayList<ApplicationId>(this.runningApplications);
+ } finally {
+ this.readLock.unlock();
+ }
+ }
+
+ @Override
public List<ContainerId> getContainersToCleanUp() {
this.readLock.lock();
@@ -519,9 +534,12 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
LOG.warn("Cannot get RMApp by appId=" + appId
+ ", just added it to finishedApplications list for cleanup");
rmNode.finishedApplications.add(appId);
+ rmNode.runningApplications.remove(appId);
return;
}
+ // Add running applications back due to Node add or Node reconnection.
+ rmNode.runningApplications.add(appId);
context.getDispatcher().getEventHandler()
.handle(new RMAppRunningOnNodeEvent(appId, nodeId));
}
@@ -707,8 +725,9 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
@Override
public void transition(RMNodeImpl rmNode, RMNodeEvent event) {
- rmNode.finishedApplications.add(((
- RMNodeCleanAppEvent) event).getAppId());
+ ApplicationId appId = ((RMNodeCleanAppEvent) event).getAppId();
+ rmNode.finishedApplications.add(appId);
+ rmNode.runningApplications.remove(appId);
}
}
@@ -910,12 +929,22 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
+ "cleanup, no further processing");
continue;
}
- if (finishedApplications.contains(containerId.getApplicationAttemptId()
- .getApplicationId())) {
+
+ ApplicationId containerAppId =
+ containerId.getApplicationAttemptId().getApplicationId();
+
+ if (finishedApplications.contains(containerAppId)) {
LOG.info("Container " + containerId
+ " belongs to an application that is already killed,"
+ " no further processing");
continue;
+ } else if (!runningApplications.contains(containerAppId)) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Container " + containerId
+ + " is the first container get launched for application "
+ + containerAppId);
+ }
+ runningApplications.add(containerAppId);
}
// Process running containers
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08244264/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
index 2d863d1..095fe28 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
@@ -187,6 +187,11 @@ public class MockNodes {
}
@Override
+ public List<ApplicationId> getRunningApps() {
+ return null;
+ }
+
+ @Override
public void updateNodeHeartbeatResponseForCleanup(NodeHeartbeatResponse response) {
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08244264/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
index 01f4357..ece896b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
@@ -33,6 +33,7 @@ import java.util.List;
import org.apache.hadoop.util.HostsFileReader;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeState;
@@ -485,9 +486,9 @@ public class TestRMNodeTransitions {
NodeId nodeId = node.getNodeID();
// Expire a container
- ContainerId completedContainerId = BuilderUtils.newContainerId(
- BuilderUtils.newApplicationAttemptId(
- BuilderUtils.newApplicationId(0, 0), 0), 0);
+ ContainerId completedContainerId = BuilderUtils.newContainerId(
+ BuilderUtils.newApplicationAttemptId(
+ BuilderUtils.newApplicationId(0, 0), 0), 0);
node.handle(new RMNodeCleanContainerEvent(nodeId, completedContainerId));
Assert.assertEquals(1, node.getContainersToCleanUp().size());
@@ -512,6 +513,35 @@ public class TestRMNodeTransitions {
Assert.assertEquals(finishedAppId, hbrsp.getApplicationsToCleanup().get(0));
}
+ @Test(timeout=20000)
+ public void testUpdateHeartbeatResponseForAppLifeCycle() {
+ RMNodeImpl node = getRunningNode();
+ NodeId nodeId = node.getNodeID();
+
+ ApplicationId runningAppId = BuilderUtils.newApplicationId(0, 1);
+ // Create a running container
+ ContainerId runningContainerId = BuilderUtils.newContainerId(
+ BuilderUtils.newApplicationAttemptId(
+ runningAppId, 0), 0);
+
+ ContainerStatus status = ContainerStatus.newInstance(runningContainerId,
+ ContainerState.RUNNING, "", 0);
+ List<ContainerStatus> statusList = new ArrayList<ContainerStatus>();
+ statusList.add(status);
+ NodeHealthStatus nodeHealth = NodeHealthStatus.newInstance(true,
+ "", System.currentTimeMillis());
+ node.handle(new RMNodeStatusEvent(nodeId, nodeHealth,
+ statusList, null, null));
+
+ Assert.assertEquals(1, node.getRunningApps().size());
+
+ // Finish an application
+ ApplicationId finishedAppId = runningAppId;
+ node.handle(new RMNodeCleanAppEvent(nodeId, finishedAppId));
+ Assert.assertEquals(1, node.getAppsToCleanup().size());
+ Assert.assertEquals(0, node.getRunningApps().size());
+ }
+
private RMNodeImpl getRunningNode() {
return getRunningNode(null, 0);
}
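RMNodeImpl guards runningApplications with the node's read lock and hands callers a defensive copy, so heartbeat processing can keep mutating the list while a reader iterates its snapshot. A minimal standalone sketch of that copy-under-read-lock pattern, assuming a ReentrantReadWriteLock (class and method names here are hypothetical):
----------------------------------------------------------------------
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class RunningAppsTracker {
  private final List<String> running = new ArrayList<String>();
  private final ReadWriteLock lock = new ReentrantReadWriteLock();

  public List<String> getRunningApps() {
    lock.readLock().lock();
    try {
      // Return a copy so callers can iterate without holding the lock.
      return new ArrayList<String>(running);
    } finally {
      lock.readLock().unlock();
    }
  }

  public void appStarted(String appId) {
    lock.writeLock().lock();
    try {
      if (!running.contains(appId)) {
        running.add(appId); // first container for this app on the node
      }
    } finally {
      lock.writeLock().unlock();
    }
  }

  public void appFinished(String appId) {
    lock.writeLock().lock();
    try {
      running.remove(appId); // app moved to the cleanup list
    } finally {
      lock.writeLock().unlock();
    }
  }
}
----------------------------------------------------------------------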
[11/21] hadoop git commit: YARN-3878. AsyncDispatcher can hang while stopping if it is configured for draining events on stop. (Varun Saxena via kasha)
Posted by aw...@apache.org.
YARN-3878. AsyncDispatcher can hang while stopping if it is configured for draining events on stop. (Varun Saxena via kasha)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa067c6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa067c6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa067c6a
Branch: refs/heads/HADOOP-12111
Commit: aa067c6aa47b4c79577096817acc00ad6421180c
Parents: 527c40e
Author: Karthik Kambatla <ka...@apache.org>
Authored: Thu Jul 9 09:48:29 2015 -0700
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Thu Jul 9 09:48:29 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +
.../hadoop/yarn/event/AsyncDispatcher.java | 24 ++++----
.../hadoop/yarn/event/DrainDispatcher.java | 13 ++++-
.../hadoop/yarn/event/TestAsyncDispatcher.java | 61 ++++++++++++++++++++
4 files changed, 87 insertions(+), 14 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa067c6a/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 19f0854..3c232eb 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -624,6 +624,9 @@ Release 2.7.2 - UNRELEASED
YARN-3690. [JDK8] 'mvn site' fails. (Brahma Reddy Battula via aajisaka)
+ YARN-3878. AsyncDispatcher can hang while stopping if it is configured for
+ draining events on stop. (Varun Saxena via kasha)
+
Release 2.7.1 - 2015-07-06
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa067c6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
index c54b9c7..646611f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
@@ -55,9 +55,6 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
// stop functionality.
private volatile boolean drainEventsOnStop = false;
- // Indicates all the remaining dispatcher's events on stop have been drained
- // and processed.
- private volatile boolean drained = true;
private Object waitForDrained = new Object();
// For drainEventsOnStop enabled only, block newly coming events into the
@@ -84,13 +81,12 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
@Override
public void run() {
while (!stopped && !Thread.currentThread().isInterrupted()) {
- drained = eventQueue.isEmpty();
// blockNewEvents is only set when dispatcher is draining to stop,
// adding this check is to avoid the overhead of acquiring the lock
// and calling notify every time in the normal run of the loop.
if (blockNewEvents) {
synchronized (waitForDrained) {
- if (drained) {
+ if (eventQueue.isEmpty()) {
waitForDrained.notify();
}
}
@@ -139,7 +135,7 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
blockNewEvents = true;
LOG.info("AsyncDispatcher is draining to stop, igonring any new events.");
synchronized (waitForDrained) {
- while (!drained && eventHandlingThread.isAlive()) {
+ while (!eventQueue.isEmpty() && eventHandlingThread.isAlive()) {
waitForDrained.wait(1000);
LOG.info("Waiting for AsyncDispatcher to drain. Thread state is :" +
eventHandlingThread.getState());
@@ -223,12 +219,21 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
return handlerInstance;
}
+ @VisibleForTesting
+ protected boolean hasPendingEvents() {
+ return !eventQueue.isEmpty();
+ }
+
+ @VisibleForTesting
+ protected boolean isEventThreadWaiting() {
+ return eventHandlingThread.getState() == Thread.State.WAITING;
+ }
+
class GenericEventHandler implements EventHandler<Event> {
public void handle(Event event) {
if (blockNewEvents) {
return;
}
- drained = false;
/* all this method does is enqueue all the events onto the queue */
int qSize = eventQueue.size();
@@ -285,9 +290,4 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
}
};
}
-
- @VisibleForTesting
- protected boolean isDrained() {
- return this.drained;
- }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa067c6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
index da5ae44..d1f4fe9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
@@ -27,15 +27,24 @@ public class DrainDispatcher extends AsyncDispatcher {
this(new LinkedBlockingQueue<Event>());
}
- private DrainDispatcher(BlockingQueue<Event> eventQueue) {
+ public DrainDispatcher(BlockingQueue<Event> eventQueue) {
super(eventQueue);
}
/**
+ * Wait till event thread enters WAITING state (i.e. waiting for new events).
+ */
+ public void waitForEventThreadToWait() {
+ while (!isEventThreadWaiting()) {
+ Thread.yield();
+ }
+ }
+
+ /**
* Busy loop waiting for all queued events to drain.
*/
public void await() {
- while (!isDrained()) {
+ while (hasPendingEvents()) {
Thread.yield();
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa067c6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java
new file mode 100644
index 0000000..ee17ddd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.event;
+
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestAsyncDispatcher {
+
+ /* This test checks whether the dispatcher hangs on close if the following
+ * two things happen:
+ * 1. A thread that was putting an event onto the event queue is interrupted.
+ * 2. The event queue is empty on close.
+ */
+ @SuppressWarnings({ "unchecked", "rawtypes" })
+ @Test(timeout=10000)
+ public void testDispatcherOnCloseIfQueueEmpty() throws Exception {
+ BlockingQueue<Event> eventQueue = spy(new LinkedBlockingQueue<Event>());
+ Event event = mock(Event.class);
+ doThrow(new InterruptedException()).when(eventQueue).put(event);
+ DrainDispatcher disp = new DrainDispatcher(eventQueue);
+ disp.init(new Configuration());
+ disp.setDrainEventsOnStop();
+ disp.start();
+ // Wait for event handler thread to start and begin waiting for events.
+ disp.waitForEventThreadToWait();
+ try {
+ disp.getEventHandler().handle(event);
+ } catch (YarnRuntimeException e) {
+ }
+ // Queue should be empty and dispatcher should not hang on close
+ Assert.assertTrue("Event Queue should have been empty",
+ eventQueue.isEmpty());
+ disp.close();
+ }
+}
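The fix drops the volatile drained flag, which could read stale when the enqueuing thread was interrupted, and has both the dispatch loop and the stop path consult eventQueue.isEmpty() directly. A minimal standalone sketch of that drain-on-stop handshake (hypothetical names; a simplification of AsyncDispatcher, not the class itself):
----------------------------------------------------------------------
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class DrainOnStop {
  private final BlockingQueue<Runnable> queue =
      new LinkedBlockingQueue<Runnable>();
  private final Object waitForDrained = new Object();
  private volatile boolean blockNewEvents = false;
  private Thread worker;

  public void start() {
    worker = new Thread(new Runnable() {
      public void run() {
        while (!Thread.currentThread().isInterrupted()) {
          if (blockNewEvents) {
            synchronized (waitForDrained) {
              // Check the queue itself; no shadow flag to fall out of sync.
              if (queue.isEmpty()) {
                waitForDrained.notify();
              }
            }
          }
          try {
            queue.take().run();
          } catch (InterruptedException e) {
            return; // stopping
          }
        }
      }
    });
    worker.start();
  }

  public void stop() throws InterruptedException {
    blockNewEvents = true; // reject new events while draining
    synchronized (waitForDrained) {
      while (!queue.isEmpty() && worker.isAlive()) {
        waitForDrained.wait(1000); // timed wait tolerates missed notifies
      }
    }
    worker.interrupt();
    worker.join();
  }
}
----------------------------------------------------------------------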
[04/21] hadoop git commit: HADOOP-12194. Support for incremental generation in the protoc plugin.
Posted by aw...@apache.org.
HADOOP-12194. Support for incremental generation in the protoc plugin.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/625d7ed9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/625d7ed9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/625d7ed9
Branch: refs/heads/HADOOP-12111
Commit: 625d7ed9eb65f0df204b506ce92c11803fbce273
Parents: fc6182d
Author: Andrew Wang <wa...@apache.org>
Authored: Wed Jul 8 11:09:43 2015 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Wed Jul 8 11:09:43 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +
hadoop-maven-plugins/pom.xml | 8 +
.../hadoop/maven/plugin/protoc/ProtocMojo.java | 188 +++++++++++++++++--
3 files changed, 185 insertions(+), 14 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/625d7ed9/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8ab109d..6cc6b71 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -719,6 +719,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-12172. FsShell mkdir -p makes an unnecessary check for the existence
of the parent. (cnauroth)
+ HADOOP-12194. Support for incremental generation in the protoc plugin.
+ (wang)
+
BUG FIXES
HADOOP-11802: DomainSocketWatcher thread terminates sometimes after there
http://git-wip-us.apache.org/repos/asf/hadoop/blob/625d7ed9/hadoop-maven-plugins/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-maven-plugins/pom.xml b/hadoop-maven-plugins/pom.xml
index b48b9ac..b39c22b 100644
--- a/hadoop-maven-plugins/pom.xml
+++ b/hadoop-maven-plugins/pom.xml
@@ -47,6 +47,14 @@
<version>${maven.plugin-tools.version}</version>
<scope>provided</scope>
</dependency>
+ <dependency>
+ <groupId>org.codehaus.jackson</groupId>
+ <artifactId>jackson-core-asl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.codehaus.jackson</groupId>
+ <artifactId>jackson-mapper-asl</artifactId>
+ </dependency>
</dependencies>
<build>
<plugins>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/625d7ed9/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java
----------------------------------------------------------------------
diff --git a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java
index 465b713..b9be33e 100644
--- a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java
+++ b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java
@@ -22,11 +22,21 @@ import org.apache.maven.plugins.annotations.LifecyclePhase;
import org.apache.maven.plugins.annotations.Mojo;
import org.apache.maven.plugins.annotations.Parameter;
import org.apache.maven.project.MavenProject;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.type.TypeReference;
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.List;
-
+import java.util.Map;
+import java.util.zip.CRC32;
@Mojo(name="protoc", defaultPhase = LifecyclePhase.GENERATE_SOURCES)
public class ProtocMojo extends AbstractMojo {
@@ -49,6 +59,118 @@ public class ProtocMojo extends AbstractMojo {
@Parameter(required=true)
private String protocVersion;
+ @Parameter(defaultValue =
+ "${project.build.directory}/hadoop-maven-plugins-protoc-checksums.json")
+ private String checksumPath;
+
+ /**
+ * Compares include and source file checksums against previously computed
+ * checksums stored in a json file in the build directory.
+ */
+ public class ChecksumComparator {
+
+ private final Map<String, Long> storedChecksums;
+ private final Map<String, Long> computedChecksums;
+
+ private final File checksumFile;
+
+ ChecksumComparator(String checksumPath) throws IOException {
+ checksumFile = new File(checksumPath);
+ // Read in the checksums
+ if (checksumFile.exists()) {
+ ObjectMapper mapper = new ObjectMapper();
+ storedChecksums = mapper
+ .readValue(checksumFile, new TypeReference<Map<String, Long>>() {
+ });
+ } else {
+ storedChecksums = new HashMap<>(0);
+ }
+ computedChecksums = new HashMap<>();
+ }
+
+ public boolean hasChanged(File file) throws IOException {
+ if (!file.exists()) {
+ throw new FileNotFoundException(
+ "Specified protoc include or source does not exist: " + file);
+ }
+ if (file.isDirectory()) {
+ return hasDirectoryChanged(file);
+ } else if (file.isFile()) {
+ return hasFileChanged(file);
+ } else {
+ throw new IOException("Not a file or directory: " + file);
+ }
+ }
+
+ private boolean hasDirectoryChanged(File directory) throws IOException {
+ File[] listing = directory.listFiles();
+ boolean changed = false;
+ // Do not exit early, since we need to compute and save checksums
+ // for each file within the directory.
+ for (File f : listing) {
+ if (f.isDirectory()) {
+ if (hasDirectoryChanged(f)) {
+ changed = true;
+ }
+ } else if (f.isFile()) {
+ if (hasFileChanged(f)) {
+ changed = true;
+ }
+ } else {
+ getLog().debug("Skipping entry that is not a file or directory: "
+ + f);
+ }
+ }
+ return changed;
+ }
+
+ private boolean hasFileChanged(File file) throws IOException {
+ long computedCsum = computeChecksum(file);
+
+ // Return if the generated csum matches the stored csum
+ Long storedCsum = storedChecksums.get(file.getCanonicalPath());
+ if (storedCsum == null || storedCsum.longValue() != computedCsum) {
+ // It has changed.
+ return true;
+ }
+ return false;
+ }
+
+ private long computeChecksum(File file) throws IOException {
+ // If we've already computed the csum, reuse the computed value
+ final String canonicalPath = file.getCanonicalPath();
+ if (computedChecksums.containsKey(canonicalPath)) {
+ return computedChecksums.get(canonicalPath);
+ }
+ // Compute the csum for the file
+ CRC32 crc = new CRC32();
+ byte[] buffer = new byte[1024*64];
+ try (BufferedInputStream in =
+ new BufferedInputStream(new FileInputStream(file))) {
+ while (true) {
+ int read = in.read(buffer);
+ if (read <= 0) {
+ break;
+ }
+ crc.update(buffer, 0, read);
+ }
+ }
+ // Save it in the generated map and return
+ final long computedCsum = crc.getValue();
+ computedChecksums.put(canonicalPath, computedCsum);
+ return crc.getValue();
+ }
+
+ public void writeChecksums() throws IOException {
+ ObjectMapper mapper = new ObjectMapper();
+ try (BufferedOutputStream out = new BufferedOutputStream(
+ new FileOutputStream(checksumFile))) {
+ mapper.writeValue(out, computedChecksums);
+ getLog().info("Wrote protoc checksums to file " + checksumFile);
+ }
+ }
+ }
+
public void execute() throws MojoExecutionException {
try {
List<String> command = new ArrayList<String>();
@@ -58,7 +180,7 @@ public class ProtocMojo extends AbstractMojo {
List<String> out = new ArrayList<String>();
if (exec.run(command, out) == 127) {
getLog().error("protoc, not found at: " + protocCommand);
- throw new MojoExecutionException("protoc failure");
+ throw new MojoExecutionException("protoc failure");
} else {
if (out.isEmpty()) {
getLog().error("stdout: " + out);
@@ -67,36 +189,74 @@ public class ProtocMojo extends AbstractMojo {
} else {
if (!out.get(0).endsWith(protocVersion)) {
throw new MojoExecutionException(
- "protoc version is '" + out.get(0) + "', expected version is '"
- + protocVersion + "'");
+ "protoc version is '" + out.get(0) + "', expected version is '"
+ + protocVersion + "'");
}
}
}
if (!output.mkdirs()) {
if (!output.exists()) {
- throw new MojoExecutionException("Could not create directory: " +
- output);
+ throw new MojoExecutionException(
+ "Could not create directory: " + output);
}
}
+
+ // Whether the import or source protoc files have changed.
+ ChecksumComparator comparator = new ChecksumComparator(checksumPath);
+ boolean importsChanged = false;
+
command = new ArrayList<String>();
command.add(protocCommand);
command.add("--java_out=" + output.getCanonicalPath());
if (imports != null) {
for (File i : imports) {
+ if (comparator.hasChanged(i)) {
+ importsChanged = true;
+ }
command.add("-I" + i.getCanonicalPath());
}
}
+ // Filter to generate classes for just the changed source files.
+ List<File> changedSources = new ArrayList<>();
+ boolean sourcesChanged = false;
for (File f : FileSetUtils.convertFileSetToFiles(source)) {
- command.add(f.getCanonicalPath());
+ // Need to recompile if the source has changed, or if any import has
+ // changed.
+ if (comparator.hasChanged(f) || importsChanged) {
+ sourcesChanged = true;
+ changedSources.add(f);
+ command.add(f.getCanonicalPath());
+ }
}
- exec = new Exec(this);
- out = new ArrayList<String>();
- if (exec.run(command, out) != 0) {
- getLog().error("protoc compiler error");
- for (String s : out) {
- getLog().error(s);
+
+ if (!sourcesChanged && !importsChanged) {
+ getLog().info("No changes detected in protoc files, skipping "
+ + "generation.");
+ } else {
+ if (getLog().isDebugEnabled()) {
+ StringBuilder b = new StringBuilder();
+ b.append("Generating classes for the following protoc files: [");
+ String prefix = "";
+ for (File f : changedSources) {
+ b.append(prefix);
+ b.append(f.toString());
+ prefix = ", ";
+ }
+ b.append("]");
+ getLog().debug(b.toString());
}
- throw new MojoExecutionException("protoc failure");
+
+ exec = new Exec(this);
+ out = new ArrayList<String>();
+ if (exec.run(command, out) != 0) {
+ getLog().error("protoc compiler error");
+ for (String s : out) {
+ getLog().error(s);
+ }
+ throw new MojoExecutionException("protoc failure");
+ }
+ // Write the new checksum file on success.
+ comparator.writeChecksums();
}
} catch (Throwable ex) {
throw new MojoExecutionException(ex.toString(), ex);
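ChecksumComparator decides whether to re-run protoc by comparing a CRC32 of each .proto source and import against checksums persisted in a JSON file from the previous build. A minimal standalone sketch of the per-file CRC32 computation it relies on (hypothetical helper class, not part of the plugin):
----------------------------------------------------------------------
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.zip.CRC32;

public class FileCrc {
  public static long checksum(File file) throws IOException {
    CRC32 crc = new CRC32();
    byte[] buffer = new byte[64 * 1024];
    try (BufferedInputStream in =
        new BufferedInputStream(new FileInputStream(file))) {
      int read;
      while ((read = in.read(buffer)) > 0) {
        crc.update(buffer, 0, read); // fold each chunk into the running CRC
      }
    }
    return crc.getValue();
  }

  public static void main(String[] args) throws IOException {
    // Recompile only when this value differs from the stored checksum.
    System.out.println(checksum(new File(args[0])));
  }
}
----------------------------------------------------------------------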
[17/21] hadoop git commit: YARN-3888. ApplicationMaster link is broken in RM WebUI when appstate is NEW. Contributed by Bibin A Chundatt
Posted by aw...@apache.org.
YARN-3888. ApplicationMaster link is broken in RM WebUI when appstate is NEW. Contributed by Bibin A Chundatt
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/52148767
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/52148767
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/52148767
Branch: refs/heads/HADOOP-12111
Commit: 52148767924baf423172d26f2c6d8a4cfc6e143f
Parents: 1a0752d
Author: Xuan <xg...@apache.org>
Authored: Thu Jul 9 21:37:33 2015 -0700
Committer: Xuan <xg...@apache.org>
Committed: Thu Jul 9 21:37:33 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../yarn/server/resourcemanager/webapp/RMAppsBlock.java | 10 ++++++----
2 files changed, 9 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/52148767/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 89b5e9f..2a9ff98 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -607,6 +607,9 @@ Release 2.8.0 - UNRELEASED
YARN-3892. Fixed NPE on RMStateStore#serviceStop when
CapacityScheduler#serviceInit fails. (Bibin A Chundatt via jianhe)
+ YARN-3888. ApplicationMaster link is broken in RM WebUI when appstate is NEW.
+ (Bibin A Chundatt via xgong)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/52148767/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
index d252c30..5e80d23 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
@@ -131,13 +131,15 @@ public class RMAppsBlock extends AppsBlock {
String trackingURL =
app.getTrackingUrl() == null
- || app.getTrackingUrl().equals(UNAVAILABLE) ? null : app
- .getTrackingUrl();
+ || app.getTrackingUrl().equals(UNAVAILABLE)
+ || app.getAppState() == YarnApplicationState.NEW ? null : app
+ .getTrackingUrl();
String trackingUI =
app.getTrackingUrl() == null
- || app.getTrackingUrl().equals(UNAVAILABLE) ? "Unassigned" : app
- .getAppState() == YarnApplicationState.FINISHED
+ || app.getTrackingUrl().equals(UNAVAILABLE)
+ || app.getAppState() == YarnApplicationState.NEW ? "Unassigned"
+ : app.getAppState() == YarnApplicationState.FINISHED
|| app.getAppState() == YarnApplicationState.FAILED
|| app.getAppState() == YarnApplicationState.KILLED ? "History"
: "ApplicationMaster";
[12/21] hadoop git commit: HADOOP-12180. Move ResourceCalculatorPlugin from YARN to Common. (Chris Douglas via kasha)
Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac604837/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLinuxResourceCalculatorPlugin.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLinuxResourceCalculatorPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLinuxResourceCalculatorPlugin.java
deleted file mode 100644
index a59d503..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLinuxResourceCalculatorPlugin.java
+++ /dev/null
@@ -1,324 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.util;
-
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Random;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.fs.Path;
-import org.junit.Test;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * A JUnit test to test {@link LinuxResourceCalculatorPlugin}
- * Create the fake /proc/ information and verify the parsing and calculation
- */
-public class TestLinuxResourceCalculatorPlugin {
- /**
- * LinuxResourceCalculatorPlugin with a fake timer
- */
- static class FakeLinuxResourceCalculatorPlugin extends
- LinuxResourceCalculatorPlugin {
-
- long currentTime = 0;
- public FakeLinuxResourceCalculatorPlugin(String procfsMemFile,
- String procfsCpuFile,
- String procfsStatFile,
- long jiffyLengthInMillis) {
- super(procfsMemFile, procfsCpuFile, procfsStatFile, jiffyLengthInMillis);
- }
- @Override
- long getCurrentTime() {
- return currentTime;
- }
- public void advanceTime(long adv) {
- currentTime += adv * this.getJiffyLengthInMillis();
- }
- }
- private static final FakeLinuxResourceCalculatorPlugin plugin;
- private static String TEST_ROOT_DIR = new Path(System.getProperty(
- "test.build.data", "/tmp")).toString().replace(' ', '+');
- private static final String FAKE_MEMFILE;
- private static final String FAKE_CPUFILE;
- private static final String FAKE_STATFILE;
- private static final long FAKE_JIFFY_LENGTH = 10L;
- static {
- int randomNum = (new Random()).nextInt(1000000000);
- FAKE_MEMFILE = TEST_ROOT_DIR + File.separator + "MEMINFO_" + randomNum;
- FAKE_CPUFILE = TEST_ROOT_DIR + File.separator + "CPUINFO_" + randomNum;
- FAKE_STATFILE = TEST_ROOT_DIR + File.separator + "STATINFO_" + randomNum;
- plugin = new FakeLinuxResourceCalculatorPlugin(FAKE_MEMFILE, FAKE_CPUFILE,
- FAKE_STATFILE,
- FAKE_JIFFY_LENGTH);
- }
- static final String MEMINFO_FORMAT =
- "MemTotal: %d kB\n" +
- "MemFree: %d kB\n" +
- "Buffers: 138244 kB\n" +
- "Cached: 947780 kB\n" +
- "SwapCached: 142880 kB\n" +
- "Active: 3229888 kB\n" +
- "Inactive: %d kB\n" +
- "SwapTotal: %d kB\n" +
- "SwapFree: %d kB\n" +
- "Dirty: 122012 kB\n" +
- "Writeback: 0 kB\n" +
- "AnonPages: 2710792 kB\n" +
- "Mapped: 24740 kB\n" +
- "Slab: 132528 kB\n" +
- "SReclaimable: 105096 kB\n" +
- "SUnreclaim: 27432 kB\n" +
- "PageTables: 11448 kB\n" +
- "NFS_Unstable: 0 kB\n" +
- "Bounce: 0 kB\n" +
- "CommitLimit: 4125904 kB\n" +
- "Committed_AS: 4143556 kB\n" +
- "VmallocTotal: 34359738367 kB\n" +
- "VmallocUsed: 1632 kB\n" +
- "VmallocChunk: 34359736375 kB\n" +
- "HugePages_Total: 0\n" +
- "HugePages_Free: 0\n" +
- "HugePages_Rsvd: 0\n" +
- "Hugepagesize: 2048 kB";
-
- static final String CPUINFO_FORMAT =
- "processor : %s\n" +
- "vendor_id : AuthenticAMD\n" +
- "cpu family : 15\n" +
- "model : 33\n" +
- "model name : Dual Core AMD Opteron(tm) Processor 280\n" +
- "stepping : 2\n" +
- "cpu MHz : %f\n" +
- "cache size : 1024 KB\n" +
- "physical id : %s\n" +
- "siblings : 2\n" +
- "core id : %s\n" +
- "cpu cores : 2\n" +
- "fpu : yes\n" +
- "fpu_exception : yes\n" +
- "cpuid level : 1\n" +
- "wp : yes\n" +
- "flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov " +
- "pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt lm " +
- "3dnowext 3dnow pni lahf_lm cmp_legacy\n" +
- "bogomips : 4792.41\n" +
- "TLB size : 1024 4K pages\n" +
- "clflush size : 64\n" +
- "cache_alignment : 64\n" +
- "address sizes : 40 bits physical, 48 bits virtual\n" +
- "power management: ts fid vid ttp";
-
- static final String STAT_FILE_FORMAT =
- "cpu %d %d %d 1646495089 831319 48713 164346 0\n" +
- "cpu0 15096055 30805 3823005 411456015 206027 13 14269 0\n" +
- "cpu1 14760561 89890 6432036 408707910 456857 48074 130857 0\n" +
- "cpu2 12761169 20842 3758639 413976772 98028 411 10288 0\n" +
- "cpu3 12355207 47322 5789691 412354390 70406 213 8931 0\n" +
- "intr 114648668 20010764 2 0 945665 2 0 0 0 0 0 0 0 4 0 0 0 0 0 0\n" +
- "ctxt 242017731764\n" +
- "btime 1257808753\n" +
- "processes 26414943\n" +
- "procs_running 1\n" +
- "procs_blocked 0\n";
-
- /**
- * Test parsing /proc/stat and /proc/cpuinfo
- * @throws IOException
- */
- @Test
- public void parsingProcStatAndCpuFile() throws IOException {
- // Write fake /proc/cpuinfo file.
- long numProcessors = 8;
- long cpuFrequencyKHz = 2392781;
- String fileContent = "";
- for (int i = 0; i < numProcessors; i++) {
- fileContent +=
- String.format(CPUINFO_FORMAT, i, cpuFrequencyKHz / 1000D, 0, 0)
- + "\n";
- }
- File tempFile = new File(FAKE_CPUFILE);
- tempFile.deleteOnExit();
- FileWriter fWriter = new FileWriter(FAKE_CPUFILE);
- fWriter.write(fileContent);
- fWriter.close();
- assertEquals(plugin.getNumProcessors(), numProcessors);
- assertEquals(plugin.getCpuFrequency(), cpuFrequencyKHz);
-
- // Write fake /proc/stat file.
- long uTime = 54972994;
- long nTime = 188860;
- long sTime = 19803373;
- tempFile = new File(FAKE_STATFILE);
- tempFile.deleteOnExit();
- updateStatFile(uTime, nTime, sTime);
- assertEquals(plugin.getCumulativeCpuTime(),
- FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
- assertEquals(plugin.getCpuUsage(), (float)(CpuTimeTracker.UNAVAILABLE),0.0);
-
- // Advance the time and sample again to test the CPU usage calculation
- uTime += 100L;
- plugin.advanceTime(200L);
- updateStatFile(uTime, nTime, sTime);
- assertEquals(plugin.getCumulativeCpuTime(),
- FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
- assertEquals(plugin.getCpuUsage(), 6.25F, 0.0);
-
- // Advance the time and sample again. This time, we call getCpuUsage() only.
- uTime += 600L;
- plugin.advanceTime(300L);
- updateStatFile(uTime, nTime, sTime);
- assertEquals(plugin.getCpuUsage(), 25F, 0.0);
-
- // Advance very short period of time (one jiffy length).
- // In this case, CPU usage should not be updated.
- uTime += 1L;
- plugin.advanceTime(1L);
- updateStatFile(uTime, nTime, sTime);
- assertEquals(plugin.getCumulativeCpuTime(),
- FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
- assertEquals(plugin.getCpuUsage(), 25F, 0.0); // CPU usage is not updated.
- }
-
- /**
- * Write information to fake /proc/stat file
- */
- private void updateStatFile(long uTime, long nTime, long sTime)
- throws IOException {
- FileWriter fWriter = new FileWriter(FAKE_STATFILE);
- fWriter.write(String.format(STAT_FILE_FORMAT, uTime, nTime, sTime));
- fWriter.close();
- }
-
- /**
- * Test parsing /proc/meminfo
- * @throws IOException
- */
- @Test
- public void parsingProcMemFile() throws IOException {
- long memTotal = 4058864L;
- long memFree = 99632L;
- long inactive = 567732L;
- long swapTotal = 2096472L;
- long swapFree = 1818480L;
- File tempFile = new File(FAKE_MEMFILE);
- tempFile.deleteOnExit();
- FileWriter fWriter = new FileWriter(FAKE_MEMFILE);
- fWriter.write(String.format(MEMINFO_FORMAT,
- memTotal, memFree, inactive, swapTotal, swapFree));
-
- fWriter.close();
- assertEquals(plugin.getAvailablePhysicalMemorySize(),
- 1024L * (memFree + inactive));
- assertEquals(plugin.getAvailableVirtualMemorySize(),
- 1024L * (memFree + inactive + swapFree));
- assertEquals(plugin.getPhysicalMemorySize(), 1024L * memTotal);
- assertEquals(plugin.getVirtualMemorySize(), 1024L * (memTotal + swapTotal));
- }
-
- @Test
- public void testCoreCounts() throws IOException {
-
- String fileContent = "";
- // single core, hyper threading
- long numProcessors = 2;
- long cpuFrequencyKHz = 2392781;
- for (int i = 0; i < numProcessors; i++) {
- fileContent =
- fileContent.concat(String.format(CPUINFO_FORMAT, i,
- cpuFrequencyKHz / 1000D, 0, 0));
- fileContent = fileContent.concat("\n");
- }
- writeFakeCPUInfoFile(fileContent);
- plugin.setReadCpuInfoFile(false);
- assertEquals(numProcessors, plugin.getNumProcessors());
- assertEquals(1, plugin.getNumCores());
-
- // single socket quad core, no hyper threading
- fileContent = "";
- numProcessors = 4;
- for (int i = 0; i < numProcessors; i++) {
- fileContent =
- fileContent.concat(String.format(CPUINFO_FORMAT, i,
- cpuFrequencyKHz / 1000D, 0, i));
- fileContent = fileContent.concat("\n");
- }
- writeFakeCPUInfoFile(fileContent);
- plugin.setReadCpuInfoFile(false);
- assertEquals(numProcessors, plugin.getNumProcessors());
- assertEquals(4, plugin.getNumCores());
-
- // dual socket single core, hyper threading
- fileContent = "";
- numProcessors = 4;
- for (int i = 0; i < numProcessors; i++) {
- fileContent =
- fileContent.concat(String.format(CPUINFO_FORMAT, i,
- cpuFrequencyKHz / 1000D, i / 2, 0));
- fileContent = fileContent.concat("\n");
- }
- writeFakeCPUInfoFile(fileContent);
- plugin.setReadCpuInfoFile(false);
- assertEquals(numProcessors, plugin.getNumProcessors());
- assertEquals(2, plugin.getNumCores());
-
- // dual socket, dual core, no hyper threading
- fileContent = "";
- numProcessors = 4;
- for (int i = 0; i < numProcessors; i++) {
- fileContent =
- fileContent.concat(String.format(CPUINFO_FORMAT, i,
- cpuFrequencyKHz / 1000D, i / 2, i % 2));
- fileContent = fileContent.concat("\n");
- }
- writeFakeCPUInfoFile(fileContent);
- plugin.setReadCpuInfoFile(false);
- assertEquals(numProcessors, plugin.getNumProcessors());
- assertEquals(4, plugin.getNumCores());
-
- // dual socket, dual core, hyper threading
- fileContent = "";
- numProcessors = 8;
- for (int i = 0; i < numProcessors; i++) {
- fileContent =
- fileContent.concat(String.format(CPUINFO_FORMAT, i,
- cpuFrequencyKHz / 1000D, i / 4, (i % 4) / 2));
- fileContent = fileContent.concat("\n");
- }
- writeFakeCPUInfoFile(fileContent);
- plugin.setReadCpuInfoFile(false);
- assertEquals(numProcessors, plugin.getNumProcessors());
- assertEquals(4, plugin.getNumCores());
- }
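
The five scenarios in testCoreCounts above pin down the core-counting rule: a
physical core is a distinct (physical id, core id) pair, so hyper-threaded
siblings sharing both ids collapse into one core. A small sketch of that rule as
the assertions imply it (a hypothetical helper, not the plugin's implementation):

    import java.util.HashSet;
    import java.util.Set;

    public class CoreCountMath {
      // Count distinct (physical id, core id) pairs across all processors.
      static int numCores(int[] physicalIds, int[] coreIds) {
        Set<String> cores = new HashSet<String>();
        for (int i = 0; i < physicalIds.length; i++) {
          cores.add(physicalIds[i] + ":" + coreIds[i]);
        }
        return cores.size();
      }

      public static void main(String[] args) {
        // dual socket, dual core, hyper threading: 8 processors -> 4 cores
        System.out.println(numCores(new int[] {0, 0, 0, 0, 1, 1, 1, 1},
                                    new int[] {0, 0, 1, 1, 0, 0, 1, 1}));
      }
    }
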
-
- private void writeFakeCPUInfoFile(String content) throws IOException {
- File tempFile = new File(FAKE_CPUFILE);
- FileWriter fWriter = new FileWriter(FAKE_CPUFILE);
- tempFile.deleteOnExit();
- try {
- fWriter.write(content);
- } finally {
- IOUtils.closeQuietly(fWriter);
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac604837/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestResourceCalculatorProcessTree.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestResourceCalculatorProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestResourceCalculatorProcessTree.java
index 777ea9f..7a3e0e7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestResourceCalculatorProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestResourceCalculatorProcessTree.java
@@ -65,7 +65,7 @@ public class TestResourceCalculatorProcessTree {
@Override
public float getCpuUsagePercent() {
- return CpuTimeTracker.UNAVAILABLE;
+ return UNAVAILABLE;
}
public boolean checkPidPgrpidForMatch() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac604837/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestWindowsResourceCalculatorPlugin.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestWindowsResourceCalculatorPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestWindowsResourceCalculatorPlugin.java
deleted file mode 100644
index a9e20bc..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestWindowsResourceCalculatorPlugin.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.util;
-
-import org.junit.Test;
-import static org.junit.Assert.assertTrue;
-
-public class TestWindowsResourceCalculatorPlugin {
-
-
- class WindowsResourceCalculatorPluginTester extends WindowsResourceCalculatorPlugin {
- private String infoStr = null;
- @Override
- String getSystemInfoInfoFromShell() {
- return infoStr;
- }
- }
-
- @Test (timeout = 30000)
- public void parseSystemInfoString() {
- WindowsResourceCalculatorPluginTester tester = new WindowsResourceCalculatorPluginTester();
- // info str derived from windows shell command has \r\n termination
- tester.infoStr = "17177038848,8589467648,15232745472,6400417792,1,2805000,6261812\r\n";
- // call a method to refresh values
- tester.getAvailablePhysicalMemorySize();
- // verify information has been refreshed
- assertTrue(tester.vmemSize == 17177038848L);
- assertTrue(tester.memSize == 8589467648L);
- assertTrue(tester.vmemAvailable == 15232745472L);
- assertTrue(tester.memAvailable == 6400417792L);
- assertTrue(tester.numProcessors == 1);
- assertTrue(tester.cpuFrequencyKhz == 2805000L);
- assertTrue(tester.cumulativeCpuTimeMs == 6261812L);
- assertTrue(tester.cpuUsage == -1);
- }
-
- @Test (timeout = 20000)
- public void refreshAndCpuUsage() throws InterruptedException {
- WindowsResourceCalculatorPluginTester tester = new WindowsResourceCalculatorPluginTester();
- // info str derived from windows shell command has \r\n termination
- tester.infoStr = "17177038848,8589467648,15232745472,6400417792,1,2805000,6261812\r\n";
- tester.getAvailablePhysicalMemorySize();
- // verify information has been refreshed
- assertTrue(tester.memAvailable == 6400417792L);
- assertTrue(tester.cpuUsage == -1);
-
- tester.infoStr = "17177038848,8589467648,15232745472,5400417792,1,2805000,6261812\r\n";
- tester.getAvailablePhysicalMemorySize();
- // verify information has not been refreshed
- assertTrue(tester.memAvailable == 6400417792L);
- assertTrue(tester.cpuUsage == -1);
-
- Thread.sleep(1500);
- tester.infoStr = "17177038848,8589467648,15232745472,5400417792,1,2805000,6286812\r\n";
- tester.getAvailablePhysicalMemorySize();
- // verify information has been refreshed
- assertTrue(tester.memAvailable == 5400417792L);
- assertTrue(tester.cpuUsage >= 0.1);
- }
-
- @Test (timeout = 20000)
- public void errorInGetSystemInfo() {
- WindowsResourceCalculatorPluginTester tester = new WindowsResourceCalculatorPluginTester();
- // info str derived from windows shell command has \r\n termination
- tester.infoStr = null;
- // call a method to refresh values
- tester.getAvailablePhysicalMemorySize();
- }
-
-}
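
For context on the deleted Windows test above: the plugin under test parses one
CRLF-terminated, comma-separated line emitted by a helper shell command. A rough
sketch of that parse, with the field order inferred from the test's infoStr and
assertions (the class name is hypothetical and the plugin's real internals are not
shown in this diff):

    public class WindowsSystemInfoParser {
      long vmemSize, memSize, vmemAvailable, memAvailable;
      long numProcessors, cpuFrequencyKhz, cumulativeCpuTimeMs;

      void parse(String infoStr) {
        // e.g. "17177038848,8589467648,15232745472,6400417792,1,2805000,6261812\r\n"
        String[] f = infoStr.trim().split(",");
        vmemSize            = Long.parseLong(f[0]);
        memSize             = Long.parseLong(f[1]);
        vmemAvailable       = Long.parseLong(f[2]);
        memAvailable        = Long.parseLong(f[3]);
        numProcessors       = Long.parseLong(f[4]);
        cpuFrequencyKhz     = Long.parseLong(f[5]);
        cumulativeCpuTimeMs = Long.parseLong(f[6]);
      }

      public static void main(String[] args) {
        WindowsSystemInfoParser p = new WindowsSystemInfoParser();
        p.parse("17177038848,8589467648,15232745472,6400417792,1,2805000,6261812\r\n");
        System.out.println(p.memAvailable); // 6400417792
      }
    }
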
[15/21] hadoop git commit: YARN-3800. Reduce storage footprint for
ReservationAllocation. Contributed by Anubhav Dhoot.
Posted by aw...@apache.org.
YARN-3800. Reduce storage footprint for ReservationAllocation. Contributed by Anubhav Dhoot.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e602fa3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e602fa3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e602fa3
Branch: refs/heads/HADOOP-12111
Commit: 0e602fa3a1529134214452fba10a90307d9c2072
Parents: f4ca530
Author: carlo curino <Carlo Curino>
Authored: Thu Jul 9 16:47:35 2015 -0700
Committer: carlo curino <Carlo Curino>
Committed: Thu Jul 9 16:51:59 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 ++
.../reservation/GreedyReservationAgent.java | 27 ++++++-----
.../reservation/InMemoryPlan.java | 9 ++--
.../InMemoryReservationAllocation.java | 24 +++++----
.../RLESparseResourceAllocation.java | 43 ++---------------
.../reservation/ReservationAllocation.java | 3 +-
.../reservation/ReservationSystemUtil.java | 51 ++++++++++++++++++++
.../reservation/ReservationSystemTestUtil.java | 11 +++--
.../reservation/TestCapacityOverTimePolicy.java | 16 +++---
.../reservation/TestGreedyReservationAgent.java | 2 +-
.../reservation/TestInMemoryPlan.java | 37 ++++++++++----
.../TestInMemoryReservationAllocation.java | 29 ++++++-----
.../TestRLESparseResourceAllocation.java | 33 ++++++-------
.../TestSimpleCapacityReplanner.java | 11 +++--
14 files changed, 176 insertions(+), 123 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e602fa3/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3c232eb..89b5e9f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -323,6 +323,9 @@ Release 2.8.0 - UNRELEASED
YARN-3827. Migrate YARN native build to new CMake framework (Alan Burlison
via Colin P. McCabe)
+ YARN-3800. Reduce storage footprint for ReservationAllocation. (Anubhav Dhoot
+ via curino)
+
OPTIMIZATIONS
YARN-3339. TestDockerContainerExecutor should pull a single image and not
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e602fa3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/GreedyReservationAgent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/GreedyReservationAgent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/GreedyReservationAgent.java
index 5a61b94..214df1c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/GreedyReservationAgent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/GreedyReservationAgent.java
@@ -97,8 +97,8 @@ public class GreedyReservationAgent implements ReservationAgent {
long curDeadline = deadline;
long oldDeadline = -1;
- Map<ReservationInterval, ReservationRequest> allocations =
- new HashMap<ReservationInterval, ReservationRequest>();
+ Map<ReservationInterval, Resource> allocations =
+ new HashMap<ReservationInterval, Resource>();
RLESparseResourceAllocation tempAssigned =
new RLESparseResourceAllocation(plan.getResourceCalculator(),
plan.getMinimumAllocation());
@@ -108,6 +108,8 @@ public class GreedyReservationAgent implements ReservationAgent {
ReservationRequestInterpreter type = contract.getReservationRequests()
.getInterpreter();
+ boolean hasGang = false;
+
// Iterate the stages in backward from deadline
for (ListIterator<ReservationRequest> li =
stages.listIterator(stages.size()); li.hasPrevious();) {
@@ -117,8 +119,10 @@ public class GreedyReservationAgent implements ReservationAgent {
// validate the RR respect basic constraints
validateInput(plan, currentReservationStage, totalCapacity);
+ hasGang |= currentReservationStage.getConcurrency() > 1;
+
// run allocation for a single stage
- Map<ReservationInterval, ReservationRequest> curAlloc =
+ Map<ReservationInterval, Resource> curAlloc =
placeSingleStage(plan, tempAssigned, currentReservationStage,
earliestStart, curDeadline, oldReservation, totalCapacity);
@@ -178,8 +182,7 @@ public class GreedyReservationAgent implements ReservationAgent {
// create reservation with above allocations if not null/empty
- ReservationRequest ZERO_RES =
- ReservationRequest.newInstance(Resource.newInstance(0, 0), 0);
+ Resource ZERO_RES = Resource.newInstance(0, 0);
long firstStartTime = findEarliestTime(allocations.keySet());
@@ -200,7 +203,7 @@ public class GreedyReservationAgent implements ReservationAgent {
new InMemoryReservationAllocation(reservationId, contract, user,
plan.getQueueName(), firstStartTime,
findLatestTime(allocations.keySet()), allocations,
- plan.getResourceCalculator(), plan.getMinimumAllocation());
+ plan.getResourceCalculator(), plan.getMinimumAllocation(), hasGang);
if (oldReservation != null) {
return plan.updateReservation(capReservation);
} else {
@@ -242,13 +245,13 @@ public class GreedyReservationAgent implements ReservationAgent {
* previous instant in time until the time-window is exhausted or we have
* placed all of the user's requests.
*/
- private Map<ReservationInterval, ReservationRequest> placeSingleStage(
+ private Map<ReservationInterval, Resource> placeSingleStage(
Plan plan, RLESparseResourceAllocation tempAssigned,
ReservationRequest rr, long earliestStart, long curDeadline,
ReservationAllocation oldResAllocation, final Resource totalCapacity) {
- Map<ReservationInterval, ReservationRequest> allocationRequests =
- new HashMap<ReservationInterval, ReservationRequest>();
+ Map<ReservationInterval, Resource> allocationRequests =
+ new HashMap<ReservationInterval, Resource>();
// compute the gang as a resource and get the duration
Resource gang = Resources.multiply(rr.getCapability(), rr.getConcurrency());
@@ -322,7 +325,7 @@ public class GreedyReservationAgent implements ReservationAgent {
ReservationInterval reservationInt =
new ReservationInterval(curDeadline - dur, curDeadline);
- ReservationRequest reservationRes =
+ ReservationRequest reservationRequest =
ReservationRequest.newInstance(rr.getCapability(),
rr.getConcurrency() * maxGang, rr.getConcurrency(),
rr.getDuration());
@@ -331,6 +334,8 @@ public class GreedyReservationAgent implements ReservationAgent {
// placing other ReservationRequest within the same
// ReservationDefinition,
// and we must avoid double-counting the available resources
+ final Resource reservationRes = ReservationSystemUtil.toResource(
+ reservationRequest);
tempAssigned.addInterval(reservationInt, reservationRes);
allocationRequests.put(reservationInt, reservationRes);
@@ -350,7 +355,7 @@ public class GreedyReservationAgent implements ReservationAgent {
// If we are here it is because we did not manage to satisfy this request.
// So we need to remove unwanted side-effects from tempAssigned (needed
// for ANY).
- for (Map.Entry<ReservationInterval, ReservationRequest> tempAllocation :
+ for (Map.Entry<ReservationInterval, Resource> tempAllocation :
allocationRequests.entrySet()) {
tempAssigned.removeInterval(tempAllocation.getKey(),
tempAllocation.getValue());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e602fa3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java
index ce2e7d7..50d66cf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java
@@ -31,7 +31,6 @@ import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.yarn.api.records.ReservationId;
-import org.apache.hadoop.yarn.api.records.ReservationRequest;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningException;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
@@ -110,7 +109,7 @@ class InMemoryPlan implements Plan {
private void incrementAllocation(ReservationAllocation reservation) {
assert (readWriteLock.isWriteLockedByCurrentThread());
- Map<ReservationInterval, ReservationRequest> allocationRequests =
+ Map<ReservationInterval, Resource> allocationRequests =
reservation.getAllocationRequests();
// check if we have encountered the user earlier and if not add an entry
String user = reservation.getUser();
@@ -119,7 +118,7 @@ class InMemoryPlan implements Plan {
resAlloc = new RLESparseResourceAllocation(resCalc, minAlloc);
userResourceAlloc.put(user, resAlloc);
}
- for (Map.Entry<ReservationInterval, ReservationRequest> r : allocationRequests
+ for (Map.Entry<ReservationInterval, Resource> r : allocationRequests
.entrySet()) {
resAlloc.addInterval(r.getKey(), r.getValue());
rleSparseVector.addInterval(r.getKey(), r.getValue());
@@ -128,11 +127,11 @@ class InMemoryPlan implements Plan {
private void decrementAllocation(ReservationAllocation reservation) {
assert (readWriteLock.isWriteLockedByCurrentThread());
- Map<ReservationInterval, ReservationRequest> allocationRequests =
+ Map<ReservationInterval, Resource> allocationRequests =
reservation.getAllocationRequests();
String user = reservation.getUser();
RLESparseResourceAllocation resAlloc = userResourceAlloc.get(user);
- for (Map.Entry<ReservationInterval, ReservationRequest> r : allocationRequests
+ for (Map.Entry<ReservationInterval, Resource> r : allocationRequests
.entrySet()) {
resAlloc.removeInterval(r.getKey(), r.getValue());
rleSparseVector.removeInterval(r.getKey(), r.getValue());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e602fa3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryReservationAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryReservationAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryReservationAllocation.java
index fc8407b..a4dd23b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryReservationAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryReservationAllocation.java
@@ -22,7 +22,6 @@ import java.util.Map;
import org.apache.hadoop.yarn.api.records.ReservationDefinition;
import org.apache.hadoop.yarn.api.records.ReservationId;
-import org.apache.hadoop.yarn.api.records.ReservationRequest;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
@@ -40,7 +39,7 @@ class InMemoryReservationAllocation implements ReservationAllocation {
private final ReservationDefinition contract;
private final long startTime;
private final long endTime;
- private final Map<ReservationInterval, ReservationRequest> allocationRequests;
+ private final Map<ReservationInterval, Resource> allocationRequests;
private boolean hasGang = false;
private long acceptedAt = -1;
@@ -49,22 +48,29 @@ class InMemoryReservationAllocation implements ReservationAllocation {
InMemoryReservationAllocation(ReservationId reservationID,
ReservationDefinition contract, String user, String planName,
long startTime, long endTime,
- Map<ReservationInterval, ReservationRequest> allocationRequests,
+ Map<ReservationInterval, Resource> allocations,
ResourceCalculator calculator, Resource minAlloc) {
+ this(reservationID, contract, user, planName, startTime, endTime,
+ allocations, calculator, minAlloc, false);
+ }
+
+ InMemoryReservationAllocation(ReservationId reservationID,
+ ReservationDefinition contract, String user, String planName,
+ long startTime, long endTime,
+ Map<ReservationInterval, Resource> allocations,
+ ResourceCalculator calculator, Resource minAlloc, boolean hasGang) {
this.contract = contract;
this.startTime = startTime;
this.endTime = endTime;
this.reservationID = reservationID;
this.user = user;
- this.allocationRequests = allocationRequests;
+ this.allocationRequests = allocations;
this.planName = planName;
+ this.hasGang = hasGang;
resourcesOverTime = new RLESparseResourceAllocation(calculator, minAlloc);
- for (Map.Entry<ReservationInterval, ReservationRequest> r : allocationRequests
+ for (Map.Entry<ReservationInterval, Resource> r : allocations
.entrySet()) {
resourcesOverTime.addInterval(r.getKey(), r.getValue());
- if (r.getValue().getConcurrency() > 1) {
- hasGang = true;
- }
}
}
@@ -89,7 +95,7 @@ class InMemoryReservationAllocation implements ReservationAllocation {
}
@Override
- public Map<ReservationInterval, ReservationRequest> getAllocationRequests() {
+ public Map<ReservationInterval, Resource> getAllocationRequests() {
return Collections.unmodifiableMap(allocationRequests);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e602fa3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
index 3f6f405..2957cc6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.reservation;
import java.io.IOException;
import java.io.StringWriter;
import java.util.Iterator;
-import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.NavigableMap;
@@ -31,9 +30,7 @@ import java.util.TreeMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
-import org.apache.hadoop.yarn.api.records.ReservationRequest;
import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.util.Records;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
@@ -80,14 +77,11 @@ public class RLESparseResourceAllocation {
*
* @param reservationInterval the interval for which the resource is to be
* added
- * @param capacity the resource to be added
+ * @param totCap the resource to be added
* @return true if addition is successful, false otherwise
*/
public boolean addInterval(ReservationInterval reservationInterval,
- ReservationRequest capacity) {
- Resource totCap =
- Resources.multiply(capacity.getCapability(),
- (float) capacity.getNumContainers());
+ Resource totCap) {
if (totCap.equals(ZERO_RESOURCE)) {
return true;
}
@@ -143,44 +137,15 @@ public class RLESparseResourceAllocation {
}
/**
- * Add multiple resources for the specified interval
- *
- * @param reservationInterval the interval for which the resource is to be
- * added
- * @param ReservationRequests the resources to be added
- * @param clusterResource the total resources in the cluster
- * @return true if addition is successful, false otherwise
- */
- public boolean addCompositeInterval(ReservationInterval reservationInterval,
- List<ReservationRequest> ReservationRequests, Resource clusterResource) {
- ReservationRequest aggregateReservationRequest =
- Records.newRecord(ReservationRequest.class);
- Resource capacity = Resource.newInstance(0, 0);
- for (ReservationRequest ReservationRequest : ReservationRequests) {
- Resources.addTo(capacity, Resources.multiply(
- ReservationRequest.getCapability(),
- ReservationRequest.getNumContainers()));
- }
- aggregateReservationRequest.setNumContainers((int) Math.ceil(Resources
- .divide(resourceCalculator, clusterResource, capacity, minAlloc)));
- aggregateReservationRequest.setCapability(minAlloc);
-
- return addInterval(reservationInterval, aggregateReservationRequest);
- }
-
- /**
* Removes a resource for the specified interval
*
* @param reservationInterval the interval for which the resource is to be
* removed
- * @param capacity the resource to be removed
+ * @param totCap the resource to be removed
* @return true if removal is successful, false otherwise
*/
public boolean removeInterval(ReservationInterval reservationInterval,
- ReservationRequest capacity) {
- Resource totCap =
- Resources.multiply(capacity.getCapability(),
- (float) capacity.getNumContainers());
+ Resource totCap) {
if (totCap.equals(ZERO_RESOURCE)) {
return true;
}
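
With the signature change above, callers hand RLESparseResourceAllocation an
aggregate Resource directly instead of a ReservationRequest. A brief sketch of the
new call pattern (a hypothetical helper assumed to sit in the same reservation
package; in real callers the calculator and minimum allocation come from the
surrounding Plan, as GreedyReservationAgent shows earlier in this diff):

    package org.apache.hadoop.yarn.server.resourcemanager.reservation;

    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.util.resource.ResourceCalculator;

    public class RleIntervalExample {
      static boolean addAndRemove(ResourceCalculator calc, Resource minAlloc) {
        RLESparseResourceAllocation rle =
            new RLESparseResourceAllocation(calc, minAlloc);
        ReservationInterval interval = new ReservationInterval(0L, 1000L);
        Resource cap = Resource.newInstance(2048, 2);
        boolean added = rle.addInterval(interval, cap);    // true on success
        return added && rle.removeInterval(interval, cap); // true on success
      }
    }
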
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e602fa3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationAllocation.java
index 89c0e55..0d3c692 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationAllocation.java
@@ -22,7 +22,6 @@ import java.util.Map;
import org.apache.hadoop.yarn.api.records.ReservationDefinition;
import org.apache.hadoop.yarn.api.records.ReservationId;
-import org.apache.hadoop.yarn.api.records.ReservationRequest;
import org.apache.hadoop.yarn.api.records.Resource;
/**
@@ -71,7 +70,7 @@ public interface ReservationAllocation extends
* @return the allocationRequests the map of resources requested against the
* time interval for which they were requested
*/
- public Map<ReservationInterval, ReservationRequest> getAllocationRequests();
+ public Map<ReservationInterval, Resource> getAllocationRequests();
/**
* Return a string identifying the plan to which the reservation belongs
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e602fa3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemUtil.java
new file mode 100644
index 0000000..8affae4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemUtil.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.reservation;
+
+import org.apache.hadoop.yarn.api.records.ReservationRequest;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.util.resource.Resources;
+
+import java.util.HashMap;
+import java.util.Map;
+
+final class ReservationSystemUtil {
+
+ private ReservationSystemUtil() {
+ // not called
+ }
+
+ public static Resource toResource(ReservationRequest request) {
+ Resource resource = Resources.multiply(request.getCapability(),
+ (float) request.getNumContainers());
+ return resource;
+ }
+
+ public static Map<ReservationInterval, Resource> toResources(
+ Map<ReservationInterval, ReservationRequest> allocations) {
+ Map<ReservationInterval, Resource> resources =
+ new HashMap<ReservationInterval, Resource>();
+ for (Map.Entry<ReservationInterval, ReservationRequest> entry :
+ allocations.entrySet()) {
+ resources.put(entry.getKey(),
+ toResource(entry.getValue()));
+ }
+ return resources;
+ }
+}
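
As a usage illustration of the new helper (arbitrary values; the class is
package-private, so this hypothetical sketch assumes it sits in the same
reservation package):

    package org.apache.hadoop.yarn.server.resourcemanager.reservation;

    import org.apache.hadoop.yarn.api.records.ReservationRequest;
    import org.apache.hadoop.yarn.api.records.Resource;

    public class ToResourceExample {
      public static void main(String[] args) {
        ReservationRequest rr =
            ReservationRequest.newInstance(Resource.newInstance(1024, 1), 10);
        // Resources.multiply scales both dimensions, so ten containers of
        // <1024 MB, 1 vcore> collapse into a single <10240 MB, 10 vcores>,
        // which is all the plan now needs to retain per interval.
        System.out.println(ReservationSystemUtil.toResource(rr));
      }
    }
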
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e602fa3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java
index bfaf06b..be1d69a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java
@@ -378,14 +378,15 @@ public class ReservationSystemTestUtil {
return rr;
}
- public static Map<ReservationInterval, ReservationRequest> generateAllocation(
+ public static Map<ReservationInterval, Resource> generateAllocation(
long startTime, long step, int[] alloc) {
- Map<ReservationInterval, ReservationRequest> req =
- new TreeMap<ReservationInterval, ReservationRequest>();
+ Map<ReservationInterval, Resource> req =
+ new TreeMap<ReservationInterval, Resource>();
for (int i = 0; i < alloc.length; i++) {
req.put(new ReservationInterval(startTime + i * step, startTime + (i + 1)
- * step), ReservationRequest.newInstance(
- Resource.newInstance(1024, 1), alloc[i]));
+ * step), ReservationSystemUtil.toResource(ReservationRequest
+ .newInstance(
+ Resource.newInstance(1024, 1), alloc[i])));
}
return req;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e602fa3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestCapacityOverTimePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestCapacityOverTimePolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestCapacityOverTimePolicy.java
index 61561e9..19f876d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestCapacityOverTimePolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestCapacityOverTimePolicy.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.reservation;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
import java.io.IOException;
import java.util.Map;
@@ -198,12 +197,14 @@ public class TestCapacityOverTimePolicy {
@Test(expected = PlanningQuotaException.class)
public void testFailAvg() throws IOException, PlanningException {
// generate an allocation which violates the 25% average single-shot
- Map<ReservationInterval, ReservationRequest> req =
- new TreeMap<ReservationInterval, ReservationRequest>();
+ Map<ReservationInterval, Resource> req =
+ new TreeMap<ReservationInterval, Resource>();
long win = timeWindow / 2 + 100;
int cont = (int) Math.ceil(0.5 * totCont);
req.put(new ReservationInterval(initTime, initTime + win),
- ReservationRequest.newInstance(Resource.newInstance(1024, 1), cont));
+ ReservationSystemUtil.toResource(
+ ReservationRequest.newInstance(Resource.newInstance(1024, 1),
+ cont)));
assertTrue(plan.toString(),
plan.addReservation(new InMemoryReservationAllocation(
@@ -214,12 +215,13 @@ public class TestCapacityOverTimePolicy {
@Test
public void testFailAvgBySum() throws IOException, PlanningException {
// generate an allocation which violates the 25% average by sum
- Map<ReservationInterval, ReservationRequest> req =
- new TreeMap<ReservationInterval, ReservationRequest>();
+ Map<ReservationInterval, Resource> req =
+ new TreeMap<ReservationInterval, Resource>();
long win = 86400000 / 4 + 1;
int cont = (int) Math.ceil(0.5 * totCont);
req.put(new ReservationInterval(initTime, initTime + win),
- ReservationRequest.newInstance(Resource.newInstance(1024, 1), cont));
+ ReservationSystemUtil.toResource(ReservationRequest.newInstance(Resource
+ .newInstance(1024, 1), cont)));
assertTrue(plan.toString(),
plan.addReservation(new InMemoryReservationAllocation(
ReservationSystemTestUtil.getNewReservationId(), null, "u1",
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e602fa3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestGreedyReservationAgent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestGreedyReservationAgent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestGreedyReservationAgent.java
index b8cf6c5..de94dcd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestGreedyReservationAgent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestGreedyReservationAgent.java
@@ -516,7 +516,7 @@ public class TestGreedyReservationAgent {
.generateAllocation(0, step, f), res, minAlloc)));
int[] f2 = { 5, 5, 5, 5, 5, 5, 5 };
- Map<ReservationInterval, ReservationRequest> alloc =
+ Map<ReservationInterval, Resource> alloc =
ReservationSystemTestUtil.generateAllocation(5000, step, f2);
assertTrue(plan.toString(),
plan.addReservation(new InMemoryReservationAllocation(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e602fa3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
index 91c1962..722fb29 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryPlan.java
@@ -100,9 +100,11 @@ public class TestInMemoryPlan {
ReservationDefinition rDef =
createSimpleReservationDefinition(start, start + alloc.length,
alloc.length, allocations.values());
+ Map<ReservationInterval, Resource> allocs =
+ ReservationSystemUtil.toResources(allocations);
ReservationAllocation rAllocation =
new InMemoryReservationAllocation(reservationID, rDef, user, planName,
- start, start + alloc.length, allocations, resCalc, minAlloc);
+ start, start + alloc.length, allocs, resCalc, minAlloc);
Assert.assertNull(plan.getReservationById(reservationID));
try {
plan.addReservation(rAllocation);
@@ -132,9 +134,11 @@ public class TestInMemoryPlan {
ReservationDefinition rDef =
createSimpleReservationDefinition(start, start + alloc.length,
alloc.length, allocations.values());
+ Map<ReservationInterval, Resource> allocs =
+ ReservationSystemUtil.toResources(allocations);
ReservationAllocation rAllocation =
new InMemoryReservationAllocation(reservationID, rDef, user, planName,
- start, start + alloc.length, allocations, resCalc, minAlloc);
+ start, start + alloc.length, allocs, resCalc, minAlloc);
Assert.assertNull(plan.getReservationById(reservationID));
try {
plan.addReservation(rAllocation);
@@ -158,9 +162,11 @@ public class TestInMemoryPlan {
ReservationDefinition rDef =
createSimpleReservationDefinition(start, start + alloc.length,
alloc.length, allocations.values());
+ Map<ReservationInterval, Resource> allocs =
+ ReservationSystemUtil.toResources(allocations);
ReservationAllocation rAllocation =
new InMemoryReservationAllocation(reservationID, rDef, user, planName,
- start, start + alloc.length, allocations, resCalc, minAlloc);
+ start, start + alloc.length, allocs, resCalc, minAlloc);
Assert.assertNull(plan.getReservationById(reservationID));
try {
plan.addReservation(rAllocation);
@@ -202,9 +208,11 @@ public class TestInMemoryPlan {
ReservationDefinition rDef =
createSimpleReservationDefinition(start, start + alloc.length,
alloc.length, allocations.values());
+ Map<ReservationInterval, Resource> allocs =
+ ReservationSystemUtil.toResources(allocations);
ReservationAllocation rAllocation =
new InMemoryReservationAllocation(reservationID, rDef, user, planName,
- start, start + alloc.length, allocations, resCalc, minAlloc);
+ start, start + alloc.length, allocs, resCalc, minAlloc);
Assert.assertNull(plan.getReservationById(reservationID));
try {
plan.addReservation(rAllocation);
@@ -226,9 +234,12 @@ public class TestInMemoryPlan {
rDef =
createSimpleReservationDefinition(start, start + updatedAlloc.length,
updatedAlloc.length, allocations.values());
+ Map<ReservationInterval, Resource> updatedAllocs =
+ ReservationSystemUtil.toResources(allocations);
rAllocation =
new InMemoryReservationAllocation(reservationID, rDef, user, planName,
- start, start + updatedAlloc.length, allocations, resCalc, minAlloc);
+ start, start + updatedAlloc.length, updatedAllocs, resCalc,
+ minAlloc);
try {
plan.updateReservation(rAllocation);
} catch (PlanningException e) {
@@ -260,9 +271,11 @@ public class TestInMemoryPlan {
ReservationDefinition rDef =
createSimpleReservationDefinition(start, start + alloc.length,
alloc.length, allocations.values());
+ Map<ReservationInterval, Resource> allocs =
+ ReservationSystemUtil.toResources(allocations);
ReservationAllocation rAllocation =
new InMemoryReservationAllocation(reservationID, rDef, user, planName,
- start, start + alloc.length, allocations, resCalc, minAlloc);
+ start, start + alloc.length, allocs, resCalc, minAlloc);
Assert.assertNull(plan.getReservationById(reservationID));
try {
plan.updateReservation(rAllocation);
@@ -290,9 +303,11 @@ public class TestInMemoryPlan {
ReservationDefinition rDef =
createSimpleReservationDefinition(start, start + alloc.length,
alloc.length, allocations.values());
+ Map<ReservationInterval, Resource> allocs =
+ ReservationSystemUtil.toResources(allocations);
ReservationAllocation rAllocation =
new InMemoryReservationAllocation(reservationID, rDef, user, planName,
- start, start + alloc.length, allocations, resCalc, minAlloc);
+ start, start + alloc.length, allocs, resCalc, minAlloc);
Assert.assertNull(plan.getReservationById(reservationID));
try {
plan.addReservation(rAllocation);
@@ -359,9 +374,11 @@ public class TestInMemoryPlan {
ReservationDefinition rDef1 =
createSimpleReservationDefinition(start, start + alloc1.length,
alloc1.length, allocations1.values());
+ Map<ReservationInterval, Resource> allocs1 =
+ ReservationSystemUtil.toResources(allocations1);
ReservationAllocation rAllocation =
new InMemoryReservationAllocation(reservationID1, rDef1, user,
- planName, start, start + alloc1.length, allocations1, resCalc,
+ planName, start, start + alloc1.length, allocs1, resCalc,
minAlloc);
Assert.assertNull(plan.getReservationById(reservationID1));
try {
@@ -388,9 +405,11 @@ public class TestInMemoryPlan {
ReservationDefinition rDef2 =
createSimpleReservationDefinition(start, start + alloc2.length,
alloc2.length, allocations2.values());
+ Map<ReservationInterval, Resource> allocs2 =
+ ReservationSystemUtil.toResources(allocations2);
rAllocation =
new InMemoryReservationAllocation(reservationID2, rDef2, user,
- planName, start, start + alloc2.length, allocations2, resCalc,
+ planName, start, start + alloc2.length, allocs2, resCalc,
minAlloc);
Assert.assertNull(plan.getReservationById(reservationID2));
try {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e602fa3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryReservationAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryReservationAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryReservationAllocation.java
index 76f39dc..55224a9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryReservationAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestInMemoryReservationAllocation.java
@@ -69,7 +69,7 @@ public class TestInMemoryReservationAllocation {
ReservationDefinition rDef =
createSimpleReservationDefinition(start, start + alloc.length + 1,
alloc.length);
- Map<ReservationInterval, ReservationRequest> allocations =
+ Map<ReservationInterval, Resource> allocations =
generateAllocation(start, alloc, false, false);
ReservationAllocation rAllocation =
new InMemoryReservationAllocation(reservationID, rDef, user, planName,
@@ -91,7 +91,7 @@ public class TestInMemoryReservationAllocation {
ReservationDefinition rDef =
createSimpleReservationDefinition(start, start + alloc.length + 1,
alloc.length);
- Map<ReservationInterval, ReservationRequest> allocations =
+ Map<ReservationInterval, Resource> allocations =
generateAllocation(start, alloc, true, false);
ReservationAllocation rAllocation =
new InMemoryReservationAllocation(reservationID, rDef, user, planName,
@@ -114,7 +114,7 @@ public class TestInMemoryReservationAllocation {
ReservationDefinition rDef =
createSimpleReservationDefinition(start, start + alloc.length + 1,
alloc.length);
- Map<ReservationInterval, ReservationRequest> allocations =
+ Map<ReservationInterval, Resource> allocations =
generateAllocation(start, alloc, true, false);
ReservationAllocation rAllocation =
new InMemoryReservationAllocation(reservationID, rDef, user, planName,
@@ -137,8 +137,8 @@ public class TestInMemoryReservationAllocation {
ReservationDefinition rDef =
createSimpleReservationDefinition(start, start + alloc.length + 1,
alloc.length);
- Map<ReservationInterval, ReservationRequest> allocations =
- new HashMap<ReservationInterval, ReservationRequest>();
+ Map<ReservationInterval, Resource> allocations =
+ new HashMap<ReservationInterval, Resource>();
ReservationAllocation rAllocation =
new InMemoryReservationAllocation(reservationID, rDef, user, planName,
start, start + alloc.length + 1, allocations, resCalc, minAlloc);
@@ -156,11 +156,13 @@ public class TestInMemoryReservationAllocation {
ReservationDefinition rDef =
createSimpleReservationDefinition(start, start + alloc.length + 1,
alloc.length);
- Map<ReservationInterval, ReservationRequest> allocations =
- generateAllocation(start, alloc, false, true);
+ boolean isGang = true;
+ Map<ReservationInterval, Resource> allocations =
+ generateAllocation(start, alloc, false, isGang);
ReservationAllocation rAllocation =
new InMemoryReservationAllocation(reservationID, rDef, user, planName,
- start, start + alloc.length + 1, allocations, resCalc, minAlloc);
+ start, start + alloc.length + 1, allocations, resCalc, minAlloc,
+ isGang);
doAssertions(rAllocation, reservationID, rDef, allocations, start, alloc);
Assert.assertTrue(rAllocation.containsGangs());
for (int i = 0; i < alloc.length; i++) {
@@ -171,7 +173,7 @@ public class TestInMemoryReservationAllocation {
private void doAssertions(ReservationAllocation rAllocation,
ReservationId reservationID, ReservationDefinition rDef,
- Map<ReservationInterval, ReservationRequest> allocations, int start,
+ Map<ReservationInterval, Resource> allocations, int start,
int[] alloc) {
Assert.assertEquals(reservationID, rAllocation.getReservationId());
Assert.assertEquals(rDef, rAllocation.getReservationDefinition());
@@ -198,10 +200,10 @@ public class TestInMemoryReservationAllocation {
return rDef;
}
- private Map<ReservationInterval, ReservationRequest> generateAllocation(
+ private Map<ReservationInterval, Resource> generateAllocation(
int startTime, int[] alloc, boolean isStep, boolean isGang) {
- Map<ReservationInterval, ReservationRequest> req =
- new HashMap<ReservationInterval, ReservationRequest>();
+ Map<ReservationInterval, Resource> req =
+ new HashMap<ReservationInterval, Resource>();
int numContainers = 0;
for (int i = 0; i < alloc.length; i++) {
if (isStep) {
@@ -215,7 +217,8 @@ public class TestInMemoryReservationAllocation {
if (isGang) {
rr.setConcurrency(numContainers);
}
- req.put(new ReservationInterval(startTime + i, startTime + i + 1), rr);
+ req.put(new ReservationInterval(startTime + i, startTime + i + 1),
+ ReservationSystemUtil.toResource(rr));
}
return req;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e602fa3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
index c7301c7..d0f4dc6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
@@ -46,9 +46,9 @@ public class TestRLESparseResourceAllocation {
new RLESparseResourceAllocation(resCalc, minAlloc);
int[] alloc = { 10, 10, 10, 10, 10, 10 };
int start = 100;
- Set<Entry<ReservationInterval, ReservationRequest>> inputs =
+ Set<Entry<ReservationInterval, Resource>> inputs =
generateAllocation(start, alloc, false).entrySet();
- for (Entry<ReservationInterval, ReservationRequest> ip : inputs) {
+ for (Entry<ReservationInterval, Resource> ip : inputs) {
rleSparseVector.addInterval(ip.getKey(), ip.getValue());
}
LOG.info(rleSparseVector.toString());
@@ -63,7 +63,7 @@ public class TestRLESparseResourceAllocation {
}
Assert.assertEquals(Resource.newInstance(0, 0),
rleSparseVector.getCapacityAtTime(start + alloc.length + 2));
- for (Entry<ReservationInterval, ReservationRequest> ip : inputs) {
+ for (Entry<ReservationInterval, Resource> ip : inputs) {
rleSparseVector.removeInterval(ip.getKey(), ip.getValue());
}
LOG.info(rleSparseVector.toString());
@@ -83,9 +83,9 @@ public class TestRLESparseResourceAllocation {
new RLESparseResourceAllocation(resCalc, minAlloc);
int[] alloc = { 10, 10, 10, 10, 10, 10 };
int start = 100;
- Set<Entry<ReservationInterval, ReservationRequest>> inputs =
+ Set<Entry<ReservationInterval, Resource>> inputs =
generateAllocation(start, alloc, true).entrySet();
- for (Entry<ReservationInterval, ReservationRequest> ip : inputs) {
+ for (Entry<ReservationInterval, Resource> ip : inputs) {
rleSparseVector.addInterval(ip.getKey(), ip.getValue());
}
LOG.info(rleSparseVector.toString());
@@ -101,8 +101,8 @@ public class TestRLESparseResourceAllocation {
}
Assert.assertEquals(Resource.newInstance(0, 0),
rleSparseVector.getCapacityAtTime(start + alloc.length + 2));
- for (Entry<ReservationInterval, ReservationRequest> ip : inputs) {
- rleSparseVector.removeInterval(ip.getKey(), ip.getValue());
+ for (Entry<ReservationInterval, Resource> ip : inputs) {
+ rleSparseVector.removeInterval(ip.getKey(), ip.getValue());
}
LOG.info(rleSparseVector.toString());
for (int i = 0; i < alloc.length; i++) {
@@ -121,9 +121,9 @@ public class TestRLESparseResourceAllocation {
new RLESparseResourceAllocation(resCalc, minAlloc);
int[] alloc = { 0, 5, 10, 10, 5, 0 };
int start = 100;
- Set<Entry<ReservationInterval, ReservationRequest>> inputs =
+ Set<Entry<ReservationInterval, Resource>> inputs =
generateAllocation(start, alloc, true).entrySet();
- for (Entry<ReservationInterval, ReservationRequest> ip : inputs) {
+ for (Entry<ReservationInterval, Resource> ip : inputs) {
rleSparseVector.addInterval(ip.getKey(), ip.getValue());
}
LOG.info(rleSparseVector.toString());
@@ -139,7 +139,7 @@ public class TestRLESparseResourceAllocation {
}
Assert.assertEquals(Resource.newInstance(0, 0),
rleSparseVector.getCapacityAtTime(start + alloc.length + 2));
- for (Entry<ReservationInterval, ReservationRequest> ip : inputs) {
+ for (Entry<ReservationInterval, Resource> ip : inputs) {
rleSparseVector.removeInterval(ip.getKey(), ip.getValue());
}
LOG.info(rleSparseVector.toString());
@@ -157,17 +157,17 @@ public class TestRLESparseResourceAllocation {
RLESparseResourceAllocation rleSparseVector =
new RLESparseResourceAllocation(resCalc, minAlloc);
rleSparseVector.addInterval(new ReservationInterval(0, Long.MAX_VALUE),
- ReservationRequest.newInstance(Resource.newInstance(0, 0), (0)));
+ Resource.newInstance(0, 0));
LOG.info(rleSparseVector.toString());
Assert.assertEquals(Resource.newInstance(0, 0),
rleSparseVector.getCapacityAtTime(new Random().nextLong()));
Assert.assertTrue(rleSparseVector.isEmpty());
}
- private Map<ReservationInterval, ReservationRequest> generateAllocation(
+ private Map<ReservationInterval, Resource> generateAllocation(
int startTime, int[] alloc, boolean isStep) {
- Map<ReservationInterval, ReservationRequest> req =
- new HashMap<ReservationInterval, ReservationRequest>();
+ Map<ReservationInterval, Resource> req =
+ new HashMap<ReservationInterval, Resource>();
int numContainers = 0;
for (int i = 0; i < alloc.length; i++) {
if (isStep) {
@@ -176,9 +176,8 @@ public class TestRLESparseResourceAllocation {
numContainers = alloc[i];
}
req.put(new ReservationInterval(startTime + i, startTime + i + 1),
-
- ReservationRequest.newInstance(Resource.newInstance(1024, 1),
- (numContainers)));
+ ReservationSystemUtil.toResource(ReservationRequest.newInstance(
+ Resource.newInstance(1024, 1), (numContainers))));
}
return req;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e602fa3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestSimpleCapacityReplanner.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestSimpleCapacityReplanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestSimpleCapacityReplanner.java
index 1ca9f2e..d4a97ba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestSimpleCapacityReplanner.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestSimpleCapacityReplanner.java
@@ -146,14 +146,15 @@ public class TestSimpleCapacityReplanner {
}
}
- private Map<ReservationInterval, ReservationRequest> generateAllocation(
+ private Map<ReservationInterval, Resource> generateAllocation(
int startTime, int[] alloc) {
- Map<ReservationInterval, ReservationRequest> req =
- new TreeMap<ReservationInterval, ReservationRequest>();
+ Map<ReservationInterval, Resource> req =
+ new TreeMap<ReservationInterval, Resource>();
for (int i = 0; i < alloc.length; i++) {
req.put(new ReservationInterval(startTime + i, startTime + i + 1),
- ReservationRequest.newInstance(Resource.newInstance(1024, 1),
- alloc[i]));
+ ReservationSystemUtil.toResource(
+ ReservationRequest.newInstance(Resource.newInstance(1024, 1),
+ alloc[i])));
}
return req;
}
[02/21] hadoop git commit: HDFS-8726. Move protobuf files that define
the client-server protocols to hdfs-client. Contributed by Haohui Mai.
Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto
deleted file mode 100644
index e0d1f5f..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto
+++ /dev/null
@@ -1,247 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and stable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for a *stable* .proto interface.
- */
-
-// This file contains protocol buffers that are used throughout HDFS -- i.e.
-// by the client, server, and data transfer protocols.
-
-option java_package = "org.apache.hadoop.hdfs.protocol.proto";
-option java_outer_classname = "ClientDatanodeProtocolProtos";
-option java_generic_services = true;
-option java_generate_equals_and_hash = true;
-package hadoop.hdfs;
-
-import "Security.proto";
-import "hdfs.proto";
-
-/**
- * block - block for which visible length is requested
- */
-message GetReplicaVisibleLengthRequestProto {
- required ExtendedBlockProto block = 1;
-}
-
-/**
- * length - visible length of the block
- */
-message GetReplicaVisibleLengthResponseProto {
- required uint64 length = 1;
-}
-
-/**
- * void request
- */
-message RefreshNamenodesRequestProto {
-}
-
-/**
- * void response
- */
-message RefreshNamenodesResponseProto {
-}
-
-/**
- * blockPool - block pool to be deleted
- * force - if false, delete the block pool only if it is empty.
- * if true, delete the block pool even if it has blocks.
- */
-message DeleteBlockPoolRequestProto {
- required string blockPool = 1;
- required bool force = 2;
-}
-
-/**
- * void response
- */
-message DeleteBlockPoolResponseProto {
-}
-
-/**
- * Gets the file information about where the block and its metadata are stored
- * block - block for which path information is being requested
- * token - block token
- *
- * This message is deprecated in favor of file descriptor passing.
- */
-message GetBlockLocalPathInfoRequestProto {
- required ExtendedBlockProto block = 1;
- required hadoop.common.TokenProto token = 2;
-}
-
-/**
- * block - block for which file path information is being returned
- * localPath - file path where the block data is stored
- * localMetaPath - file path where the block meta data is stored
- *
- * This message is deprecated in favor of file descriptor passing.
- */
-message GetBlockLocalPathInfoResponseProto {
- required ExtendedBlockProto block = 1;
- required string localPath = 2;
- required string localMetaPath = 3;
-}
-
-/**
- * Query for the disk locations of a number of blocks on this DN.
- * blockPoolId - the pool to query
- * blockIds - list of block IDs to query
- * tokens - list of access tokens corresponding to list of block IDs
- */
-message GetHdfsBlockLocationsRequestProto {
- // Removed: HDFS-3969
- // repeated ExtendedBlockProto blocks = 1;
- repeated hadoop.common.TokenProto tokens = 2;
-
- required string blockPoolId = 3;
- repeated sfixed64 blockIds = 4 [ packed = true ];
-}
-
-/**
- * volumeIds - id of each volume, potentially multiple bytes
- * volumeIndexes - for each block, an index into volumeIds specifying the volume
- *                 on which it is located. If a block is not present on any
- *                 volume, its index is set to MAX_INT.
- */
-message GetHdfsBlockLocationsResponseProto {
- repeated bytes volumeIds = 1;
- repeated uint32 volumeIndexes = 2 [ packed = true ];
-}
-
-/**
- * forUpgrade - if true, clients are advised to wait for restart and a quick
- *              restart for upgrade is performed. Otherwise, the datanode does
- *              the regular shutdown.
- */
-message ShutdownDatanodeRequestProto {
- required bool forUpgrade = 1;
-}
-
-message ShutdownDatanodeResponseProto {
-}
-
-/**
- * Ping datanode for liveness and quick info
- */
-message GetDatanodeInfoRequestProto {
-}
-
-message GetDatanodeInfoResponseProto {
- required DatanodeLocalInfoProto localInfo = 1;
-}
-
-/** Asks DataNode to reload configuration file. */
-message StartReconfigurationRequestProto {
-}
-
-message StartReconfigurationResponseProto {
-}
-
-message TriggerBlockReportRequestProto {
- required bool incremental = 1;
-}
-
-message TriggerBlockReportResponseProto {
-}
-
-/** Query the running status of reconfiguration process */
-message GetReconfigurationStatusRequestProto {
-}
-
-message GetReconfigurationStatusConfigChangeProto {
- required string name = 1;
- required string oldValue = 2;
- optional string newValue = 3;
- optional string errorMessage = 4; // It is empty if success.
-}
-
-message GetReconfigurationStatusResponseProto {
- required int64 startTime = 1;
- optional int64 endTime = 2;
- repeated GetReconfigurationStatusConfigChangeProto changes = 3;
-}
-
-message ListReconfigurablePropertiesRequestProto {
-}
-
-/** Query the reconfigurable properties on DataNode. */
-message ListReconfigurablePropertiesResponseProto {
- repeated string name = 1;
-}
-
-/**
- * Protocol used from the client to the Datanode.
- * See the request and response messages for details of each RPC call.
- */
-service ClientDatanodeProtocolService {
- /**
- * Returns the visible length of the replica
- */
- rpc getReplicaVisibleLength(GetReplicaVisibleLengthRequestProto)
- returns(GetReplicaVisibleLengthResponseProto);
-
- /**
- * Refresh the list of federated namenodes from updated configuration.
- * Adds new namenodes and stops the deleted namenodes.
- */
- rpc refreshNamenodes(RefreshNamenodesRequestProto)
- returns(RefreshNamenodesResponseProto);
-
- /**
- * Delete the block pool from the datanode.
- */
- rpc deleteBlockPool(DeleteBlockPoolRequestProto)
- returns(DeleteBlockPoolResponseProto);
-
- /**
- * Retrieves the path names of the block file and metadata file stored on the
- * local file system.
- */
- rpc getBlockLocalPathInfo(GetBlockLocalPathInfoRequestProto)
- returns(GetBlockLocalPathInfoResponseProto);
-
- /**
- * Retrieve additional HDFS-specific metadata about a set of blocks stored
- * on the local file system.
- */
- rpc getHdfsBlockLocations(GetHdfsBlockLocationsRequestProto)
- returns(GetHdfsBlockLocationsResponseProto);
-
- rpc shutdownDatanode(ShutdownDatanodeRequestProto)
- returns(ShutdownDatanodeResponseProto);
-
- rpc getDatanodeInfo(GetDatanodeInfoRequestProto)
- returns(GetDatanodeInfoResponseProto);
-
- rpc getReconfigurationStatus(GetReconfigurationStatusRequestProto)
- returns(GetReconfigurationStatusResponseProto);
-
- rpc startReconfiguration(StartReconfigurationRequestProto)
- returns(StartReconfigurationResponseProto);
-
- rpc listReconfigurableProperties(
- ListReconfigurablePropertiesRequestProto)
- returns(ListReconfigurablePropertiesResponseProto);
-
- rpc triggerBlockReport(TriggerBlockReportRequestProto)
- returns(TriggerBlockReportResponseProto);
-}
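
Since the relocated files keep their java_package and java_outer_classname options, the generated classes are unchanged for callers. As a quick illustration, building the getReplicaVisibleLength request above with the standard protobuf-java builder API (the block identifiers are made-up example values):

  import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
  import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;

  // All three ExtendedBlockProto fields are 'required', so build() fails
  // fast if any of them is left unset.
  ExtendedBlockProto block = ExtendedBlockProto.newBuilder()
      .setPoolId("BP-example-pool")
      .setBlockId(1073741825L)
      .setGenerationStamp(1001L)
      .build();
  GetReplicaVisibleLengthRequestProto request =
      GetReplicaVisibleLengthRequestProto.newBuilder()
          .setBlock(block)
          .build();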
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
deleted file mode 100644
index b44c556..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
+++ /dev/null
@@ -1,863 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and stable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for a *stable* .proto interface.
- */
-
-option java_package = "org.apache.hadoop.hdfs.protocol.proto";
-option java_outer_classname = "ClientNamenodeProtocolProtos";
-option java_generic_services = true;
-option java_generate_equals_and_hash = true;
-package hadoop.hdfs;
-
-import "Security.proto";
-import "hdfs.proto";
-import "acl.proto";
-import "xattr.proto";
-import "encryption.proto";
-import "inotify.proto";
-
-/**
- * The ClientNamenodeProtocol Service defines the interface between a client
- * (as running inside an MR Task) and the Namenode.
- * See org.apache.hadoop.hdfs.protocol.ClientProtocol for the javadoc
- * for each of the methods.
- * The exceptions declared in the above class also apply to this protocol.
- * Exceptions are unwrapped and thrown by the PB libraries.
- */
-
-message GetBlockLocationsRequestProto {
- required string src = 1; // file name
- required uint64 offset = 2; // range start offset
- required uint64 length = 3; // range length
-}
-
-message GetBlockLocationsResponseProto {
- optional LocatedBlocksProto locations = 1;
-}
-
-message GetServerDefaultsRequestProto { // No parameters
-}
-
-message GetServerDefaultsResponseProto {
- required FsServerDefaultsProto serverDefaults = 1;
-}
-
-enum CreateFlagProto {
- CREATE = 0x01; // Create a file
- OVERWRITE = 0x02; // Truncate/overwrite a file. Same as POSIX O_TRUNC
- APPEND = 0x04; // Append to a file
- LAZY_PERSIST = 0x10; // File with reduced durability guarantees.
- NEW_BLOCK = 0x20; // Write data to a new block when appending
-}
-
-message CreateRequestProto {
- required string src = 1;
- required FsPermissionProto masked = 2;
- required string clientName = 3;
- required uint32 createFlag = 4; // bits set using CreateFlag
- required bool createParent = 5;
- required uint32 replication = 6; // Short: Only 16 bits used
- required uint64 blockSize = 7;
- repeated CryptoProtocolVersionProto cryptoProtocolVersion = 8;
-}
-
-message CreateResponseProto {
- optional HdfsFileStatusProto fs = 1;
-}
-
-message AppendRequestProto {
- required string src = 1;
- required string clientName = 2;
- optional uint32 flag = 3; // bits set using CreateFlag
-}
-
-message AppendResponseProto {
- optional LocatedBlockProto block = 1;
- optional HdfsFileStatusProto stat = 2;
-}
-
-message SetReplicationRequestProto {
- required string src = 1;
- required uint32 replication = 2; // Short: Only 16 bits used
-}
-
-message SetReplicationResponseProto {
- required bool result = 1;
-}
-
-message SetStoragePolicyRequestProto {
- required string src = 1;
- required string policyName = 2;
-}
-
-message SetStoragePolicyResponseProto { // void response
-}
-
-message GetStoragePoliciesRequestProto { // void request
-}
-
-message GetStoragePoliciesResponseProto {
- repeated BlockStoragePolicyProto policies = 1;
-}
-
-message SetPermissionRequestProto {
- required string src = 1;
- required FsPermissionProto permission = 2;
-}
-
-message SetPermissionResponseProto { // void response
-}
-
-message SetOwnerRequestProto {
- required string src = 1;
- optional string username = 2;
- optional string groupname = 3;
-}
-
-message SetOwnerResponseProto { // void response
-}
-
-message AbandonBlockRequestProto {
- required ExtendedBlockProto b = 1;
- required string src = 2;
- required string holder = 3;
- optional uint64 fileId = 4 [default = 0]; // default to GRANDFATHER_INODE_ID
-}
-
-message AbandonBlockResponseProto { // void response
-}
-
-message AddBlockRequestProto {
- required string src = 1;
- required string clientName = 2;
- optional ExtendedBlockProto previous = 3;
- repeated DatanodeInfoProto excludeNodes = 4;
- optional uint64 fileId = 5 [default = 0]; // default as a bogus id
- repeated string favoredNodes = 6; // the set of datanodes to use for the block
-}
-
-message AddBlockResponseProto {
- required LocatedBlockProto block = 1;
-}
-
-message GetAdditionalDatanodeRequestProto {
- required string src = 1;
- required ExtendedBlockProto blk = 2;
- repeated DatanodeInfoProto existings = 3;
- repeated DatanodeInfoProto excludes = 4;
- required uint32 numAdditionalNodes = 5;
- required string clientName = 6;
- repeated string existingStorageUuids = 7;
- optional uint64 fileId = 8 [default = 0]; // default to GRANDFATHER_INODE_ID
-}
-
-message GetAdditionalDatanodeResponseProto {
- required LocatedBlockProto block = 1;
-}
-
-message CompleteRequestProto {
- required string src = 1;
- required string clientName = 2;
- optional ExtendedBlockProto last = 3;
- optional uint64 fileId = 4 [default = 0]; // default to GRANDFATHER_INODE_ID
-}
-
-message CompleteResponseProto {
- required bool result = 1;
-}
-
-message ReportBadBlocksRequestProto {
- repeated LocatedBlockProto blocks = 1;
-}
-
-message ReportBadBlocksResponseProto { // void response
-}
-
-message ConcatRequestProto {
- required string trg = 1;
- repeated string srcs = 2;
-}
-
-message ConcatResponseProto { // void response
-}
-
-message TruncateRequestProto {
- required string src = 1;
- required uint64 newLength = 2;
- required string clientName = 3;
-}
-
-message TruncateResponseProto {
- required bool result = 1;
-}
-
-message RenameRequestProto {
- required string src = 1;
- required string dst = 2;
-}
-
-message RenameResponseProto {
- required bool result = 1;
-}
-
-
-message Rename2RequestProto {
- required string src = 1;
- required string dst = 2;
- required bool overwriteDest = 3;
-}
-
-message Rename2ResponseProto { // void response
-}
-
-message DeleteRequestProto {
- required string src = 1;
- required bool recursive = 2;
-}
-
-message DeleteResponseProto {
- required bool result = 1;
-}
-
-message MkdirsRequestProto {
- required string src = 1;
- required FsPermissionProto masked = 2;
- required bool createParent = 3;
-}
-message MkdirsResponseProto {
- required bool result = 1;
-}
-
-message GetListingRequestProto {
- required string src = 1;
- required bytes startAfter = 2;
- required bool needLocation = 3;
-}
-message GetListingResponseProto {
- optional DirectoryListingProto dirList = 1;
-}
-
-message GetSnapshottableDirListingRequestProto { // no input parameters
-}
-message GetSnapshottableDirListingResponseProto {
- optional SnapshottableDirectoryListingProto snapshottableDirList = 1;
-}
-
-message GetSnapshotDiffReportRequestProto {
- required string snapshotRoot = 1;
- required string fromSnapshot = 2;
- required string toSnapshot = 3;
-}
-message GetSnapshotDiffReportResponseProto {
- required SnapshotDiffReportProto diffReport = 1;
-}
-
-message RenewLeaseRequestProto {
- required string clientName = 1;
-}
-
-message RenewLeaseResponseProto { //void response
-}
-
-message RecoverLeaseRequestProto {
- required string src = 1;
- required string clientName = 2;
-}
-message RecoverLeaseResponseProto {
- required bool result = 1;
-}
-
-message GetFsStatusRequestProto { // no input parameters
-}
-
-message GetFsStatsResponseProto {
- required uint64 capacity = 1;
- required uint64 used = 2;
- required uint64 remaining = 3;
- required uint64 under_replicated = 4;
- required uint64 corrupt_blocks = 5;
- required uint64 missing_blocks = 6;
- optional uint64 missing_repl_one_blocks = 7;
-}
-
-enum DatanodeReportTypeProto { // type of the datanode report
- ALL = 1;
- LIVE = 2;
- DEAD = 3;
- DECOMMISSIONING = 4;
-}
-
-message GetDatanodeReportRequestProto {
- required DatanodeReportTypeProto type = 1;
-}
-
-message GetDatanodeReportResponseProto {
- repeated DatanodeInfoProto di = 1;
-}
-
-message GetDatanodeStorageReportRequestProto {
- required DatanodeReportTypeProto type = 1;
-}
-
-message DatanodeStorageReportProto {
- required DatanodeInfoProto datanodeInfo = 1;
- repeated StorageReportProto storageReports = 2;
-}
-
-message GetDatanodeStorageReportResponseProto {
- repeated DatanodeStorageReportProto datanodeStorageReports = 1;
-}
-
-message GetPreferredBlockSizeRequestProto {
- required string filename = 1;
-}
-
-message GetPreferredBlockSizeResponseProto {
- required uint64 bsize = 1;
-}
-
-enum SafeModeActionProto {
- SAFEMODE_LEAVE = 1;
- SAFEMODE_ENTER = 2;
- SAFEMODE_GET = 3;
-}
-
-message SetSafeModeRequestProto {
- required SafeModeActionProto action = 1;
- optional bool checked = 2 [default = false];
-}
-
-message SetSafeModeResponseProto {
- required bool result = 1;
-}
-
-message SaveNamespaceRequestProto {
- optional uint64 timeWindow = 1 [default = 0];
- optional uint64 txGap = 2 [default = 0];
-}
-
-message SaveNamespaceResponseProto { // void response
- optional bool saved = 1 [default = true];
-}
-
-message RollEditsRequestProto { // no parameters
-}
-
-message RollEditsResponseProto { // response
- required uint64 newSegmentTxId = 1;
-}
-
-message RestoreFailedStorageRequestProto {
- required string arg = 1;
-}
-
-message RestoreFailedStorageResponseProto {
- required bool result = 1;
-}
-
-message RefreshNodesRequestProto { // no parameters
-}
-
-message RefreshNodesResponseProto { // void response
-}
-
-message FinalizeUpgradeRequestProto { // no parameters
-}
-
-message FinalizeUpgradeResponseProto { // void response
-}
-
-enum RollingUpgradeActionProto {
- QUERY = 1;
- START = 2;
- FINALIZE = 3;
-}
-
-message RollingUpgradeRequestProto {
- required RollingUpgradeActionProto action = 1;
-}
-
-message RollingUpgradeInfoProto {
- required RollingUpgradeStatusProto status = 1;
- required uint64 startTime = 2;
- required uint64 finalizeTime = 3;
- required bool createdRollbackImages = 4;
-}
-
-message RollingUpgradeResponseProto {
- optional RollingUpgradeInfoProto rollingUpgradeInfo = 1;
-}
-
-message ListCorruptFileBlocksRequestProto {
- required string path = 1;
- optional string cookie = 2;
-}
-
-message ListCorruptFileBlocksResponseProto {
- required CorruptFileBlocksProto corrupt = 1;
-}
-
-message MetaSaveRequestProto {
- required string filename = 1;
-}
-
-message MetaSaveResponseProto { // void response
-}
-
-message GetFileInfoRequestProto {
- required string src = 1;
-}
-
-message GetFileInfoResponseProto {
- optional HdfsFileStatusProto fs = 1;
-}
-
-message IsFileClosedRequestProto {
- required string src = 1;
-}
-
-message IsFileClosedResponseProto {
- required bool result = 1;
-}
-
-message CacheDirectiveInfoProto {
- optional int64 id = 1;
- optional string path = 2;
- optional uint32 replication = 3;
- optional string pool = 4;
- optional CacheDirectiveInfoExpirationProto expiration = 5;
-}
-
-message CacheDirectiveInfoExpirationProto {
- required int64 millis = 1;
- required bool isRelative = 2;
-}
-
-message CacheDirectiveStatsProto {
- required int64 bytesNeeded = 1;
- required int64 bytesCached = 2;
- required int64 filesNeeded = 3;
- required int64 filesCached = 4;
- required bool hasExpired = 5;
-}
-
-enum CacheFlagProto {
- FORCE = 0x01; // Ignore pool resource limits
-}
-
-message AddCacheDirectiveRequestProto {
- required CacheDirectiveInfoProto info = 1;
- optional uint32 cacheFlags = 2; // bits set using CacheFlag
-}
-
-message AddCacheDirectiveResponseProto {
- required int64 id = 1;
-}
-
-message ModifyCacheDirectiveRequestProto {
- required CacheDirectiveInfoProto info = 1;
- optional uint32 cacheFlags = 2; // bits set using CacheFlag
-}
-
-message ModifyCacheDirectiveResponseProto {
-}
-
-message RemoveCacheDirectiveRequestProto {
- required int64 id = 1;
-}
-
-message RemoveCacheDirectiveResponseProto {
-}
-
-message ListCacheDirectivesRequestProto {
- required int64 prevId = 1;
- required CacheDirectiveInfoProto filter = 2;
-}
-
-message CacheDirectiveEntryProto {
- required CacheDirectiveInfoProto info = 1;
- required CacheDirectiveStatsProto stats = 2;
-}
-
-message ListCacheDirectivesResponseProto {
- repeated CacheDirectiveEntryProto elements = 1;
- required bool hasMore = 2;
-}
-
-message CachePoolInfoProto {
- optional string poolName = 1;
- optional string ownerName = 2;
- optional string groupName = 3;
- optional int32 mode = 4;
- optional int64 limit = 5;
- optional int64 maxRelativeExpiry = 6;
-}
-
-message CachePoolStatsProto {
- required int64 bytesNeeded = 1;
- required int64 bytesCached = 2;
- required int64 bytesOverlimit = 3;
- required int64 filesNeeded = 4;
- required int64 filesCached = 5;
-}
-
-message AddCachePoolRequestProto {
- required CachePoolInfoProto info = 1;
-}
-
-message AddCachePoolResponseProto { // void response
-}
-
-message ModifyCachePoolRequestProto {
- required CachePoolInfoProto info = 1;
-}
-
-message ModifyCachePoolResponseProto { // void response
-}
-
-message RemoveCachePoolRequestProto {
- required string poolName = 1;
-}
-
-message RemoveCachePoolResponseProto { // void response
-}
-
-message ListCachePoolsRequestProto {
- required string prevPoolName = 1;
-}
-
-message ListCachePoolsResponseProto {
- repeated CachePoolEntryProto entries = 1;
- required bool hasMore = 2;
-}
-
-message CachePoolEntryProto {
- required CachePoolInfoProto info = 1;
- required CachePoolStatsProto stats = 2;
-}
-
-message GetFileLinkInfoRequestProto {
- required string src = 1;
-}
-
-message GetFileLinkInfoResponseProto {
- optional HdfsFileStatusProto fs = 1;
-}
-
-message GetContentSummaryRequestProto {
- required string path = 1;
-}
-
-message GetContentSummaryResponseProto {
- required ContentSummaryProto summary = 1;
-}
-
-message SetQuotaRequestProto {
- required string path = 1;
- required uint64 namespaceQuota = 2;
- required uint64 storagespaceQuota = 3;
- optional StorageTypeProto storageType = 4;
-}
-
-message SetQuotaResponseProto { // void response
-}
-
-message FsyncRequestProto {
- required string src = 1;
- required string client = 2;
- optional sint64 lastBlockLength = 3 [default = -1];
- optional uint64 fileId = 4 [default = 0]; // default to GRANDFATHER_INODE_ID
-}
-
-message FsyncResponseProto { // void response
-}
-
-message SetTimesRequestProto {
- required string src = 1;
- required uint64 mtime = 2;
- required uint64 atime = 3;
-}
-
-message SetTimesResponseProto { // void response
-}
-
-message CreateSymlinkRequestProto {
- required string target = 1;
- required string link = 2;
- required FsPermissionProto dirPerm = 3;
- required bool createParent = 4;
-}
-
-message CreateSymlinkResponseProto { // void response
-}
-
-message GetLinkTargetRequestProto {
- required string path = 1;
-}
-message GetLinkTargetResponseProto {
- optional string targetPath = 1;
-}
-
-message UpdateBlockForPipelineRequestProto {
- required ExtendedBlockProto block = 1;
- required string clientName = 2;
-}
-
-message UpdateBlockForPipelineResponseProto {
- required LocatedBlockProto block = 1;
-}
-
-message UpdatePipelineRequestProto {
- required string clientName = 1;
- required ExtendedBlockProto oldBlock = 2;
- required ExtendedBlockProto newBlock = 3;
- repeated DatanodeIDProto newNodes = 4;
- repeated string storageIDs = 5;
-}
-
-message UpdatePipelineResponseProto { // void response
-}
-
-message SetBalancerBandwidthRequestProto {
- required int64 bandwidth = 1;
-}
-
-message SetBalancerBandwidthResponseProto { // void response
-}
-
-message GetDataEncryptionKeyRequestProto { // no parameters
-}
-
-message GetDataEncryptionKeyResponseProto {
- optional DataEncryptionKeyProto dataEncryptionKey = 1;
-}
-
-message CreateSnapshotRequestProto {
- required string snapshotRoot = 1;
- optional string snapshotName = 2;
-}
-
-message CreateSnapshotResponseProto {
- required string snapshotPath = 1;
-}
-
-message RenameSnapshotRequestProto {
- required string snapshotRoot = 1;
- required string snapshotOldName = 2;
- required string snapshotNewName = 3;
-}
-
-message RenameSnapshotResponseProto { // void response
-}
-
-message AllowSnapshotRequestProto {
- required string snapshotRoot = 1;
-}
-
-message AllowSnapshotResponseProto {
-}
-
-message DisallowSnapshotRequestProto {
- required string snapshotRoot = 1;
-}
-
-message DisallowSnapshotResponseProto {
-}
-
-message DeleteSnapshotRequestProto {
- required string snapshotRoot = 1;
- required string snapshotName = 2;
-}
-
-message DeleteSnapshotResponseProto { // void response
-}
-
-message CheckAccessRequestProto {
- required string path = 1;
- required AclEntryProto.FsActionProto mode = 2;
-}
-
-message CheckAccessResponseProto { // void response
-}
-
-message GetCurrentEditLogTxidRequestProto {
-}
-
-message GetCurrentEditLogTxidResponseProto {
- required int64 txid = 1;
-}
-
-message GetEditsFromTxidRequestProto {
- required int64 txid = 1;
-}
-
-message GetEditsFromTxidResponseProto {
- required EventsListProto eventsList = 1;
-}
-
-service ClientNamenodeProtocol {
- rpc getBlockLocations(GetBlockLocationsRequestProto)
- returns(GetBlockLocationsResponseProto);
- rpc getServerDefaults(GetServerDefaultsRequestProto)
- returns(GetServerDefaultsResponseProto);
- rpc create(CreateRequestProto)returns(CreateResponseProto);
- rpc append(AppendRequestProto) returns(AppendResponseProto);
- rpc setReplication(SetReplicationRequestProto)
- returns(SetReplicationResponseProto);
- rpc setStoragePolicy(SetStoragePolicyRequestProto)
- returns(SetStoragePolicyResponseProto);
- rpc getStoragePolicies(GetStoragePoliciesRequestProto)
- returns(GetStoragePoliciesResponseProto);
- rpc setPermission(SetPermissionRequestProto)
- returns(SetPermissionResponseProto);
- rpc setOwner(SetOwnerRequestProto) returns(SetOwnerResponseProto);
- rpc abandonBlock(AbandonBlockRequestProto) returns(AbandonBlockResponseProto);
- rpc addBlock(AddBlockRequestProto) returns(AddBlockResponseProto);
- rpc getAdditionalDatanode(GetAdditionalDatanodeRequestProto)
- returns(GetAdditionalDatanodeResponseProto);
- rpc complete(CompleteRequestProto) returns(CompleteResponseProto);
- rpc reportBadBlocks(ReportBadBlocksRequestProto)
- returns(ReportBadBlocksResponseProto);
- rpc concat(ConcatRequestProto) returns(ConcatResponseProto);
- rpc truncate(TruncateRequestProto) returns(TruncateResponseProto);
- rpc rename(RenameRequestProto) returns(RenameResponseProto);
- rpc rename2(Rename2RequestProto) returns(Rename2ResponseProto);
- rpc delete(DeleteRequestProto) returns(DeleteResponseProto);
- rpc mkdirs(MkdirsRequestProto) returns(MkdirsResponseProto);
- rpc getListing(GetListingRequestProto) returns(GetListingResponseProto);
- rpc renewLease(RenewLeaseRequestProto) returns(RenewLeaseResponseProto);
- rpc recoverLease(RecoverLeaseRequestProto)
- returns(RecoverLeaseResponseProto);
- rpc getFsStats(GetFsStatusRequestProto) returns(GetFsStatsResponseProto);
- rpc getDatanodeReport(GetDatanodeReportRequestProto)
- returns(GetDatanodeReportResponseProto);
- rpc getDatanodeStorageReport(GetDatanodeStorageReportRequestProto)
- returns(GetDatanodeStorageReportResponseProto);
- rpc getPreferredBlockSize(GetPreferredBlockSizeRequestProto)
- returns(GetPreferredBlockSizeResponseProto);
- rpc setSafeMode(SetSafeModeRequestProto)
- returns(SetSafeModeResponseProto);
- rpc saveNamespace(SaveNamespaceRequestProto)
- returns(SaveNamespaceResponseProto);
- rpc rollEdits(RollEditsRequestProto)
- returns(RollEditsResponseProto);
- rpc restoreFailedStorage(RestoreFailedStorageRequestProto)
- returns(RestoreFailedStorageResponseProto);
- rpc refreshNodes(RefreshNodesRequestProto) returns(RefreshNodesResponseProto);
- rpc finalizeUpgrade(FinalizeUpgradeRequestProto)
- returns(FinalizeUpgradeResponseProto);
- rpc rollingUpgrade(RollingUpgradeRequestProto)
- returns(RollingUpgradeResponseProto);
- rpc listCorruptFileBlocks(ListCorruptFileBlocksRequestProto)
- returns(ListCorruptFileBlocksResponseProto);
- rpc metaSave(MetaSaveRequestProto) returns(MetaSaveResponseProto);
- rpc getFileInfo(GetFileInfoRequestProto) returns(GetFileInfoResponseProto);
- rpc addCacheDirective(AddCacheDirectiveRequestProto)
- returns (AddCacheDirectiveResponseProto);
- rpc modifyCacheDirective(ModifyCacheDirectiveRequestProto)
- returns (ModifyCacheDirectiveResponseProto);
- rpc removeCacheDirective(RemoveCacheDirectiveRequestProto)
- returns (RemoveCacheDirectiveResponseProto);
- rpc listCacheDirectives(ListCacheDirectivesRequestProto)
- returns (ListCacheDirectivesResponseProto);
- rpc addCachePool(AddCachePoolRequestProto)
- returns(AddCachePoolResponseProto);
- rpc modifyCachePool(ModifyCachePoolRequestProto)
- returns(ModifyCachePoolResponseProto);
- rpc removeCachePool(RemoveCachePoolRequestProto)
- returns(RemoveCachePoolResponseProto);
- rpc listCachePools(ListCachePoolsRequestProto)
- returns(ListCachePoolsResponseProto);
- rpc getFileLinkInfo(GetFileLinkInfoRequestProto)
- returns(GetFileLinkInfoResponseProto);
- rpc getContentSummary(GetContentSummaryRequestProto)
- returns(GetContentSummaryResponseProto);
- rpc setQuota(SetQuotaRequestProto) returns(SetQuotaResponseProto);
- rpc fsync(FsyncRequestProto) returns(FsyncResponseProto);
- rpc setTimes(SetTimesRequestProto) returns(SetTimesResponseProto);
- rpc createSymlink(CreateSymlinkRequestProto)
- returns(CreateSymlinkResponseProto);
- rpc getLinkTarget(GetLinkTargetRequestProto)
- returns(GetLinkTargetResponseProto);
- rpc updateBlockForPipeline(UpdateBlockForPipelineRequestProto)
- returns(UpdateBlockForPipelineResponseProto);
- rpc updatePipeline(UpdatePipelineRequestProto)
- returns(UpdatePipelineResponseProto);
- rpc getDelegationToken(hadoop.common.GetDelegationTokenRequestProto)
- returns(hadoop.common.GetDelegationTokenResponseProto);
- rpc renewDelegationToken(hadoop.common.RenewDelegationTokenRequestProto)
- returns(hadoop.common.RenewDelegationTokenResponseProto);
- rpc cancelDelegationToken(hadoop.common.CancelDelegationTokenRequestProto)
- returns(hadoop.common.CancelDelegationTokenResponseProto);
- rpc setBalancerBandwidth(SetBalancerBandwidthRequestProto)
- returns(SetBalancerBandwidthResponseProto);
- rpc getDataEncryptionKey(GetDataEncryptionKeyRequestProto)
- returns(GetDataEncryptionKeyResponseProto);
- rpc createSnapshot(CreateSnapshotRequestProto)
- returns(CreateSnapshotResponseProto);
- rpc renameSnapshot(RenameSnapshotRequestProto)
- returns(RenameSnapshotResponseProto);
- rpc allowSnapshot(AllowSnapshotRequestProto)
- returns(AllowSnapshotResponseProto);
- rpc disallowSnapshot(DisallowSnapshotRequestProto)
- returns(DisallowSnapshotResponseProto);
- rpc getSnapshottableDirListing(GetSnapshottableDirListingRequestProto)
- returns(GetSnapshottableDirListingResponseProto);
- rpc deleteSnapshot(DeleteSnapshotRequestProto)
- returns(DeleteSnapshotResponseProto);
- rpc getSnapshotDiffReport(GetSnapshotDiffReportRequestProto)
- returns(GetSnapshotDiffReportResponseProto);
- rpc isFileClosed(IsFileClosedRequestProto)
- returns(IsFileClosedResponseProto);
- rpc modifyAclEntries(ModifyAclEntriesRequestProto)
- returns(ModifyAclEntriesResponseProto);
- rpc removeAclEntries(RemoveAclEntriesRequestProto)
- returns(RemoveAclEntriesResponseProto);
- rpc removeDefaultAcl(RemoveDefaultAclRequestProto)
- returns(RemoveDefaultAclResponseProto);
- rpc removeAcl(RemoveAclRequestProto)
- returns(RemoveAclResponseProto);
- rpc setAcl(SetAclRequestProto)
- returns(SetAclResponseProto);
- rpc getAclStatus(GetAclStatusRequestProto)
- returns(GetAclStatusResponseProto);
- rpc setXAttr(SetXAttrRequestProto)
- returns(SetXAttrResponseProto);
- rpc getXAttrs(GetXAttrsRequestProto)
- returns(GetXAttrsResponseProto);
- rpc listXAttrs(ListXAttrsRequestProto)
- returns(ListXAttrsResponseProto);
- rpc removeXAttr(RemoveXAttrRequestProto)
- returns(RemoveXAttrResponseProto);
- rpc checkAccess(CheckAccessRequestProto)
- returns(CheckAccessResponseProto);
- rpc createEncryptionZone(CreateEncryptionZoneRequestProto)
- returns(CreateEncryptionZoneResponseProto);
- rpc listEncryptionZones(ListEncryptionZonesRequestProto)
- returns(ListEncryptionZonesResponseProto);
- rpc getEZForPath(GetEZForPathRequestProto)
- returns(GetEZForPathResponseProto);
- rpc getCurrentEditLogTxid(GetCurrentEditLogTxidRequestProto)
- returns(GetCurrentEditLogTxidResponseProto);
- rpc getEditsFromTxid(GetEditsFromTxidRequestProto)
- returns(GetEditsFromTxidResponseProto);
-}
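
The NameNode-side messages are built the same way. A hedged sketch of assembling the getBlockLocations request defined at the top of this file (the path and byte range are placeholders):

  import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto;

  // src, offset and length are all 'required'; this asks for the locations
  // of the first megabyte of the file.
  GetBlockLocationsRequestProto request =
      GetBlockLocationsRequestProto.newBuilder()
          .setSrc("/user/example/data.txt")
          .setOffset(0L)
          .setLength(1024L * 1024L)
          .build();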
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/acl.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/acl.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/acl.proto
deleted file mode 100644
index 57cc855..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/acl.proto
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-option java_package = "org.apache.hadoop.hdfs.protocol.proto";
-option java_outer_classname = "AclProtos";
-option java_generate_equals_and_hash = true;
-package hadoop.hdfs;
-
-import "hdfs.proto";
-
-message AclEntryProto {
- enum AclEntryScopeProto {
- ACCESS = 0x0;
- DEFAULT = 0x1;
- }
-
- enum AclEntryTypeProto {
- USER = 0x0;
- GROUP = 0x1;
- MASK = 0x2;
- OTHER = 0x3;
- }
-
- enum FsActionProto {
- NONE = 0x0;
- EXECUTE = 0x1;
- WRITE = 0x2;
- WRITE_EXECUTE = 0x3;
- READ = 0x4;
- READ_EXECUTE = 0x5;
- READ_WRITE = 0x6;
- PERM_ALL = 0x7;
- }
-
- required AclEntryTypeProto type = 1;
- required AclEntryScopeProto scope = 2;
- required FsActionProto permissions = 3;
- optional string name = 4;
-}
-
-message AclStatusProto {
- required string owner = 1;
- required string group = 2;
- required bool sticky = 3;
- repeated AclEntryProto entries = 4;
- optional FsPermissionProto permission = 5;
-}
-
-message AclEditLogProto {
- required string src = 1;
- repeated AclEntryProto entries = 2;
-}
-
-message ModifyAclEntriesRequestProto {
- required string src = 1;
- repeated AclEntryProto aclSpec = 2;
-}
-
-message ModifyAclEntriesResponseProto {
-}
-
-message RemoveAclRequestProto {
- required string src = 1;
-}
-
-message RemoveAclResponseProto {
-}
-
-message RemoveAclEntriesRequestProto {
- required string src = 1;
- repeated AclEntryProto aclSpec = 2;
-}
-
-message RemoveAclEntriesResponseProto {
-}
-
-message RemoveDefaultAclRequestProto {
- required string src = 1;
-}
-
-message RemoveDefaultAclResponseProto {
-}
-
-message SetAclRequestProto {
- required string src = 1;
- repeated AclEntryProto aclSpec = 2;
-}
-
-message SetAclResponseProto {
-}
-
-message GetAclStatusRequestProto {
- required string src = 1;
-}
-
-message GetAclStatusResponseProto {
- required AclStatusProto result = 1;
-}
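
The ACL enums are nested inside AclEntryProto, so generated Java code reaches them through the entry class. A small sketch (the user name is illustrative):

  import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto;

  // An ACCESS-scope entry granting read/write to user "alice"; 'name' is
  // optional and only meaningful for named USER or GROUP entries.
  AclEntryProto entry = AclEntryProto.newBuilder()
      .setType(AclEntryProto.AclEntryTypeProto.USER)
      .setScope(AclEntryProto.AclEntryScopeProto.ACCESS)
      .setPermissions(AclEntryProto.FsActionProto.READ_WRITE)
      .setName("alice")
      .build();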
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
deleted file mode 100644
index 5071d15..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
+++ /dev/null
@@ -1,304 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and stable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for a *stable* .proto interface.
- */
-
-// This file contains protocol buffers that are used to transfer data
-// to and from the datanode, as well as between datanodes.
-
-option java_package = "org.apache.hadoop.hdfs.protocol.proto";
-option java_outer_classname = "DataTransferProtos";
-option java_generate_equals_and_hash = true;
-package hadoop.hdfs;
-
-import "Security.proto";
-import "hdfs.proto";
-
-message DataTransferEncryptorMessageProto {
- enum DataTransferEncryptorStatus {
- SUCCESS = 0;
- ERROR_UNKNOWN_KEY = 1;
- ERROR = 2;
- }
- required DataTransferEncryptorStatus status = 1;
- optional bytes payload = 2;
- optional string message = 3;
- repeated CipherOptionProto cipherOption = 4;
-}
-
-message BaseHeaderProto {
- required ExtendedBlockProto block = 1;
- optional hadoop.common.TokenProto token = 2;
- optional DataTransferTraceInfoProto traceInfo = 3;
-}
-
-message DataTransferTraceInfoProto {
- required uint64 traceId = 1;
- required uint64 parentId = 2;
-}
-
-message ClientOperationHeaderProto {
- required BaseHeaderProto baseHeader = 1;
- required string clientName = 2;
-}
-
-message CachingStrategyProto {
- optional bool dropBehind = 1;
- optional int64 readahead = 2;
-}
-
-message OpReadBlockProto {
- required ClientOperationHeaderProto header = 1;
- required uint64 offset = 2;
- required uint64 len = 3;
- optional bool sendChecksums = 4 [default = true];
- optional CachingStrategyProto cachingStrategy = 5;
-}
-
-
-message ChecksumProto {
- required ChecksumTypeProto type = 1;
- required uint32 bytesPerChecksum = 2;
-}
-
-message OpWriteBlockProto {
- required ClientOperationHeaderProto header = 1;
- repeated DatanodeInfoProto targets = 2;
- optional DatanodeInfoProto source = 3;
- enum BlockConstructionStage {
- PIPELINE_SETUP_APPEND = 0;
- // pipeline set up for failed PIPELINE_SETUP_APPEND recovery
- PIPELINE_SETUP_APPEND_RECOVERY = 1;
- // data streaming
- DATA_STREAMING = 2;
- // pipeline setup for failed data streaming recovery
- PIPELINE_SETUP_STREAMING_RECOVERY = 3;
- // close the block and pipeline
- PIPELINE_CLOSE = 4;
- // Recover a failed PIPELINE_CLOSE
- PIPELINE_CLOSE_RECOVERY = 5;
- // pipeline set up for block creation
- PIPELINE_SETUP_CREATE = 6;
- // transfer RBW for adding datanodes
- TRANSFER_RBW = 7;
- // transfer Finalized for adding datanodes
- TRANSFER_FINALIZED = 8;
- }
- required BlockConstructionStage stage = 4;
- required uint32 pipelineSize = 5;
- required uint64 minBytesRcvd = 6;
- required uint64 maxBytesRcvd = 7;
- required uint64 latestGenerationStamp = 8;
-
- /**
- * The requested checksum mechanism for this block write.
- */
- required ChecksumProto requestedChecksum = 9;
- optional CachingStrategyProto cachingStrategy = 10;
- optional StorageTypeProto storageType = 11 [default = DISK];
- repeated StorageTypeProto targetStorageTypes = 12;
-
- /**
- * Hint to the DataNode that the block can be allocated on transient
- * storage i.e. memory and written to disk lazily. The DataNode is free
- * to ignore this hint.
- */
- optional bool allowLazyPersist = 13 [default = false];
- // whether to pin the block, so Balancer won't move it.
- optional bool pinning = 14 [default = false];
- repeated bool targetPinnings = 15;
-}
-
-message OpTransferBlockProto {
- required ClientOperationHeaderProto header = 1;
- repeated DatanodeInfoProto targets = 2;
- repeated StorageTypeProto targetStorageTypes = 3;
-}
-
-message OpReplaceBlockProto {
- required BaseHeaderProto header = 1;
- required string delHint = 2;
- required DatanodeInfoProto source = 3;
- optional StorageTypeProto storageType = 4 [default = DISK];
-}
-
-message OpCopyBlockProto {
- required BaseHeaderProto header = 1;
-}
-
-message OpBlockChecksumProto {
- required BaseHeaderProto header = 1;
-}
-
-/**
- * An ID uniquely identifying a shared memory segment.
- */
-message ShortCircuitShmIdProto {
- required int64 hi = 1;
- required int64 lo = 2;
-}
-
-/**
- * An ID uniquely identifying a slot within a shared memory segment.
- */
-message ShortCircuitShmSlotProto {
- required ShortCircuitShmIdProto shmId = 1;
- required int32 slotIdx = 2;
-}
-
-message OpRequestShortCircuitAccessProto {
- required BaseHeaderProto header = 1;
-
- /** In order to get short-circuit access to block data, clients must set this
- * to the highest version of the block data that they can understand.
- * Currently 1 is the only version, but more versions may exist in the future
- * if the on-disk format changes.
- */
- required uint32 maxVersion = 2;
-
- /**
- * The shared memory slot to use, if we are using one.
- */
- optional ShortCircuitShmSlotProto slotId = 3;
-
- /**
- * True if the client supports verifying that the file descriptor has been
- * sent successfully.
- */
- optional bool supportsReceiptVerification = 4 [default = false];
-}
-
-message ReleaseShortCircuitAccessRequestProto {
- required ShortCircuitShmSlotProto slotId = 1;
- optional DataTransferTraceInfoProto traceInfo = 2;
-}
-
-message ReleaseShortCircuitAccessResponseProto {
- required Status status = 1;
- optional string error = 2;
-}
-
-message ShortCircuitShmRequestProto {
- // The name of the client requesting the shared memory segment. This is
- // purely for logging / debugging purposes.
- required string clientName = 1;
- optional DataTransferTraceInfoProto traceInfo = 2;
-}
-
-message ShortCircuitShmResponseProto {
- required Status status = 1;
- optional string error = 2;
- optional ShortCircuitShmIdProto id = 3;
-}
-
-message PacketHeaderProto {
- // All fields must be fixed-length!
- required sfixed64 offsetInBlock = 1;
- required sfixed64 seqno = 2;
- required bool lastPacketInBlock = 3;
- required sfixed32 dataLen = 4;
- optional bool syncBlock = 5 [default = false];
-}
-
-// Status is a 4-bit enum
-enum Status {
- SUCCESS = 0;
- ERROR = 1;
- ERROR_CHECKSUM = 2;
- ERROR_INVALID = 3;
- ERROR_EXISTS = 4;
- ERROR_ACCESS_TOKEN = 5;
- CHECKSUM_OK = 6;
- ERROR_UNSUPPORTED = 7;
- OOB_RESTART = 8; // Quick restart
- OOB_RESERVED1 = 9; // Reserved
- OOB_RESERVED2 = 10; // Reserved
- OOB_RESERVED3 = 11; // Reserved
- IN_PROGRESS = 12;
-}
-
-enum ShortCircuitFdResponse {
- DO_NOT_USE_RECEIPT_VERIFICATION = 0;
- USE_RECEIPT_VERIFICATION = 1;
-}
-
-message PipelineAckProto {
- required sint64 seqno = 1;
- repeated Status reply = 2;
- optional uint64 downstreamAckTimeNanos = 3 [default = 0];
- repeated uint32 flag = 4 [packed=true];
-}
-
-/**
- * Sent as part of the BlockOpResponseProto
- * for READ_BLOCK and COPY_BLOCK operations.
- */
-message ReadOpChecksumInfoProto {
- required ChecksumProto checksum = 1;
-
- /**
- * The offset into the block at which the first packet
- * will start. This is necessary since reads will align
- * backwards to a checksum chunk boundary.
- */
- required uint64 chunkOffset = 2;
-}
-
-message BlockOpResponseProto {
- required Status status = 1;
-
- optional string firstBadLink = 2;
- optional OpBlockChecksumResponseProto checksumResponse = 3;
- optional ReadOpChecksumInfoProto readOpChecksumInfo = 4;
-
- /** explanatory text which may be useful to log on the client side */
- optional string message = 5;
-
- /** If the server chooses to agree to the request of a client for
- * short-circuit access, it will send a response message with the relevant
- * file descriptors attached.
- *
- * In the body of the message, this version number will be set to the
- * specific version number of the block data that the client is about to
- * read.
- */
- optional uint32 shortCircuitAccessVersion = 6;
-}
-
-/**
- * Message sent from the client to the DN after reading the entire
- * read request.
- */
-message ClientReadStatusProto {
- required Status status = 1;
-}
-
-message DNTransferAckProto {
- required Status status = 1;
-}
-
-message OpBlockChecksumResponseProto {
- required uint32 bytesPerCrc = 1;
- required uint64 crcPerBlock = 2;
- required bytes md5 = 3;
- optional ChecksumTypeProto crcType = 4;
-}
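
PacketHeaderProto above is the one message here whose comment demands fixed-length fields: with only sfixed64, sfixed32 and bool wire types, the encoded size is the same for every packet, so a receiver can read the header without a separate length prefix. A hedged sketch (the packet values are placeholders):

  import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto;

  PacketHeaderProto header = PacketHeaderProto.newBuilder()
      .setOffsetInBlock(0L)
      .setSeqno(1L)
      .setLastPacketInBlock(false)
      .setDataLen(64 * 1024)
      .setSyncBlock(false)  // optional, but fixed-width once set
      .build();
  // getSerializedSize() returns the same value regardless of the field
  // contents - the constant size that the fixed-length comment relies on.
  int headerSize = header.getSerializedSize();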
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/editlog.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/editlog.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/editlog.proto
new file mode 100644
index 0000000..f25fe59
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/editlog.proto
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+option java_outer_classname = "EditLogProtos";
+option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
+
+import "acl.proto";
+import "xattr.proto";
+
+message AclEditLogProto {
+ required string src = 1;
+ repeated AclEntryProto entries = 2;
+}
+
+message XAttrEditLogProto {
+ optional string src = 1;
+ repeated XAttrProto xAttrs = 2;
+}
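
editlog.proto is the one file added rather than deleted in this commit: AclEditLogProto and XAttrEditLogProto describe edit-log records rather than client RPCs, so they presumably stay on the server side here instead of following acl.proto and xattr.proto into hdfs-client. A sketch of building the ACL record, reusing the AclEntryProto shown earlier (the path and permissions are illustrative):

  import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto;
  import org.apache.hadoop.hdfs.protocol.proto.EditLogProtos.AclEditLogProto;

  // One edit-log record: a DEFAULT-scope group entry on /user/example.
  AclEditLogProto record = AclEditLogProto.newBuilder()
      .setSrc("/user/example")
      .addEntries(AclEntryProto.newBuilder()
          .setType(AclEntryProto.AclEntryTypeProto.GROUP)
          .setScope(AclEntryProto.AclEntryScopeProto.DEFAULT)
          .setPermissions(AclEntryProto.FsActionProto.READ_EXECUTE))
      .build();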
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto
deleted file mode 100644
index 68b2f3a..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and stable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for a *stable* .proto interface.
- */
-
-// This file contains protocol buffers that are used throughout HDFS -- i.e.
-// by the client, server, and data transfer protocols.
-
-
-option java_package = "org.apache.hadoop.hdfs.protocol.proto";
-option java_outer_classname = "EncryptionZonesProtos";
-option java_generate_equals_and_hash = true;
-package hadoop.hdfs;
-
-import "hdfs.proto";
-
-message CreateEncryptionZoneRequestProto {
- required string src = 1;
- optional string keyName = 2;
-}
-
-message CreateEncryptionZoneResponseProto {
-}
-
-message ListEncryptionZonesRequestProto {
- required int64 id = 1;
-}
-
-message EncryptionZoneProto {
- required int64 id = 1;
- required string path = 2;
- required CipherSuiteProto suite = 3;
- required CryptoProtocolVersionProto cryptoProtocolVersion = 4;
- required string keyName = 5;
-}
-
-message ListEncryptionZonesResponseProto {
- repeated EncryptionZoneProto zones = 1;
- required bool hasMore = 2;
-}
-
-message GetEZForPathRequestProto {
- required string src = 1;
-}
-
-message GetEZForPathResponseProto {
- optional EncryptionZoneProto zone = 1;
-}
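
Encryption-zone requests follow the same builder pattern; note that keyName is marked optional in the message definition. A hedged sketch (the zone path and key name are placeholders):

  import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;

  CreateEncryptionZoneRequestProto request =
      CreateEncryptionZoneRequestProto.newBuilder()
          .setSrc("/secure")           // required
          .setKeyName("example-key")   // optional per the .proto
          .build();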
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
deleted file mode 100644
index 86fb462..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ /dev/null
@@ -1,611 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and stable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for a *stable* .proto interface.
- */
-
-// This file contains protocol buffers that are used throughout HDFS -- i.e.
-// by the client, server, and data transfer protocols.
-
-
-option java_package = "org.apache.hadoop.hdfs.protocol.proto";
-option java_outer_classname = "HdfsProtos";
-option java_generate_equals_and_hash = true;
-package hadoop.hdfs;
-
-import "Security.proto";
-
-/**
- * An extended block identifies a block
- */
-message ExtendedBlockProto {
- required string poolId = 1; // Block pool id - globally unique across clusters
- required uint64 blockId = 2; // the local id within a pool
- required uint64 generationStamp = 3;
- optional uint64 numBytes = 4 [default = 0]; // len does not belong in ebid
- // here for historical reasons
-}
-
-/**
- * Identifies a Datanode
- */
-message DatanodeIDProto {
- required string ipAddr = 1; // IP address
- required string hostName = 2; // hostname
- required string datanodeUuid = 3; // UUID assigned to the Datanode. For
- // upgraded clusters this is the same
- // as the original StorageID of the
- // Datanode.
- required uint32 xferPort = 4; // data streaming port
- required uint32 infoPort = 5; // datanode http port
- required uint32 ipcPort = 6; // ipc server port
- optional uint32 infoSecurePort = 7 [default = 0]; // datanode https port
-}
-
-/**
- * Datanode local information
- */
-message DatanodeLocalInfoProto {
- required string softwareVersion = 1;
- required string configVersion = 2;
- required uint64 uptime = 3;
-}
-
-/**
- * DatanodeInfo array
- */
-message DatanodeInfosProto {
- repeated DatanodeInfoProto datanodes = 1;
-}
-
-/**
- * The status of a Datanode
- */
-message DatanodeInfoProto {
- required DatanodeIDProto id = 1;
- optional uint64 capacity = 2 [default = 0];
- optional uint64 dfsUsed = 3 [default = 0];
- optional uint64 remaining = 4 [default = 0];
- optional uint64 blockPoolUsed = 5 [default = 0];
- optional uint64 lastUpdate = 6 [default = 0];
- optional uint32 xceiverCount = 7 [default = 0];
- optional string location = 8;
- enum AdminState {
- NORMAL = 0;
- DECOMMISSION_INPROGRESS = 1;
- DECOMMISSIONED = 2;
- }
-
- optional AdminState adminState = 10 [default = NORMAL];
- optional uint64 cacheCapacity = 11 [default = 0];
- optional uint64 cacheUsed = 12 [default = 0];
- optional uint64 lastUpdateMonotonic = 13 [default = 0];
-}
-
-/**
- * Represents a storage available on the datanode
- */
-message DatanodeStorageProto {
- enum StorageState {
- NORMAL = 0;
- READ_ONLY_SHARED = 1;
- }
-
- required string storageUuid = 1;
- optional StorageState state = 2 [default = NORMAL];
- optional StorageTypeProto storageType = 3 [default = DISK];
-}
-
-message StorageReportProto {
- required string storageUuid = 1 [ deprecated = true ];
- optional bool failed = 2 [ default = false ];
- optional uint64 capacity = 3 [ default = 0 ];
- optional uint64 dfsUsed = 4 [ default = 0 ];
- optional uint64 remaining = 5 [ default = 0 ];
- optional uint64 blockPoolUsed = 6 [ default = 0 ];
- optional DatanodeStorageProto storage = 7; // supersedes StorageUuid
-}
-
-/**
- * Summary of a file or directory
- */
-message ContentSummaryProto {
- required uint64 length = 1;
- required uint64 fileCount = 2;
- required uint64 directoryCount = 3;
- required uint64 quota = 4;
- required uint64 spaceConsumed = 5;
- required uint64 spaceQuota = 6;
- optional StorageTypeQuotaInfosProto typeQuotaInfos = 7;
-}
-
-/**
- * Storage type quota and usage information of a file or directory
- */
-message StorageTypeQuotaInfosProto {
- repeated StorageTypeQuotaInfoProto typeQuotaInfo = 1;
-}
-
-message StorageTypeQuotaInfoProto {
- required StorageTypeProto type = 1;
- required uint64 quota = 2;
- required uint64 consumed = 3;
-}
-
-/**
- * Contains a list of paths corresponding to corrupt files and a cookie
- * used for iterative calls to NameNode.listCorruptFileBlocks.
- *
- */
-message CorruptFileBlocksProto {
- repeated string files = 1;
- required string cookie = 2;
-}
-
-/**
- * File or directory permission - same spec as POSIX
- */
-message FsPermissionProto {
- required uint32 perm = 1; // Actually a short - only 16 bits used
-}
-
-/**
- * Types of recognized storage media.
- */
-enum StorageTypeProto {
- DISK = 1;
- SSD = 2;
- ARCHIVE = 3;
- RAM_DISK = 4;
-}
-
-/**
- * A list of storage types.
- */
-message StorageTypesProto {
- repeated StorageTypeProto storageTypes = 1;
-}
-
-/**
- * Block replica storage policy.
- */
-message BlockStoragePolicyProto {
- required uint32 policyId = 1;
- required string name = 2;
- // a list of storage types for storing the block replicas when creating a
- // block.
- required StorageTypesProto creationPolicy = 3;
- // A list of storage types for creation fallback storage.
- optional StorageTypesProto creationFallbackPolicy = 4;
- optional StorageTypesProto replicationFallbackPolicy = 5;
-}
-
-/**
- * A list of storage IDs.
- */
-message StorageUuidsProto {
- repeated string storageUuids = 1;
-}
-
-/**
- * A LocatedBlock gives information about a block and its location.
- */
-message LocatedBlockProto {
- required ExtendedBlockProto b = 1;
- required uint64 offset = 2; // offset of first byte of block in the file
- repeated DatanodeInfoProto locs = 3; // Locations ordered by proximity to client ip
- required bool corrupt = 4; // true if all replicas of a block are corrupt, else false
-                          // If the block has a few corrupt replicas, they are
-                          // filtered out and their locations are not part of this object
-
- required hadoop.common.TokenProto blockToken = 5;
- repeated bool isCached = 6 [packed=true]; // if a location in locs is cached
- repeated StorageTypeProto storageTypes = 7;
- repeated string storageIDs = 8;
-}
-
-message DataEncryptionKeyProto {
- required uint32 keyId = 1;
- required string blockPoolId = 2;
- required bytes nonce = 3;
- required bytes encryptionKey = 4;
- required uint64 expiryDate = 5;
- optional string encryptionAlgorithm = 6;
-}
-
-/**
- * Cipher suite.
- */
-enum CipherSuiteProto {
- UNKNOWN = 1;
- AES_CTR_NOPADDING = 2;
-}
-
-/**
- * Crypto protocol version used to access encrypted files.
- */
-enum CryptoProtocolVersionProto {
- UNKNOWN_PROTOCOL_VERSION = 1;
- ENCRYPTION_ZONES = 2;
-}
-
-/**
- * Encryption information for a file.
- */
-message FileEncryptionInfoProto {
- required CipherSuiteProto suite = 1;
- required CryptoProtocolVersionProto cryptoProtocolVersion = 2;
- required bytes key = 3;
- required bytes iv = 4;
- required string keyName = 5;
- required string ezKeyVersionName = 6;
-}
-
-/**
- * Encryption information for an individual
- * file within an encryption zone
- */
-message PerFileEncryptionInfoProto {
- required bytes key = 1;
- required bytes iv = 2;
- required string ezKeyVersionName = 3;
-}
-
-/**
- * Encryption information for an encryption
- * zone
- */
-message ZoneEncryptionInfoProto {
- required CipherSuiteProto suite = 1;
- required CryptoProtocolVersionProto cryptoProtocolVersion = 2;
- required string keyName = 3;
-}
-
-/**
- * Cipher option
- */
-message CipherOptionProto {
- required CipherSuiteProto suite = 1;
- optional bytes inKey = 2;
- optional bytes inIv = 3;
- optional bytes outKey = 4;
- optional bytes outIv = 5;
-}
-
-/**
- * A set of file blocks and their locations.
- */
-message LocatedBlocksProto {
- required uint64 fileLength = 1;
- repeated LocatedBlockProto blocks = 2;
- required bool underConstruction = 3;
- optional LocatedBlockProto lastBlock = 4;
- required bool isLastBlockComplete = 5;
- optional FileEncryptionInfoProto fileEncryptionInfo = 6;
-}
-
-/**
- * Status of a file, directory or symlink
- * Optionally includes a file's block locations if requested by client on the rpc call.
- */
-message HdfsFileStatusProto {
- enum FileType {
- IS_DIR = 1;
- IS_FILE = 2;
- IS_SYMLINK = 3;
- }
- required FileType fileType = 1;
-  required bytes path = 2;           // local name of the inode, encoded in Java UTF-8
- required uint64 length = 3;
- required FsPermissionProto permission = 4;
- required string owner = 5;
- required string group = 6;
- required uint64 modification_time = 7;
- required uint64 access_time = 8;
-
- // Optional fields for symlink
-  optional bytes symlink = 9;        // if a symlink, the target path, encoded in Java UTF-8
-
- // Optional fields for file
- optional uint32 block_replication = 10 [default = 0]; // only 16bits used
- optional uint64 blocksize = 11 [default = 0];
-  optional LocatedBlocksProto locations = 12;  // supplied only if asked for by the client
-
- // Optional field for fileId
- optional uint64 fileId = 13 [default = 0]; // default as an invalid id
- optional int32 childrenNum = 14 [default = -1];
- // Optional field for file encryption
- optional FileEncryptionInfoProto fileEncryptionInfo = 15;
-
- optional uint32 storagePolicy = 16 [default = 0]; // block storage policy id
-}
-
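For illustration only (not part of this commit): many fields above are optional, and locations in particular is supplied only on request, so callers of the generated code should guard reads with the has*() methods. A minimal sketch, again assuming the generated outer class HdfsProtos:

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto;

    public class FileStatusSketch {
      // Prefer the block-location view of the length when the namenode
      // actually attached locations; otherwise fall back to the status field.
      static long describedLength(HdfsFileStatusProto st) {
        if (st.getFileType() == HdfsFileStatusProto.FileType.IS_FILE
            && st.hasLocations()) {
          return st.getLocations().getFileLength();
        }
        return st.getLength();
      }
    }
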
-/**
- * Checksum algorithms/types used in HDFS
- * Make sure this enum's integer values match the id properties of the enum
- * values defined in org.apache.hadoop.util.DataChecksum.Type
- */
-enum ChecksumTypeProto {
- CHECKSUM_NULL = 0;
- CHECKSUM_CRC32 = 1;
- CHECKSUM_CRC32C = 2;
-}
-
-/**
- * HDFS Server Defaults
- */
-message FsServerDefaultsProto {
- required uint64 blockSize = 1;
- required uint32 bytesPerChecksum = 2;
- required uint32 writePacketSize = 3;
- required uint32 replication = 4; // Actually a short - only 16 bits used
- required uint32 fileBufferSize = 5;
- optional bool encryptDataTransfer = 6 [default = false];
- optional uint64 trashInterval = 7 [default = 0];
- optional ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
-}
-
-
-/**
- * Directory listing
- */
-message DirectoryListingProto {
- repeated HdfsFileStatusProto partialListing = 1;
- required uint32 remainingEntries = 2;
-}
-
-/**
- * Status of a snapshottable directory: besides the normal information for
- * a directory status, it also includes the snapshot quota, the number of
- * snapshots, and the full path of the parent directory.
- */
-message SnapshottableDirectoryStatusProto {
- required HdfsFileStatusProto dirStatus = 1;
-
- // Fields specific for snapshottable directory
- required uint32 snapshot_quota = 2;
- required uint32 snapshot_number = 3;
- required bytes parent_fullpath = 4;
-}
-
-/**
- * Snapshottable directory listing
- */
-message SnapshottableDirectoryListingProto {
- repeated SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
-}
-
-/**
- * Snapshot diff report entry
- */
-message SnapshotDiffReportEntryProto {
- required bytes fullpath = 1;
- required string modificationLabel = 2;
- optional bytes targetPath = 3;
-}
-
-/**
- * Snapshot diff report
- */
-message SnapshotDiffReportProto {
- // full path of the directory where snapshots were taken
- required string snapshotRoot = 1;
- required string fromSnapshot = 2;
- required string toSnapshot = 3;
- repeated SnapshotDiffReportEntryProto diffReportEntries = 4;
-}
-
-/**
- * Common node information shared by all the nodes in the cluster
- */
-message StorageInfoProto {
- required uint32 layoutVersion = 1; // Layout version of the file system
- required uint32 namespceID = 2; // File system namespace ID
- required string clusterID = 3; // ID of the cluster
- required uint64 cTime = 4; // File system creation time
-}
-
-/**
- * Information sent by a namenode to identify itself to the primary namenode.
- */
-message NamenodeRegistrationProto {
- required string rpcAddress = 1; // host:port of the namenode RPC address
- required string httpAddress = 2; // host:port of the namenode http server
- enum NamenodeRoleProto {
- NAMENODE = 1;
- BACKUP = 2;
- CHECKPOINT = 3;
- }
- required StorageInfoProto storageInfo = 3; // Node information
- optional NamenodeRoleProto role = 4 [default = NAMENODE]; // Namenode role
-}
-
-/**
- * Unique signature to identify checkpoint transactions.
- */
-message CheckpointSignatureProto {
- required string blockPoolId = 1;
- required uint64 mostRecentCheckpointTxId = 2;
- required uint64 curSegmentTxId = 3;
- required StorageInfoProto storageInfo = 4;
-}
-
-/**
- * Command sent from one namenode to another namenode.
- */
-message NamenodeCommandProto {
- enum Type {
- NamenodeCommand = 0; // Base command
- CheckPointCommand = 1; // Check point command
- }
- required uint32 action = 1;
- required Type type = 2;
- optional CheckpointCommandProto checkpointCmd = 3;
-}
-
-/**
- * Command returned from primary to checkpointing namenode.
- * This command has checkpoint signature that identifies
- * checkpoint transaction and is needed for further
- * communication related to checkpointing.
- */
-message CheckpointCommandProto {
-  // Unique signature to identify the checkpoint transaction
- required CheckpointSignatureProto signature = 1;
-
-  // If true, transfer the image back to the primary upon completion of the checkpoint
- required bool needToReturnImage = 2;
-}
-
-/**
- * Block information
- *
- * Please be wary of adding additional fields here, since INodeFiles
- * need to fit in PB's default max message size of 64MB.
- * We restrict the max # of blocks per file
- * (dfs.namenode.fs-limits.max-blocks-per-file), but it's better
- * to avoid changing this.
- */
-message BlockProto {
- required uint64 blockId = 1;
- required uint64 genStamp = 2;
- optional uint64 numBytes = 3 [default = 0];
-}
-
-/**
- * Block and the datanodes where it is located
- */
-message BlockWithLocationsProto {
- required BlockProto block = 1; // Block
- repeated string datanodeUuids = 2; // Datanodes with replicas of the block
- repeated string storageUuids = 3; // Storages with replicas of the block
- repeated StorageTypeProto storageTypes = 4;
-}
-
-/**
- * List of blocks with locations
- */
-message BlocksWithLocationsProto {
- repeated BlockWithLocationsProto blocks = 1;
-}
-
-/**
- * Editlog information with available transactions
- */
-message RemoteEditLogProto {
- required uint64 startTxId = 1; // Starting available edit log transaction
- required uint64 endTxId = 2; // Ending available edit log transaction
- optional bool isInProgress = 3 [default = false];
-}
-
-/**
- * Enumeration of editlogs available on a remote namenode
- */
-message RemoteEditLogManifestProto {
- repeated RemoteEditLogProto logs = 1;
-}
-
-/**
- * Namespace information that describes namespace on a namenode
- */
-message NamespaceInfoProto {
- required string buildVersion = 1; // Software revision version (e.g. an svn or git revision)
- required uint32 unused = 2; // Retained for backward compatibility
- required string blockPoolID = 3; // block pool used by the namespace
- required StorageInfoProto storageInfo = 4;// Node information
- required string softwareVersion = 5; // Software version number (e.g. 2.0.0)
- optional uint64 capabilities = 6 [default = 0]; // feature flags
-}
-
-/**
- * Block access token information
- */
-message BlockKeyProto {
- required uint32 keyId = 1; // Key identifier
- required uint64 expiryDate = 2; // Expiry time in milliseconds
- optional bytes keyBytes = 3; // Key secret
-}
-
-/**
- * Current key and set of block keys at the namenode.
- */
-message ExportedBlockKeysProto {
- required bool isBlockTokenEnabled = 1;
- required uint64 keyUpdateInterval = 2;
- required uint64 tokenLifeTime = 3;
- required BlockKeyProto currentKey = 4;
- repeated BlockKeyProto allKeys = 5;
-}
-
-/**
- * State of a block replica at a datanode
- */
-enum ReplicaStateProto {
- FINALIZED = 0; // State of a replica when it is not modified
- RBW = 1; // State of replica that is being written to
- RWR = 2; // State of replica that is waiting to be recovered
- RUR = 3; // State of replica that is under recovery
- TEMPORARY = 4; // State of replica that is created for replication
-}
-
-/**
- * Block that needs to be recovered at a given location
- */
-message RecoveringBlockProto {
- required uint64 newGenStamp = 1; // New genstamp post recovery
- required LocatedBlockProto block = 2; // Block to be recovered
- optional BlockProto truncateBlock = 3; // New block for recovery (truncate)
-}
-
-/**
- * void request
- */
-message VersionRequestProto {
-}
-
-/**
- * Version response from namenode.
- */
-message VersionResponseProto {
- required NamespaceInfoProto info = 1;
-}
-
-/**
- * Information related to a snapshot
- * TODO: add more information
- */
-message SnapshotInfoProto {
- required string snapshotName = 1;
- required string snapshotRoot = 2;
- required FsPermissionProto permission = 3;
- required string owner = 4;
- required string group = 5;
- required string createTime = 6;
- // TODO: do we need access time?
-}
-
-/**
- * Rolling upgrade status
- */
-message RollingUpgradeStatusProto {
- required string blockPoolId = 1;
- optional bool finalized = 2 [default = false];
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/inotify.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/inotify.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/inotify.proto
deleted file mode 100644
index 5b78fe6..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/inotify.proto
+++ /dev/null
@@ -1,126 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and stable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for a *stable* .proto interface.
- */
-
-// This file contains protocol buffers used to communicate edits to clients
-// as part of the inotify system.
-
-option java_package = "org.apache.hadoop.hdfs.protocol.proto";
-option java_outer_classname = "InotifyProtos";
-option java_generate_equals_and_hash = true;
-package hadoop.hdfs;
-
-import "acl.proto";
-import "xattr.proto";
-import "hdfs.proto";
-
-enum EventType {
- EVENT_CREATE = 0x0;
- EVENT_CLOSE = 0x1;
- EVENT_APPEND = 0x2;
- EVENT_RENAME = 0x3;
- EVENT_METADATA = 0x4;
- EVENT_UNLINK = 0x5;
-}
-
-message EventProto {
- required EventType type = 1;
- required bytes contents = 2;
-}
-
-message EventBatchProto {
- required int64 txid = 1;
- repeated EventProto events = 2;
-}
-
-enum INodeType {
- I_TYPE_FILE = 0x0;
- I_TYPE_DIRECTORY = 0x1;
- I_TYPE_SYMLINK = 0x2;
-}
-
-enum MetadataUpdateType {
- META_TYPE_TIMES = 0x0;
- META_TYPE_REPLICATION = 0x1;
- META_TYPE_OWNER = 0x2;
- META_TYPE_PERMS = 0x3;
- META_TYPE_ACLS = 0x4;
- META_TYPE_XATTRS = 0x5;
-}
-
-message CreateEventProto {
- required INodeType type = 1;
- required string path = 2;
- required int64 ctime = 3;
- required string ownerName = 4;
- required string groupName = 5;
- required FsPermissionProto perms = 6;
- optional int32 replication = 7;
- optional string symlinkTarget = 8;
- optional bool overwrite = 9;
- optional int64 defaultBlockSize = 10 [default=0];
-}
-
-message CloseEventProto {
- required string path = 1;
- required int64 fileSize = 2;
- required int64 timestamp = 3;
-}
-
-message AppendEventProto {
- required string path = 1;
- optional bool newBlock = 2 [default = false];
-}
-
-message RenameEventProto {
- required string srcPath = 1;
- required string destPath = 2;
- required int64 timestamp = 3;
-}
-
-message MetadataUpdateEventProto {
- required string path = 1;
- required MetadataUpdateType type = 2;
- optional int64 mtime = 3;
- optional int64 atime = 4;
- optional int32 replication = 5;
- optional string ownerName = 6;
- optional string groupName = 7;
- optional FsPermissionProto perms = 8;
- repeated AclEntryProto acls = 9;
- repeated XAttrProto xAttrs = 10;
- optional bool xAttrsRemoved = 11;
-}
-
-message UnlinkEventProto {
- required string path = 1;
- required int64 timestamp = 2;
-}
-
-message EventsListProto {
- repeated EventProto events = 1; // deprecated
- required int64 firstTxid = 2;
- required int64 lastTxid = 3;
- required int64 syncTxid = 4;
- repeated EventBatchProto batch = 5;
-}
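For illustration only (not part of this commit): EventProto carries an opaque contents payload whose concrete message is selected by the EventType tag, so consumers parse the inner message after switching on the type. A minimal decoding sketch against the generated InotifyProtos classes:

    import java.util.List;
    import com.google.protobuf.InvalidProtocolBufferException;
    import org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.*;

    public class InotifySketch {
      static void describe(EventBatchProto batch)
          throws InvalidProtocolBufferException {
        System.out.println("txid=" + batch.getTxid());
        List<EventProto> events = batch.getEventsList();
        for (EventProto e : events) {
          switch (e.getType()) {
            case EVENT_CREATE:
              // contents holds a serialized CreateEventProto for this type
              CreateEventProto c = CreateEventProto.parseFrom(e.getContents());
              System.out.println("create " + c.getPath());
              break;
            case EVENT_UNLINK:
              UnlinkEventProto u = UnlinkEventProto.parseFrom(e.getContents());
              System.out.println("unlink " + u.getPath());
              break;
            default:
              System.out.println("other: " + e.getType());
          }
        }
      }
    }
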
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto
deleted file mode 100644
index acdc28e..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-option java_package = "org.apache.hadoop.hdfs.protocol.proto";
-option java_outer_classname = "XAttrProtos";
-option java_generate_equals_and_hash = true;
-package hadoop.hdfs;
-
-message XAttrProto {
- enum XAttrNamespaceProto {
- USER = 0;
- TRUSTED = 1;
- SECURITY = 2;
- SYSTEM = 3;
- RAW = 4;
- }
-
- required XAttrNamespaceProto namespace = 1;
- required string name = 2;
- optional bytes value = 3;
-}
-
-message XAttrEditLogProto {
- optional string src = 1;
- repeated XAttrProto xAttrs = 2;
-}
-
-enum XAttrSetFlagProto {
- XATTR_CREATE = 0x01;
- XATTR_REPLACE = 0x02;
-}
-
-message SetXAttrRequestProto {
- required string src = 1;
- optional XAttrProto xAttr = 2;
- optional uint32 flag = 3; //bits set using XAttrSetFlagProto
-}
-
-message SetXAttrResponseProto {
-}
-
-message GetXAttrsRequestProto {
- required string src = 1;
- repeated XAttrProto xAttrs = 2;
-}
-
-message GetXAttrsResponseProto {
- repeated XAttrProto xAttrs = 1;
-}
-
-message ListXAttrsRequestProto {
- required string src = 1;
-}
-
-message ListXAttrsResponseProto {
- repeated XAttrProto xAttrs = 1;
-}
-
-message RemoveXAttrRequestProto {
- required string src = 1;
- optional XAttrProto xAttr = 2;
-}
-
-message RemoveXAttrResponseProto {
-}
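For illustration only (not part of this commit): the flag field of SetXAttrRequestProto is a plain uint32 holding XAttrSetFlagProto bits, so callers set it from the generated *_VALUE constants. A minimal sketch; the attribute name, value, and path are hypothetical:

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.*;

    public class XAttrSketch {
      public static void main(String[] args) {
        XAttrProto xattr = XAttrProto.newBuilder()
            .setNamespace(XAttrProto.XAttrNamespaceProto.USER)
            .setName("owner-note")                        // hypothetical name
            .setValue(ByteString.copyFromUtf8("example")) // hypothetical value
            .build();
        SetXAttrRequestProto req = SetXAttrRequestProto.newBuilder()
            .setSrc("/data/file")                         // hypothetical path
            .setXAttr(xattr)
            // create-only semantics: fail if the xattr already exists
            .setFlag(XAttrSetFlagProto.XATTR_CREATE_VALUE)
            .build();
        System.out.println(req);
      }
    }
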
[03/21] hadoop git commit: HDFS-8726. Move protobuf files that define
the client-server protocols to hdfs-client. Contributed by Haohui Mai.
Posted by aw...@apache.org.
HDFS-8726. Move protobuf files that define the client-server protocols to hdfs-client. Contributed by Haohui Mai.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc6182d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc6182d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc6182d5
Branch: refs/heads/HADOOP-12111
Commit: fc6182d5ed92ac70de1f4633edd5265b7be1a8dc
Parents: 4119ad3
Author: Haohui Mai <wh...@apache.org>
Authored: Wed Jul 8 10:37:10 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Wed Jul 8 10:37:10 2015 -0700
----------------------------------------------------------------------
.../dev-support/findbugsExcludeFile.xml | 4 +
hadoop-hdfs-project/hadoop-hdfs-client/pom.xml | 42 +
.../src/main/proto/ClientDatanodeProtocol.proto | 247 ++++++
.../src/main/proto/ClientNamenodeProtocol.proto | 863 +++++++++++++++++++
.../hadoop-hdfs-client/src/main/proto/acl.proto | 108 +++
.../src/main/proto/datatransfer.proto | 304 +++++++
.../src/main/proto/encryption.proto | 67 ++
.../src/main/proto/hdfs.proto | 611 +++++++++++++
.../src/main/proto/inotify.proto | 126 +++
.../src/main/proto/xattr.proto | 75 ++
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
hadoop-hdfs-project/hadoop-hdfs/pom.xml | 10 +-
.../hadoop-hdfs/src/contrib/bkjournal/pom.xml | 2 +-
.../hdfs/server/namenode/FSEditLogOp.java | 4 +-
.../src/main/proto/ClientDatanodeProtocol.proto | 247 ------
.../src/main/proto/ClientNamenodeProtocol.proto | 863 -------------------
.../hadoop-hdfs/src/main/proto/acl.proto | 113 ---
.../src/main/proto/datatransfer.proto | 304 -------
.../hadoop-hdfs/src/main/proto/editlog.proto | 35 +
.../hadoop-hdfs/src/main/proto/encryption.proto | 67 --
.../hadoop-hdfs/src/main/proto/hdfs.proto | 611 -------------
.../hadoop-hdfs/src/main/proto/inotify.proto | 126 ---
.../hadoop-hdfs/src/main/proto/xattr.proto | 80 --
23 files changed, 2490 insertions(+), 2422 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index be2911f..ba6453d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -17,4 +17,8 @@
</Or>
<Bug pattern="EI_EXPOSE_REP,EI_EXPOSE_REP2" />
</Match>
+ <Match>
+ <Package name="org.apache.hadoop.hdfs.protocol.proto" />
+ <Bug pattern="SE_BAD_FIELD,MS_SHOULD_BE_FINAL,UCF_USELESS_CONTROL_FLOW" />
+ </Match>
</FindBugsFilter>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
index 1b45095..aeaa980 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -63,6 +63,48 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
</excludes>
</configuration>
</plugin>
+ <plugin>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-maven-plugins</artifactId>
+ <executions>
+ <execution>
+ <id>compile-protoc</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>protoc</goal>
+ </goals>
+ <configuration>
+ <protocVersion>${protobuf.version}</protocVersion>
+ <protocCommand>${protoc.path}</protocCommand>
+ <imports>
+ <param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
+ <param>${basedir}/src/main/proto</param>
+ </imports>
+ <source>
+ <directory>${basedir}/src/main/proto</directory>
+ <includes>
+ <include>ClientDatanodeProtocol.proto</include>
+ <include>ClientNamenodeProtocol.proto</include>
+ <include>acl.proto</include>
+ <include>xattr.proto</include>
+ <include>datatransfer.proto</include>
+ <include>hdfs.proto</include>
+ <include>encryption.proto</include>
+ <include>inotify.proto</include>
+ </includes>
+ </source>
+ <output>${project.build.directory}/generated-sources/java</output>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-javadoc-plugin</artifactId>
+ <configuration>
+ <excludePackageNames>org.apache.hadoop.hdfs.protocol.proto</excludePackageNames>
+ </configuration>
+ </plugin>
</plugins>
</build>
</project>
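Usage note (an assumption about the build wiring above, not part of the commit text): because the compile-protoc execution is bound to the generate-sources phase, an ordinary build of hadoop-hdfs-client (for example mvn generate-sources, with protoc.path resolving to a protoc binary matching protobuf.version) should regenerate the Java sources into target/generated-sources/java, while the javadoc exclusion keeps the generated org.apache.hadoop.hdfs.protocol.proto package out of the published API docs.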
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
new file mode 100644
index 0000000..e0d1f5f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
@@ -0,0 +1,247 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and stable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for a *stable* .proto interface.
+ */
+
+// This file contains protocol buffers used by clients to communicate
+// directly with datanodes.
+
+option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+option java_outer_classname = "ClientDatanodeProtocolProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
+
+import "Security.proto";
+import "hdfs.proto";
+
+/**
+ * block - block for which visible length is requested
+ */
+message GetReplicaVisibleLengthRequestProto {
+ required ExtendedBlockProto block = 1;
+}
+
+/**
+ * length - visible length of the block
+ */
+message GetReplicaVisibleLengthResponseProto {
+ required uint64 length = 1;
+}
+
+/**
+ * void request
+ */
+message RefreshNamenodesRequestProto {
+}
+
+/**
+ * void response
+ */
+message RefreshNamenodesResponseProto {
+}
+
+/**
+ * blockPool - block pool to be deleted
+ * force - if false, delete the block pool only if it is empty.
+ * if true, delete the block pool even if it has blocks.
+ */
+message DeleteBlockPoolRequestProto {
+ required string blockPool = 1;
+ required bool force = 2;
+}
+
+/**
+ * void response
+ */
+message DeleteBlockPoolResponseProto {
+}
+
+/**
+ * Gets the paths of the files where a block and its metadata are stored
+ * block - block for which path information is being requested
+ * token - block token
+ *
+ * This message is deprecated in favor of file descriptor passing.
+ */
+message GetBlockLocalPathInfoRequestProto {
+ required ExtendedBlockProto block = 1;
+ required hadoop.common.TokenProto token = 2;
+}
+
+/**
+ * block - block for which file path information is being returned
+ * localPath - file path where the block data is stored
+ * localMetaPath - file path where the block meta data is stored
+ *
+ * This message is deprecated in favor of file descriptor passing.
+ */
+message GetBlockLocalPathInfoResponseProto {
+ required ExtendedBlockProto block = 1;
+ required string localPath = 2;
+ required string localMetaPath = 3;
+}
+
+/**
+ * Query for the disk locations of a number of blocks on this DN.
+ * blockPoolId - the pool to query
+ * blockIds - list of block IDs to query
+ * tokens - list of access tokens corresponding to list of block IDs
+ */
+message GetHdfsBlockLocationsRequestProto {
+ // Removed: HDFS-3969
+ // repeated ExtendedBlockProto blocks = 1;
+ repeated hadoop.common.TokenProto tokens = 2;
+
+ required string blockPoolId = 3;
+ repeated sfixed64 blockIds = 4 [ packed = true ];
+}
+
+/**
+ * volumeIds - id of each volume, potentially multiple bytes
+ * volumeIndexes - for each block, an index into volumeIds specifying the volume
+ * on which it is located. If a block is not present on any volume,
+ * the index is set to MAX_INT.
+ */
+message GetHdfsBlockLocationsResponseProto {
+ repeated bytes volumeIds = 1;
+ repeated uint32 volumeIndexes = 2 [ packed = true ];
+}
+
+/**
+ * forUpgrade - if true, clients are advised to wait for the restart and a
+ *              quick upgrade restart is instrumented. Otherwise, the
+ *              datanode does a regular shutdown.
+ */
+message ShutdownDatanodeRequestProto {
+ required bool forUpgrade = 1;
+}
+
+message ShutdownDatanodeResponseProto {
+}
+
+/**
+ * Ping datanode for liveness and quick info
+ */
+message GetDatanodeInfoRequestProto {
+}
+
+message GetDatanodeInfoResponseProto {
+ required DatanodeLocalInfoProto localInfo = 1;
+}
+
+/** Asks DataNode to reload configuration file. */
+message StartReconfigurationRequestProto {
+}
+
+message StartReconfigurationResponseProto {
+}
+
+message TriggerBlockReportRequestProto {
+ required bool incremental = 1;
+}
+
+message TriggerBlockReportResponseProto {
+}
+
+/** Query the running status of reconfiguration process */
+message GetReconfigurationStatusRequestProto {
+}
+
+message GetReconfigurationStatusConfigChangeProto {
+ required string name = 1;
+ required string oldValue = 2;
+ optional string newValue = 3;
+ optional string errorMessage = 4; // It is empty if success.
+}
+
+message GetReconfigurationStatusResponseProto {
+ required int64 startTime = 1;
+ optional int64 endTime = 2;
+ repeated GetReconfigurationStatusConfigChangeProto changes = 3;
+}
+
+message ListReconfigurablePropertiesRequestProto {
+}
+
+/** Query the reconfigurable properties on DataNode. */
+message ListReconfigurablePropertiesResponseProto {
+ repeated string name = 1;
+}
+
+/**
+ * Protocol used from the client to the Datanode.
+ * See the request and response messages for details of each rpc call.
+ */
+service ClientDatanodeProtocolService {
+ /**
+ * Returns the visible length of the replica
+ */
+ rpc getReplicaVisibleLength(GetReplicaVisibleLengthRequestProto)
+ returns(GetReplicaVisibleLengthResponseProto);
+
+ /**
+ * Refresh the list of federated namenodes from updated configuration.
+ * Adds new namenodes and stops the deleted namenodes.
+ */
+ rpc refreshNamenodes(RefreshNamenodesRequestProto)
+ returns(RefreshNamenodesResponseProto);
+
+ /**
+ * Delete the block pool from the datanode.
+ */
+ rpc deleteBlockPool(DeleteBlockPoolRequestProto)
+ returns(DeleteBlockPoolResponseProto);
+
+ /**
+ * Retrieves the path names of the block file and metadata file stored on the
+ * local file system.
+ */
+ rpc getBlockLocalPathInfo(GetBlockLocalPathInfoRequestProto)
+ returns(GetBlockLocalPathInfoResponseProto);
+
+ /**
+ * Retrieve additional HDFS-specific metadata about a set of blocks stored
+ * on the local file system.
+ */
+ rpc getHdfsBlockLocations(GetHdfsBlockLocationsRequestProto)
+ returns(GetHdfsBlockLocationsResponseProto);
+
+ rpc shutdownDatanode(ShutdownDatanodeRequestProto)
+ returns(ShutdownDatanodeResponseProto);
+
+ rpc getDatanodeInfo(GetDatanodeInfoRequestProto)
+ returns(GetDatanodeInfoResponseProto);
+
+ rpc getReconfigurationStatus(GetReconfigurationStatusRequestProto)
+ returns(GetReconfigurationStatusResponseProto);
+
+ rpc startReconfiguration(StartReconfigurationRequestProto)
+ returns(StartReconfigurationResponseProto);
+
+ rpc listReconfigurableProperties(
+ ListReconfigurablePropertiesRequestProto)
+ returns(ListReconfigurablePropertiesResponseProto);
+
+ rpc triggerBlockReport(TriggerBlockReportRequestProto)
+ returns(TriggerBlockReportResponseProto);
+}
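For illustration only (not part of this commit): with java_generic_services = true, protoc emits a ClientDatanodeProtocolService class with a nested BlockingInterface. A minimal call sketch; obtaining the RPC proxy (normally done via Hadoop's ProtobufRpcEngine) is deliberately elided and passed in as a parameter:

    import com.google.protobuf.RpcController;
    import com.google.protobuf.ServiceException;
    import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.*;

    public class DatanodeInfoSketch {
      // proxy: an RPC stub implementing the generated blocking interface,
      // assumed to have been acquired elsewhere.
      static String fetchDatanodeInfo(
          ClientDatanodeProtocolService.BlockingInterface proxy,
          RpcController controller) throws ServiceException {
        GetDatanodeInfoRequestProto req =
            GetDatanodeInfoRequestProto.newBuilder().build();
        GetDatanodeInfoResponseProto resp =
            proxy.getDatanodeInfo(controller, req);
        return resp.getLocalInfo().toString();
      }
    }
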
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
new file mode 100644
index 0000000..b44c556
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -0,0 +1,863 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and stable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for a *stable* .proto interface.
+ */
+
+option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+option java_outer_classname = "ClientNamenodeProtocolProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
+
+import "Security.proto";
+import "hdfs.proto";
+import "acl.proto";
+import "xattr.proto";
+import "encryption.proto";
+import "inotify.proto";
+
+/**
+ * The ClientNamenodeProtocol Service defines the interface between a client
+ * (e.g., one running inside an MR Task) and the Namenode.
+ * See org.apache.hadoop.hdfs.protocol.ClientProtocol for the javadoc
+ * for each of the methods.
+ * The exceptions declared in the above class also apply to this protocol.
+ * Exceptions are unwrapped and thrown by the PB libraries.
+ */
+
+message GetBlockLocationsRequestProto {
+ required string src = 1; // file name
+ required uint64 offset = 2; // range start offset
+ required uint64 length = 3; // range length
+}
+
+message GetBlockLocationsResponseProto {
+ optional LocatedBlocksProto locations = 1;
+}
+
+message GetServerDefaultsRequestProto { // No parameters
+}
+
+message GetServerDefaultsResponseProto {
+ required FsServerDefaultsProto serverDefaults = 1;
+}
+
+enum CreateFlagProto {
+ CREATE = 0x01; // Create a file
+ OVERWRITE = 0x02; // Truncate/overwrite a file. Same as POSIX O_TRUNC
+ APPEND = 0x04; // Append to a file
+ LAZY_PERSIST = 0x10; // File with reduced durability guarantees.
+ NEW_BLOCK = 0x20; // Write data to a new block when appending
+}
+
+message CreateRequestProto {
+ required string src = 1;
+ required FsPermissionProto masked = 2;
+ required string clientName = 3;
+ required uint32 createFlag = 4; // bits set using CreateFlag
+ required bool createParent = 5;
+ required uint32 replication = 6; // Short: Only 16 bits used
+ required uint64 blockSize = 7;
+ repeated CryptoProtocolVersionProto cryptoProtocolVersion = 8;
+}
+
+message CreateResponseProto {
+ optional HdfsFileStatusProto fs = 1;
+}
+
+message AppendRequestProto {
+ required string src = 1;
+ required string clientName = 2;
+ optional uint32 flag = 3; // bits set using CreateFlag
+}
+
+message AppendResponseProto {
+ optional LocatedBlockProto block = 1;
+ optional HdfsFileStatusProto stat = 2;
+}
+
+message SetReplicationRequestProto {
+ required string src = 1;
+ required uint32 replication = 2; // Short: Only 16 bits used
+}
+
+message SetReplicationResponseProto {
+ required bool result = 1;
+}
+
+message SetStoragePolicyRequestProto {
+ required string src = 1;
+ required string policyName = 2;
+}
+
+message SetStoragePolicyResponseProto { // void response
+}
+
+message GetStoragePoliciesRequestProto { // void request
+}
+
+message GetStoragePoliciesResponseProto {
+ repeated BlockStoragePolicyProto policies = 1;
+}
+
+message SetPermissionRequestProto {
+ required string src = 1;
+ required FsPermissionProto permission = 2;
+}
+
+message SetPermissionResponseProto { // void response
+}
+
+message SetOwnerRequestProto {
+ required string src = 1;
+ optional string username = 2;
+ optional string groupname = 3;
+}
+
+message SetOwnerResponseProto { // void response
+}
+
+message AbandonBlockRequestProto {
+ required ExtendedBlockProto b = 1;
+ required string src = 2;
+ required string holder = 3;
+ optional uint64 fileId = 4 [default = 0]; // default to GRANDFATHER_INODE_ID
+}
+
+message AbandonBlockResponseProto { // void response
+}
+
+message AddBlockRequestProto {
+ required string src = 1;
+ required string clientName = 2;
+ optional ExtendedBlockProto previous = 3;
+ repeated DatanodeInfoProto excludeNodes = 4;
+ optional uint64 fileId = 5 [default = 0]; // default as a bogus id
+ repeated string favoredNodes = 6; //the set of datanodes to use for the block
+}
+
+message AddBlockResponseProto {
+ required LocatedBlockProto block = 1;
+}
+
+message GetAdditionalDatanodeRequestProto {
+ required string src = 1;
+ required ExtendedBlockProto blk = 2;
+ repeated DatanodeInfoProto existings = 3;
+ repeated DatanodeInfoProto excludes = 4;
+ required uint32 numAdditionalNodes = 5;
+ required string clientName = 6;
+ repeated string existingStorageUuids = 7;
+ optional uint64 fileId = 8 [default = 0]; // default to GRANDFATHER_INODE_ID
+}
+
+message GetAdditionalDatanodeResponseProto {
+ required LocatedBlockProto block = 1;
+}
+
+message CompleteRequestProto {
+ required string src = 1;
+ required string clientName = 2;
+ optional ExtendedBlockProto last = 3;
+ optional uint64 fileId = 4 [default = 0]; // default to GRANDFATHER_INODE_ID
+}
+
+message CompleteResponseProto {
+ required bool result = 1;
+}
+
+message ReportBadBlocksRequestProto {
+ repeated LocatedBlockProto blocks = 1;
+}
+
+message ReportBadBlocksResponseProto { // void response
+}
+
+message ConcatRequestProto {
+ required string trg = 1;
+ repeated string srcs = 2;
+}
+
+message ConcatResponseProto { // void response
+}
+
+message TruncateRequestProto {
+ required string src = 1;
+ required uint64 newLength = 2;
+ required string clientName = 3;
+}
+
+message TruncateResponseProto {
+ required bool result = 1;
+}
+
+message RenameRequestProto {
+ required string src = 1;
+ required string dst = 2;
+}
+
+message RenameResponseProto {
+ required bool result = 1;
+}
+
+
+message Rename2RequestProto {
+ required string src = 1;
+ required string dst = 2;
+ required bool overwriteDest = 3;
+}
+
+message Rename2ResponseProto { // void response
+}
+
+message DeleteRequestProto {
+ required string src = 1;
+ required bool recursive = 2;
+}
+
+message DeleteResponseProto {
+ required bool result = 1;
+}
+
+message MkdirsRequestProto {
+ required string src = 1;
+ required FsPermissionProto masked = 2;
+ required bool createParent = 3;
+}
+message MkdirsResponseProto {
+ required bool result = 1;
+}
+
+message GetListingRequestProto {
+ required string src = 1;
+ required bytes startAfter = 2;
+ required bool needLocation = 3;
+}
+message GetListingResponseProto {
+ optional DirectoryListingProto dirList = 1;
+}
+
+message GetSnapshottableDirListingRequestProto { // no input parameters
+}
+message GetSnapshottableDirListingResponseProto {
+ optional SnapshottableDirectoryListingProto snapshottableDirList = 1;
+}
+
+message GetSnapshotDiffReportRequestProto {
+ required string snapshotRoot = 1;
+ required string fromSnapshot = 2;
+ required string toSnapshot = 3;
+}
+message GetSnapshotDiffReportResponseProto {
+ required SnapshotDiffReportProto diffReport = 1;
+}
+
+message RenewLeaseRequestProto {
+ required string clientName = 1;
+}
+
+message RenewLeaseResponseProto { //void response
+}
+
+message RecoverLeaseRequestProto {
+ required string src = 1;
+ required string clientName = 2;
+}
+message RecoverLeaseResponseProto {
+ required bool result = 1;
+}
+
+message GetFsStatusRequestProto { // no input parameters
+}
+
+message GetFsStatsResponseProto {
+ required uint64 capacity = 1;
+ required uint64 used = 2;
+ required uint64 remaining = 3;
+ required uint64 under_replicated = 4;
+ required uint64 corrupt_blocks = 5;
+ required uint64 missing_blocks = 6;
+ optional uint64 missing_repl_one_blocks = 7;
+}
+
+enum DatanodeReportTypeProto { // type of the datanode report
+ ALL = 1;
+ LIVE = 2;
+ DEAD = 3;
+ DECOMMISSIONING = 4;
+}
+
+message GetDatanodeReportRequestProto {
+ required DatanodeReportTypeProto type = 1;
+}
+
+message GetDatanodeReportResponseProto {
+ repeated DatanodeInfoProto di = 1;
+}
+
+message GetDatanodeStorageReportRequestProto {
+ required DatanodeReportTypeProto type = 1;
+}
+
+message DatanodeStorageReportProto {
+ required DatanodeInfoProto datanodeInfo = 1;
+ repeated StorageReportProto storageReports = 2;
+}
+
+message GetDatanodeStorageReportResponseProto {
+ repeated DatanodeStorageReportProto datanodeStorageReports = 1;
+}
+
+message GetPreferredBlockSizeRequestProto {
+ required string filename = 1;
+}
+
+message GetPreferredBlockSizeResponseProto {
+ required uint64 bsize = 1;
+}
+
+enum SafeModeActionProto {
+ SAFEMODE_LEAVE = 1;
+ SAFEMODE_ENTER = 2;
+ SAFEMODE_GET = 3;
+}
+
+message SetSafeModeRequestProto {
+ required SafeModeActionProto action = 1;
+ optional bool checked = 2 [default = false];
+}
+
+message SetSafeModeResponseProto {
+ required bool result = 1;
+}
+
+message SaveNamespaceRequestProto {
+ optional uint64 timeWindow = 1 [default = 0];
+ optional uint64 txGap = 2 [default = 0];
+}
+
+message SaveNamespaceResponseProto { // void response
+ optional bool saved = 1 [default = true];
+}
+
+message RollEditsRequestProto { // no parameters
+}
+
+message RollEditsResponseProto { // response
+ required uint64 newSegmentTxId = 1;
+}
+
+message RestoreFailedStorageRequestProto {
+ required string arg = 1;
+}
+
+message RestoreFailedStorageResponseProto {
+ required bool result = 1;
+}
+
+message RefreshNodesRequestProto { // no parameters
+}
+
+message RefreshNodesResponseProto { // void response
+}
+
+message FinalizeUpgradeRequestProto { // no parameters
+}
+
+message FinalizeUpgradeResponseProto { // void response
+}
+
+enum RollingUpgradeActionProto {
+ QUERY = 1;
+ START = 2;
+ FINALIZE = 3;
+}
+
+message RollingUpgradeRequestProto {
+ required RollingUpgradeActionProto action = 1;
+}
+
+message RollingUpgradeInfoProto {
+ required RollingUpgradeStatusProto status = 1;
+ required uint64 startTime = 2;
+ required uint64 finalizeTime = 3;
+ required bool createdRollbackImages = 4;
+}
+
+message RollingUpgradeResponseProto {
+  optional RollingUpgradeInfoProto rollingUpgradeInfo = 1;
+}
+
+message ListCorruptFileBlocksRequestProto {
+ required string path = 1;
+ optional string cookie = 2;
+}
+
+message ListCorruptFileBlocksResponseProto {
+ required CorruptFileBlocksProto corrupt = 1;
+}
+
+message MetaSaveRequestProto {
+ required string filename = 1;
+}
+
+message MetaSaveResponseProto { // void response
+}
+
+message GetFileInfoRequestProto {
+ required string src = 1;
+}
+
+message GetFileInfoResponseProto {
+ optional HdfsFileStatusProto fs = 1;
+}
+
+message IsFileClosedRequestProto {
+ required string src = 1;
+}
+
+message IsFileClosedResponseProto {
+ required bool result = 1;
+}
+
+message CacheDirectiveInfoProto {
+ optional int64 id = 1;
+ optional string path = 2;
+ optional uint32 replication = 3;
+ optional string pool = 4;
+ optional CacheDirectiveInfoExpirationProto expiration = 5;
+}
+
+message CacheDirectiveInfoExpirationProto {
+ required int64 millis = 1;
+ required bool isRelative = 2;
+}
+
+message CacheDirectiveStatsProto {
+ required int64 bytesNeeded = 1;
+ required int64 bytesCached = 2;
+ required int64 filesNeeded = 3;
+ required int64 filesCached = 4;
+ required bool hasExpired = 5;
+}
+
+enum CacheFlagProto {
+ FORCE = 0x01; // Ignore pool resource limits
+}
+
+message AddCacheDirectiveRequestProto {
+ required CacheDirectiveInfoProto info = 1;
+ optional uint32 cacheFlags = 2; // bits set using CacheFlag
+}
+
+message AddCacheDirectiveResponseProto {
+ required int64 id = 1;
+}
+
+message ModifyCacheDirectiveRequestProto {
+ required CacheDirectiveInfoProto info = 1;
+ optional uint32 cacheFlags = 2; // bits set using CacheFlag
+}
+
+message ModifyCacheDirectiveResponseProto {
+}
+
+message RemoveCacheDirectiveRequestProto {
+ required int64 id = 1;
+}
+
+message RemoveCacheDirectiveResponseProto {
+}
+
+message ListCacheDirectivesRequestProto {
+ required int64 prevId = 1;
+ required CacheDirectiveInfoProto filter = 2;
+}
+
+message CacheDirectiveEntryProto {
+ required CacheDirectiveInfoProto info = 1;
+ required CacheDirectiveStatsProto stats = 2;
+}
+
+message ListCacheDirectivesResponseProto {
+ repeated CacheDirectiveEntryProto elements = 1;
+ required bool hasMore = 2;
+}
+
+message CachePoolInfoProto {
+ optional string poolName = 1;
+ optional string ownerName = 2;
+ optional string groupName = 3;
+ optional int32 mode = 4;
+ optional int64 limit = 5;
+ optional int64 maxRelativeExpiry = 6;
+}
+
+message CachePoolStatsProto {
+ required int64 bytesNeeded = 1;
+ required int64 bytesCached = 2;
+ required int64 bytesOverlimit = 3;
+ required int64 filesNeeded = 4;
+ required int64 filesCached = 5;
+}
+
+message AddCachePoolRequestProto {
+ required CachePoolInfoProto info = 1;
+}
+
+message AddCachePoolResponseProto { // void response
+}
+
+message ModifyCachePoolRequestProto {
+ required CachePoolInfoProto info = 1;
+}
+
+message ModifyCachePoolResponseProto { // void response
+}
+
+message RemoveCachePoolRequestProto {
+ required string poolName = 1;
+}
+
+message RemoveCachePoolResponseProto { // void response
+}
+
+message ListCachePoolsRequestProto {
+ required string prevPoolName = 1;
+}
+
+message ListCachePoolsResponseProto {
+ repeated CachePoolEntryProto entries = 1;
+ required bool hasMore = 2;
+}
+
+message CachePoolEntryProto {
+ required CachePoolInfoProto info = 1;
+ required CachePoolStatsProto stats = 2;
+}
+
+message GetFileLinkInfoRequestProto {
+ required string src = 1;
+}
+
+message GetFileLinkInfoResponseProto {
+ optional HdfsFileStatusProto fs = 1;
+}
+
+message GetContentSummaryRequestProto {
+ required string path = 1;
+}
+
+message GetContentSummaryResponseProto {
+ required ContentSummaryProto summary = 1;
+}
+
+message SetQuotaRequestProto {
+ required string path = 1;
+ required uint64 namespaceQuota = 2;
+ required uint64 storagespaceQuota = 3;
+ optional StorageTypeProto storageType = 4;
+}
+
+message SetQuotaResponseProto { // void response
+}
+
+message FsyncRequestProto {
+ required string src = 1;
+ required string client = 2;
+ optional sint64 lastBlockLength = 3 [default = -1];
+ optional uint64 fileId = 4 [default = 0]; // default to GRANDFATHER_INODE_ID
+}
+
+message FsyncResponseProto { // void response
+}
+
+message SetTimesRequestProto {
+ required string src = 1;
+ required uint64 mtime = 2;
+ required uint64 atime = 3;
+}
+
+message SetTimesResponseProto { // void response
+}
+
+message CreateSymlinkRequestProto {
+ required string target = 1;
+ required string link = 2;
+ required FsPermissionProto dirPerm = 3;
+ required bool createParent = 4;
+}
+
+message CreateSymlinkResponseProto { // void response
+}
+
+message GetLinkTargetRequestProto {
+ required string path = 1;
+}
+message GetLinkTargetResponseProto {
+ optional string targetPath = 1;
+}
+
+message UpdateBlockForPipelineRequestProto {
+ required ExtendedBlockProto block = 1;
+ required string clientName = 2;
+}
+
+message UpdateBlockForPipelineResponseProto {
+ required LocatedBlockProto block = 1;
+}
+
+message UpdatePipelineRequestProto {
+ required string clientName = 1;
+ required ExtendedBlockProto oldBlock = 2;
+ required ExtendedBlockProto newBlock = 3;
+ repeated DatanodeIDProto newNodes = 4;
+ repeated string storageIDs = 5;
+}
+
+message UpdatePipelineResponseProto { // void response
+}
+
+message SetBalancerBandwidthRequestProto {
+ required int64 bandwidth = 1;
+}
+
+message SetBalancerBandwidthResponseProto { // void response
+}
+
+message GetDataEncryptionKeyRequestProto { // no parameters
+}
+
+message GetDataEncryptionKeyResponseProto {
+ optional DataEncryptionKeyProto dataEncryptionKey = 1;
+}
+
+message CreateSnapshotRequestProto {
+ required string snapshotRoot = 1;
+ optional string snapshotName = 2;
+}
+
+message CreateSnapshotResponseProto {
+ required string snapshotPath = 1;
+}
+
+message RenameSnapshotRequestProto {
+ required string snapshotRoot = 1;
+ required string snapshotOldName = 2;
+ required string snapshotNewName = 3;
+}
+
+message RenameSnapshotResponseProto { // void response
+}
+
+message AllowSnapshotRequestProto {
+ required string snapshotRoot = 1;
+}
+
+message AllowSnapshotResponseProto {
+}
+
+message DisallowSnapshotRequestProto {
+ required string snapshotRoot = 1;
+}
+
+message DisallowSnapshotResponseProto {
+}
+
+message DeleteSnapshotRequestProto {
+ required string snapshotRoot = 1;
+ required string snapshotName = 2;
+}
+
+message DeleteSnapshotResponseProto { // void response
+}
+
+message CheckAccessRequestProto {
+ required string path = 1;
+ required AclEntryProto.FsActionProto mode = 2;
+}
+
+message CheckAccessResponseProto { // void response
+}
+
+message GetCurrentEditLogTxidRequestProto {
+}
+
+message GetCurrentEditLogTxidResponseProto {
+ required int64 txid = 1;
+}
+
+message GetEditsFromTxidRequestProto {
+ required int64 txid = 1;
+}
+
+message GetEditsFromTxidResponseProto {
+ required EventsListProto eventsList = 1;
+}
+
+service ClientNamenodeProtocol {
+ rpc getBlockLocations(GetBlockLocationsRequestProto)
+ returns(GetBlockLocationsResponseProto);
+ rpc getServerDefaults(GetServerDefaultsRequestProto)
+ returns(GetServerDefaultsResponseProto);
+ rpc create(CreateRequestProto)returns(CreateResponseProto);
+ rpc append(AppendRequestProto) returns(AppendResponseProto);
+ rpc setReplication(SetReplicationRequestProto)
+ returns(SetReplicationResponseProto);
+ rpc setStoragePolicy(SetStoragePolicyRequestProto)
+ returns(SetStoragePolicyResponseProto);
+ rpc getStoragePolicies(GetStoragePoliciesRequestProto)
+ returns(GetStoragePoliciesResponseProto);
+ rpc setPermission(SetPermissionRequestProto)
+ returns(SetPermissionResponseProto);
+ rpc setOwner(SetOwnerRequestProto) returns(SetOwnerResponseProto);
+ rpc abandonBlock(AbandonBlockRequestProto) returns(AbandonBlockResponseProto);
+ rpc addBlock(AddBlockRequestProto) returns(AddBlockResponseProto);
+ rpc getAdditionalDatanode(GetAdditionalDatanodeRequestProto)
+ returns(GetAdditionalDatanodeResponseProto);
+ rpc complete(CompleteRequestProto) returns(CompleteResponseProto);
+ rpc reportBadBlocks(ReportBadBlocksRequestProto)
+ returns(ReportBadBlocksResponseProto);
+ rpc concat(ConcatRequestProto) returns(ConcatResponseProto);
+ rpc truncate(TruncateRequestProto) returns(TruncateResponseProto);
+ rpc rename(RenameRequestProto) returns(RenameResponseProto);
+ rpc rename2(Rename2RequestProto) returns(Rename2ResponseProto);
+ rpc delete(DeleteRequestProto) returns(DeleteResponseProto);
+ rpc mkdirs(MkdirsRequestProto) returns(MkdirsResponseProto);
+ rpc getListing(GetListingRequestProto) returns(GetListingResponseProto);
+ rpc renewLease(RenewLeaseRequestProto) returns(RenewLeaseResponseProto);
+ rpc recoverLease(RecoverLeaseRequestProto)
+ returns(RecoverLeaseResponseProto);
+ rpc getFsStats(GetFsStatusRequestProto) returns(GetFsStatsResponseProto);
+ rpc getDatanodeReport(GetDatanodeReportRequestProto)
+ returns(GetDatanodeReportResponseProto);
+ rpc getDatanodeStorageReport(GetDatanodeStorageReportRequestProto)
+ returns(GetDatanodeStorageReportResponseProto);
+ rpc getPreferredBlockSize(GetPreferredBlockSizeRequestProto)
+ returns(GetPreferredBlockSizeResponseProto);
+ rpc setSafeMode(SetSafeModeRequestProto)
+ returns(SetSafeModeResponseProto);
+ rpc saveNamespace(SaveNamespaceRequestProto)
+ returns(SaveNamespaceResponseProto);
+ rpc rollEdits(RollEditsRequestProto)
+ returns(RollEditsResponseProto);
+ rpc restoreFailedStorage(RestoreFailedStorageRequestProto)
+ returns(RestoreFailedStorageResponseProto);
+ rpc refreshNodes(RefreshNodesRequestProto) returns(RefreshNodesResponseProto);
+ rpc finalizeUpgrade(FinalizeUpgradeRequestProto)
+ returns(FinalizeUpgradeResponseProto);
+ rpc rollingUpgrade(RollingUpgradeRequestProto)
+ returns(RollingUpgradeResponseProto);
+ rpc listCorruptFileBlocks(ListCorruptFileBlocksRequestProto)
+ returns(ListCorruptFileBlocksResponseProto);
+ rpc metaSave(MetaSaveRequestProto) returns(MetaSaveResponseProto);
+ rpc getFileInfo(GetFileInfoRequestProto) returns(GetFileInfoResponseProto);
+ rpc addCacheDirective(AddCacheDirectiveRequestProto)
+ returns (AddCacheDirectiveResponseProto);
+ rpc modifyCacheDirective(ModifyCacheDirectiveRequestProto)
+ returns (ModifyCacheDirectiveResponseProto);
+ rpc removeCacheDirective(RemoveCacheDirectiveRequestProto)
+ returns (RemoveCacheDirectiveResponseProto);
+ rpc listCacheDirectives(ListCacheDirectivesRequestProto)
+ returns (ListCacheDirectivesResponseProto);
+ rpc addCachePool(AddCachePoolRequestProto)
+ returns(AddCachePoolResponseProto);
+ rpc modifyCachePool(ModifyCachePoolRequestProto)
+ returns(ModifyCachePoolResponseProto);
+ rpc removeCachePool(RemoveCachePoolRequestProto)
+ returns(RemoveCachePoolResponseProto);
+ rpc listCachePools(ListCachePoolsRequestProto)
+ returns(ListCachePoolsResponseProto);
+ rpc getFileLinkInfo(GetFileLinkInfoRequestProto)
+ returns(GetFileLinkInfoResponseProto);
+ rpc getContentSummary(GetContentSummaryRequestProto)
+ returns(GetContentSummaryResponseProto);
+ rpc setQuota(SetQuotaRequestProto) returns(SetQuotaResponseProto);
+ rpc fsync(FsyncRequestProto) returns(FsyncResponseProto);
+ rpc setTimes(SetTimesRequestProto) returns(SetTimesResponseProto);
+ rpc createSymlink(CreateSymlinkRequestProto)
+ returns(CreateSymlinkResponseProto);
+ rpc getLinkTarget(GetLinkTargetRequestProto)
+ returns(GetLinkTargetResponseProto);
+ rpc updateBlockForPipeline(UpdateBlockForPipelineRequestProto)
+ returns(UpdateBlockForPipelineResponseProto);
+ rpc updatePipeline(UpdatePipelineRequestProto)
+ returns(UpdatePipelineResponseProto);
+ rpc getDelegationToken(hadoop.common.GetDelegationTokenRequestProto)
+ returns(hadoop.common.GetDelegationTokenResponseProto);
+ rpc renewDelegationToken(hadoop.common.RenewDelegationTokenRequestProto)
+ returns(hadoop.common.RenewDelegationTokenResponseProto);
+ rpc cancelDelegationToken(hadoop.common.CancelDelegationTokenRequestProto)
+ returns(hadoop.common.CancelDelegationTokenResponseProto);
+ rpc setBalancerBandwidth(SetBalancerBandwidthRequestProto)
+ returns(SetBalancerBandwidthResponseProto);
+ rpc getDataEncryptionKey(GetDataEncryptionKeyRequestProto)
+ returns(GetDataEncryptionKeyResponseProto);
+ rpc createSnapshot(CreateSnapshotRequestProto)
+ returns(CreateSnapshotResponseProto);
+ rpc renameSnapshot(RenameSnapshotRequestProto)
+ returns(RenameSnapshotResponseProto);
+ rpc allowSnapshot(AllowSnapshotRequestProto)
+ returns(AllowSnapshotResponseProto);
+ rpc disallowSnapshot(DisallowSnapshotRequestProto)
+ returns(DisallowSnapshotResponseProto);
+ rpc getSnapshottableDirListing(GetSnapshottableDirListingRequestProto)
+ returns(GetSnapshottableDirListingResponseProto);
+ rpc deleteSnapshot(DeleteSnapshotRequestProto)
+ returns(DeleteSnapshotResponseProto);
+ rpc getSnapshotDiffReport(GetSnapshotDiffReportRequestProto)
+ returns(GetSnapshotDiffReportResponseProto);
+ rpc isFileClosed(IsFileClosedRequestProto)
+ returns(IsFileClosedResponseProto);
+ rpc modifyAclEntries(ModifyAclEntriesRequestProto)
+ returns(ModifyAclEntriesResponseProto);
+ rpc removeAclEntries(RemoveAclEntriesRequestProto)
+ returns(RemoveAclEntriesResponseProto);
+ rpc removeDefaultAcl(RemoveDefaultAclRequestProto)
+ returns(RemoveDefaultAclResponseProto);
+ rpc removeAcl(RemoveAclRequestProto)
+ returns(RemoveAclResponseProto);
+ rpc setAcl(SetAclRequestProto)
+ returns(SetAclResponseProto);
+ rpc getAclStatus(GetAclStatusRequestProto)
+ returns(GetAclStatusResponseProto);
+ rpc setXAttr(SetXAttrRequestProto)
+ returns(SetXAttrResponseProto);
+ rpc getXAttrs(GetXAttrsRequestProto)
+ returns(GetXAttrsResponseProto);
+ rpc listXAttrs(ListXAttrsRequestProto)
+ returns(ListXAttrsResponseProto);
+ rpc removeXAttr(RemoveXAttrRequestProto)
+ returns(RemoveXAttrResponseProto);
+ rpc checkAccess(CheckAccessRequestProto)
+ returns(CheckAccessResponseProto);
+ rpc createEncryptionZone(CreateEncryptionZoneRequestProto)
+ returns(CreateEncryptionZoneResponseProto);
+ rpc listEncryptionZones(ListEncryptionZonesRequestProto)
+ returns(ListEncryptionZonesResponseProto);
+ rpc getEZForPath(GetEZForPathRequestProto)
+ returns(GetEZForPathResponseProto);
+ rpc getCurrentEditLogTxid(GetCurrentEditLogTxidRequestProto)
+ returns(GetCurrentEditLogTxidResponseProto);
+ rpc getEditsFromTxid(GetEditsFromTxidRequestProto)
+ returns(GetEditsFromTxidResponseProto);
+}
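For illustration only (not part of this commit): createFlag above is a uint32 carrying CreateFlagProto bits, combined bitwise just as the "bits set using CreateFlag" comments describe. A minimal request-building sketch; the path, client name, and sizes are hypothetical:

    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.*;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;

    public class CreateRequestSketch {
      public static void main(String[] args) {
        // Combine flag bits: create the file, truncating any existing one.
        int flags = CreateFlagProto.CREATE_VALUE | CreateFlagProto.OVERWRITE_VALUE;
        CreateRequestProto req = CreateRequestProto.newBuilder()
            .setSrc("/tmp/example.txt")                 // hypothetical path
            .setMasked(FsPermissionProto.newBuilder().setPerm(0644).build())
            .setClientName("example-client")            // hypothetical client
            .setCreateFlag(flags)
            .setCreateParent(true)
            .setReplication(3)
            .setBlockSize(128L * 1024 * 1024)
            .build();
        System.out.println(req);
      }
    }
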
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/acl.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/acl.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/acl.proto
new file mode 100644
index 0000000..bb7fdb0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/acl.proto
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+option java_outer_classname = "AclProtos";
+option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
+
+import "hdfs.proto";
+
+message AclEntryProto {
+ enum AclEntryScopeProto {
+ ACCESS = 0x0;
+ DEFAULT = 0x1;
+ }
+
+ enum AclEntryTypeProto {
+ USER = 0x0;
+ GROUP = 0x1;
+ MASK = 0x2;
+ OTHER = 0x3;
+ }
+
+ enum FsActionProto {
+ NONE = 0x0;
+ EXECUTE = 0x1;
+ WRITE = 0x2;
+ WRITE_EXECUTE = 0x3;
+ READ = 0x4;
+ READ_EXECUTE = 0x5;
+ READ_WRITE = 0x6;
+ PERM_ALL = 0x7;
+ }
+
+ required AclEntryTypeProto type = 1;
+ required AclEntryScopeProto scope = 2;
+ required FsActionProto permissions = 3;
+ optional string name = 4;
+}
+
+message AclStatusProto {
+ required string owner = 1;
+ required string group = 2;
+ required bool sticky = 3;
+ repeated AclEntryProto entries = 4;
+ optional FsPermissionProto permission = 5;
+}
+
+message ModifyAclEntriesRequestProto {
+ required string src = 1;
+ repeated AclEntryProto aclSpec = 2;
+}
+
+message ModifyAclEntriesResponseProto {
+}
+
+message RemoveAclRequestProto {
+ required string src = 1;
+}
+
+message RemoveAclResponseProto {
+}
+
+message RemoveAclEntriesRequestProto {
+ required string src = 1;
+ repeated AclEntryProto aclSpec = 2;
+}
+
+message RemoveAclEntriesResponseProto {
+}
+
+message RemoveDefaultAclRequestProto {
+ required string src = 1;
+}
+
+message RemoveDefaultAclResponseProto {
+}
+
+message SetAclRequestProto {
+ required string src = 1;
+ repeated AclEntryProto aclSpec = 2;
+}
+
+message SetAclResponseProto {
+}
+
+message GetAclStatusRequestProto {
+ required string src = 1;
+}
+
+message GetAclStatusResponseProto {
+ required AclStatusProto result = 1;
+}
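A minimal sketch of building one of these messages from Java with the standard protobuf builder API; class names follow the java_package/java_outer_classname options in this file, and the user name is hypothetical:

  import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto;
  import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryScopeProto;
  import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryTypeProto;
  import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto;

  public class AclEntryExample {
    public static void main(String[] args) {
      // Grant the named user read/execute on the access (non-default) ACL.
      AclEntryProto entry = AclEntryProto.newBuilder()
          .setType(AclEntryTypeProto.USER)
          .setScope(AclEntryScopeProto.ACCESS)
          .setPermissions(FsActionProto.READ_EXECUTE)
          .setName("alice")              // hypothetical user
          .build();
      System.out.println(entry);
    }
  }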
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto
new file mode 100644
index 0000000..5071d15
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto
@@ -0,0 +1,304 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and stable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for a *stable* .proto interface.
+ */
+
+// This file contains protocol buffers that are used to transfer data
+// to and from the datanode, as well as between datanodes.
+
+option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+option java_outer_classname = "DataTransferProtos";
+option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
+
+import "Security.proto";
+import "hdfs.proto";
+
+message DataTransferEncryptorMessageProto {
+ enum DataTransferEncryptorStatus {
+ SUCCESS = 0;
+ ERROR_UNKNOWN_KEY = 1;
+ ERROR = 2;
+ }
+ required DataTransferEncryptorStatus status = 1;
+ optional bytes payload = 2;
+ optional string message = 3;
+ repeated CipherOptionProto cipherOption = 4;
+}
+
+message BaseHeaderProto {
+ required ExtendedBlockProto block = 1;
+ optional hadoop.common.TokenProto token = 2;
+ optional DataTransferTraceInfoProto traceInfo = 3;
+}
+
+message DataTransferTraceInfoProto {
+ required uint64 traceId = 1;
+ required uint64 parentId = 2;
+}
+
+message ClientOperationHeaderProto {
+ required BaseHeaderProto baseHeader = 1;
+ required string clientName = 2;
+}
+
+message CachingStrategyProto {
+ optional bool dropBehind = 1;
+ optional int64 readahead = 2;
+}
+
+message OpReadBlockProto {
+ required ClientOperationHeaderProto header = 1;
+ required uint64 offset = 2;
+ required uint64 len = 3;
+ optional bool sendChecksums = 4 [default = true];
+ optional CachingStrategyProto cachingStrategy = 5;
+}
+
+
+message ChecksumProto {
+ required ChecksumTypeProto type = 1;
+ required uint32 bytesPerChecksum = 2;
+}
+
+message OpWriteBlockProto {
+ required ClientOperationHeaderProto header = 1;
+ repeated DatanodeInfoProto targets = 2;
+ optional DatanodeInfoProto source = 3;
+ enum BlockConstructionStage {
+ PIPELINE_SETUP_APPEND = 0;
+ // pipeline set up for failed PIPELINE_SETUP_APPEND recovery
+ PIPELINE_SETUP_APPEND_RECOVERY = 1;
+ // data streaming
+ DATA_STREAMING = 2;
+ // pipeline setup for failed data streaming recovery
+ PIPELINE_SETUP_STREAMING_RECOVERY = 3;
+ // close the block and pipeline
+ PIPELINE_CLOSE = 4;
+ // Recover a failed PIPELINE_CLOSE
+ PIPELINE_CLOSE_RECOVERY = 5;
+ // pipeline set up for block creation
+ PIPELINE_SETUP_CREATE = 6;
+ // transfer RBW for adding datanodes
+ TRANSFER_RBW = 7;
+ // transfer Finalized for adding datanodes
+ TRANSFER_FINALIZED = 8;
+ }
+ required BlockConstructionStage stage = 4;
+ required uint32 pipelineSize = 5;
+ required uint64 minBytesRcvd = 6;
+ required uint64 maxBytesRcvd = 7;
+ required uint64 latestGenerationStamp = 8;
+
+ /**
+ * The requested checksum mechanism for this block write.
+ */
+ required ChecksumProto requestedChecksum = 9;
+ optional CachingStrategyProto cachingStrategy = 10;
+ optional StorageTypeProto storageType = 11 [default = DISK];
+ repeated StorageTypeProto targetStorageTypes = 12;
+
+ /**
+ * Hint to the DataNode that the block can be allocated on transient
+ * storage i.e. memory and written to disk lazily. The DataNode is free
+ * to ignore this hint.
+ */
+ optional bool allowLazyPersist = 13 [default = false];
+ //whether to pin the block, so Balancer won't move it.
+ optional bool pinning = 14 [default = false];
+ repeated bool targetPinnings = 15;
+}
+
+message OpTransferBlockProto {
+ required ClientOperationHeaderProto header = 1;
+ repeated DatanodeInfoProto targets = 2;
+ repeated StorageTypeProto targetStorageTypes = 3;
+}
+
+message OpReplaceBlockProto {
+ required BaseHeaderProto header = 1;
+ required string delHint = 2;
+ required DatanodeInfoProto source = 3;
+ optional StorageTypeProto storageType = 4 [default = DISK];
+}
+
+message OpCopyBlockProto {
+ required BaseHeaderProto header = 1;
+}
+
+message OpBlockChecksumProto {
+ required BaseHeaderProto header = 1;
+}
+
+/**
+ * An ID uniquely identifying a shared memory segment.
+ */
+message ShortCircuitShmIdProto {
+ required int64 hi = 1;
+ required int64 lo = 2;
+}
+
+/**
+ * An ID uniquely identifying a slot within a shared memory segment.
+ */
+message ShortCircuitShmSlotProto {
+ required ShortCircuitShmIdProto shmId = 1;
+ required int32 slotIdx = 2;
+}
+
+message OpRequestShortCircuitAccessProto {
+ required BaseHeaderProto header = 1;
+
+ /** In order to get short-circuit access to block data, clients must set this
+ * to the highest version of the block data that they can understand.
+ * Currently 1 is the only version, but more versions may exist in the future
+ * if the on-disk format changes.
+ */
+ required uint32 maxVersion = 2;
+
+ /**
+ * The shared memory slot to use, if we are using one.
+ */
+ optional ShortCircuitShmSlotProto slotId = 3;
+
+ /**
+ * True if the client supports verifying that the file descriptor has been
+ * sent successfully.
+ */
+ optional bool supportsReceiptVerification = 4 [default = false];
+}
+
+message ReleaseShortCircuitAccessRequestProto {
+ required ShortCircuitShmSlotProto slotId = 1;
+ optional DataTransferTraceInfoProto traceInfo = 2;
+}
+
+message ReleaseShortCircuitAccessResponseProto {
+ required Status status = 1;
+ optional string error = 2;
+}
+
+message ShortCircuitShmRequestProto {
+ // The name of the client requesting the shared memory segment. This is
+ // purely for logging / debugging purposes.
+ required string clientName = 1;
+ optional DataTransferTraceInfoProto traceInfo = 2;
+}
+
+message ShortCircuitShmResponseProto {
+ required Status status = 1;
+ optional string error = 2;
+ optional ShortCircuitShmIdProto id = 3;
+}
+
+message PacketHeaderProto {
+ // All fields must be fixed-length!
+ required sfixed64 offsetInBlock = 1;
+ required sfixed64 seqno = 2;
+ required bool lastPacketInBlock = 3;
+ required sfixed32 dataLen = 4;
+ optional bool syncBlock = 5 [default = false];
+}
+
+// Status is a 4-bit enum
+enum Status {
+ SUCCESS = 0;
+ ERROR = 1;
+ ERROR_CHECKSUM = 2;
+ ERROR_INVALID = 3;
+ ERROR_EXISTS = 4;
+ ERROR_ACCESS_TOKEN = 5;
+ CHECKSUM_OK = 6;
+ ERROR_UNSUPPORTED = 7;
+ OOB_RESTART = 8; // Quick restart
+ OOB_RESERVED1 = 9; // Reserved
+ OOB_RESERVED2 = 10; // Reserved
+ OOB_RESERVED3 = 11; // Reserved
+ IN_PROGRESS = 12;
+}
+
+enum ShortCircuitFdResponse {
+ DO_NOT_USE_RECEIPT_VERIFICATION = 0;
+ USE_RECEIPT_VERIFICATION = 1;
+}
+
+message PipelineAckProto {
+ required sint64 seqno = 1;
+ repeated Status reply = 2;
+ optional uint64 downstreamAckTimeNanos = 3 [default = 0];
+ repeated uint32 flag = 4 [packed=true];
+}
+
+/**
+ * Sent as part of the BlockOpResponseProto
+ * for READ_BLOCK and COPY_BLOCK operations.
+ */
+message ReadOpChecksumInfoProto {
+ required ChecksumProto checksum = 1;
+
+ /**
+ * The offset into the block at which the first packet
+ * will start. This is necessary since reads will align
+ * backwards to a checksum chunk boundary.
+ */
+ required uint64 chunkOffset = 2;
+}
+
+message BlockOpResponseProto {
+ required Status status = 1;
+
+ optional string firstBadLink = 2;
+ optional OpBlockChecksumResponseProto checksumResponse = 3;
+ optional ReadOpChecksumInfoProto readOpChecksumInfo = 4;
+
+ /** explanatory text which may be useful to log on the client side */
+ optional string message = 5;
+
+ /** If the server chooses to agree to the request of a client for
+ * short-circuit access, it will send a response message with the relevant
+ * file descriptors attached.
+ *
+ * In the body of the message, this version number will be set to the
+ * specific version number of the block data that the client is about to
+ * read.
+ */
+ optional uint32 shortCircuitAccessVersion = 6;
+}
+
+/**
+ * Message sent from the client to the DN after reading the entire
+ * read request.
+ */
+message ClientReadStatusProto {
+ required Status status = 1;
+}
+
+message DNTransferAckProto {
+ required Status status = 1;
+}
+
+message OpBlockChecksumResponseProto {
+ required uint32 bytesPerCrc = 1;
+ required uint64 crcPerBlock = 2;
+ required bytes md5 = 3;
+ optional ChecksumTypeProto crcType = 4;
+}
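Because every PacketHeaderProto field above is fixed-length (sfixed64/sfixed32/bool, per the comment in the message), a header with all fields populated always serializes to the same number of bytes, which lets the data pipeline preallocate header buffers. A hedged sketch of producing one (values illustrative):

  import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto;

  public class PacketHeaderExample {
    public static void main(String[] args) {
      PacketHeaderProto header = PacketHeaderProto.newBuilder()
          .setOffsetInBlock(0L)        // byte offset of this packet in the block
          .setSeqno(1L)                // per-pipeline sequence number
          .setLastPacketInBlock(false)
          .setDataLen(64 * 1024)       // payload length carried by this packet
          .setSyncBlock(false)         // true asks the datanodes to fsync
          .build();
      // With every field populated, the encoding has a constant size.
      System.out.println("header bytes: " + header.toByteArray().length);
    }
  }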
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/encryption.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/encryption.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/encryption.proto
new file mode 100644
index 0000000..68b2f3a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/encryption.proto
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and stable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for a *stable* .proto interface.
+ */
+
+// This file contains protocol buffers for the encryption zone APIs used
+// between the client and the namenode.
+
+
+option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+option java_outer_classname = "EncryptionZonesProtos";
+option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
+
+import "hdfs.proto";
+
+message CreateEncryptionZoneRequestProto {
+ required string src = 1;
+ optional string keyName = 2;
+}
+
+message CreateEncryptionZoneResponseProto {
+}
+
+message ListEncryptionZonesRequestProto {
+ required int64 id = 1;
+}
+
+message EncryptionZoneProto {
+ required int64 id = 1;
+ required string path = 2;
+ required CipherSuiteProto suite = 3;
+ required CryptoProtocolVersionProto cryptoProtocolVersion = 4;
+ required string keyName = 5;
+}
+
+message ListEncryptionZonesResponseProto {
+ repeated EncryptionZoneProto zones = 1;
+ required bool hasMore = 2;
+}
+
+message GetEZForPathRequestProto {
+ required string src = 1;
+}
+
+message GetEZForPathResponseProto {
+ optional EncryptionZoneProto zone = 1;
+}
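A short sketch of the request a client would build to create an encryption zone rooted at a path; the path and key name are hypothetical, and the key must already exist in the configured key provider:

  import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;

  public class CreateEzExample {
    public static void main(String[] args) {
      CreateEncryptionZoneRequestProto req = CreateEncryptionZoneRequestProto.newBuilder()
          .setSrc("/data/secure")       // directory that becomes the zone root
          .setKeyName("zoneKey1")       // optional; hypothetical key name
          .build();
      System.out.println(req);
    }
  }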
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
new file mode 100644
index 0000000..86fb462
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
@@ -0,0 +1,611 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and stable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for a *stable* .proto interface.
+ */
+
+// This file contains protocol buffers that are used throughout HDFS -- i.e.
+// by the client, server, and data transfer protocols.
+
+
+option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+option java_outer_classname = "HdfsProtos";
+option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
+
+import "Security.proto";
+
+/**
+ * Extended block identifies a block
+ */
+message ExtendedBlockProto {
+ required string poolId = 1; // Block pool id - globally unique across clusters
+ required uint64 blockId = 2; // the local id within a pool
+ required uint64 generationStamp = 3;
+ optional uint64 numBytes = 4 [default = 0]; // len does not belong in ebid
+ // here for historical reasons
+}
+
+/**
+ * Identifies a Datanode
+ */
+message DatanodeIDProto {
+ required string ipAddr = 1; // IP address
+ required string hostName = 2; // hostname
+ required string datanodeUuid = 3; // UUID assigned to the Datanode. For
+ // upgraded clusters this is the same
+ // as the original StorageID of the
+ // Datanode.
+ required uint32 xferPort = 4; // data streaming port
+ required uint32 infoPort = 5; // datanode http port
+ required uint32 ipcPort = 6; // ipc server port
+ optional uint32 infoSecurePort = 7 [default = 0]; // datanode https port
+}
+
+/**
+ * Datanode local information
+ */
+message DatanodeLocalInfoProto {
+ required string softwareVersion = 1;
+ required string configVersion = 2;
+ required uint64 uptime = 3;
+}
+
+/**
+ * DatanodeInfo array
+ */
+message DatanodeInfosProto {
+ repeated DatanodeInfoProto datanodes = 1;
+}
+
+/**
+ * The status of a Datanode
+ */
+message DatanodeInfoProto {
+ required DatanodeIDProto id = 1;
+ optional uint64 capacity = 2 [default = 0];
+ optional uint64 dfsUsed = 3 [default = 0];
+ optional uint64 remaining = 4 [default = 0];
+ optional uint64 blockPoolUsed = 5 [default = 0];
+ optional uint64 lastUpdate = 6 [default = 0];
+ optional uint32 xceiverCount = 7 [default = 0];
+ optional string location = 8;
+ enum AdminState {
+ NORMAL = 0;
+ DECOMMISSION_INPROGRESS = 1;
+ DECOMMISSIONED = 2;
+ }
+
+ optional AdminState adminState = 10 [default = NORMAL];
+ optional uint64 cacheCapacity = 11 [default = 0];
+ optional uint64 cacheUsed = 12 [default = 0];
+ optional uint64 lastUpdateMonotonic = 13 [default = 0];
+}
+
+/**
+ * Represents a storage available on the datanode
+ */
+message DatanodeStorageProto {
+ enum StorageState {
+ NORMAL = 0;
+ READ_ONLY_SHARED = 1;
+ }
+
+ required string storageUuid = 1;
+ optional StorageState state = 2 [default = NORMAL];
+ optional StorageTypeProto storageType = 3 [default = DISK];
+}
+
+message StorageReportProto {
+ required string storageUuid = 1 [ deprecated = true ];
+ optional bool failed = 2 [ default = false ];
+ optional uint64 capacity = 3 [ default = 0 ];
+ optional uint64 dfsUsed = 4 [ default = 0 ];
+ optional uint64 remaining = 5 [ default = 0 ];
+ optional uint64 blockPoolUsed = 6 [ default = 0 ];
+ optional DatanodeStorageProto storage = 7; // supersedes StorageUuid
+}
+
+/**
+ * Summary of a file or directory
+ */
+message ContentSummaryProto {
+ required uint64 length = 1;
+ required uint64 fileCount = 2;
+ required uint64 directoryCount = 3;
+ required uint64 quota = 4;
+ required uint64 spaceConsumed = 5;
+ required uint64 spaceQuota = 6;
+ optional StorageTypeQuotaInfosProto typeQuotaInfos = 7;
+}
+
+/**
+ * Storage type quota and usage information of a file or directory
+ */
+message StorageTypeQuotaInfosProto {
+ repeated StorageTypeQuotaInfoProto typeQuotaInfo = 1;
+}
+
+message StorageTypeQuotaInfoProto {
+ required StorageTypeProto type = 1;
+ required uint64 quota = 2;
+ required uint64 consumed = 3;
+}
+
+/**
+ * Contains a list of paths corresponding to corrupt files and a cookie
+ * used for iterative calls to NameNode.listCorruptFileBlocks.
+ *
+ */
+message CorruptFileBlocksProto {
+ repeated string files = 1;
+ required string cookie = 2;
+}
+
+/**
+ * File or Directory permission - same spec as posix
+ */
+message FsPermissionProto {
+ required uint32 perm = 1; // Actually a short - only 16bits used
+}
+
+/**
+ * Types of recognized storage media.
+ */
+enum StorageTypeProto {
+ DISK = 1;
+ SSD = 2;
+ ARCHIVE = 3;
+ RAM_DISK = 4;
+}
+
+/**
+ * A list of storage types.
+ */
+message StorageTypesProto {
+ repeated StorageTypeProto storageTypes = 1;
+}
+
+/**
+ * Block replica storage policy.
+ */
+message BlockStoragePolicyProto {
+ required uint32 policyId = 1;
+ required string name = 2;
+ // a list of storage types for storing the block replicas when creating a
+ // block.
+ required StorageTypesProto creationPolicy = 3;
+ // A list of storage types for creation fallback storage.
+ optional StorageTypesProto creationFallbackPolicy = 4;
+ optional StorageTypesProto replicationFallbackPolicy = 5;
+}
+
+/**
+ * A list of storage IDs.
+ */
+message StorageUuidsProto {
+ repeated string storageUuids = 1;
+}
+
+/**
+ * A LocatedBlock gives information about a block and its location.
+ */
+message LocatedBlockProto {
+ required ExtendedBlockProto b = 1;
+ required uint64 offset = 2; // offset of first byte of block in the file
+ repeated DatanodeInfoProto locs = 3; // Locations ordered by proximity to client ip
+ required bool corrupt = 4; // true if all replicas of a block are corrupt, else false
+ // If a block has some corrupt replicas, they are filtered out and
+ // their locations are not part of this object
+
+ required hadoop.common.TokenProto blockToken = 5;
+ repeated bool isCached = 6 [packed=true]; // if a location in locs is cached
+ repeated StorageTypeProto storageTypes = 7;
+ repeated string storageIDs = 8;
+}
+
+message DataEncryptionKeyProto {
+ required uint32 keyId = 1;
+ required string blockPoolId = 2;
+ required bytes nonce = 3;
+ required bytes encryptionKey = 4;
+ required uint64 expiryDate = 5;
+ optional string encryptionAlgorithm = 6;
+}
+
+/**
+ * Cipher suite.
+ */
+enum CipherSuiteProto {
+ UNKNOWN = 1;
+ AES_CTR_NOPADDING = 2;
+}
+
+/**
+ * Crypto protocol version used to access encrypted files.
+ */
+enum CryptoProtocolVersionProto {
+ UNKNOWN_PROTOCOL_VERSION = 1;
+ ENCRYPTION_ZONES = 2;
+}
+
+/**
+ * Encryption information for a file.
+ */
+message FileEncryptionInfoProto {
+ required CipherSuiteProto suite = 1;
+ required CryptoProtocolVersionProto cryptoProtocolVersion = 2;
+ required bytes key = 3;
+ required bytes iv = 4;
+ required string keyName = 5;
+ required string ezKeyVersionName = 6;
+}
+
+/**
+ * Encryption information for an individual
+ * file within an encryption zone
+ */
+message PerFileEncryptionInfoProto {
+ required bytes key = 1;
+ required bytes iv = 2;
+ required string ezKeyVersionName = 3;
+}
+
+/**
+ * Encryption information for an encryption
+ * zone
+ */
+message ZoneEncryptionInfoProto {
+ required CipherSuiteProto suite = 1;
+ required CryptoProtocolVersionProto cryptoProtocolVersion = 2;
+ required string keyName = 3;
+}
+
+/**
+ * Cipher option
+ */
+message CipherOptionProto {
+ required CipherSuiteProto suite = 1;
+ optional bytes inKey = 2;
+ optional bytes inIv = 3;
+ optional bytes outKey = 4;
+ optional bytes outIv = 5;
+}
+
+/**
+ * A set of file blocks and their locations.
+ */
+message LocatedBlocksProto {
+ required uint64 fileLength = 1;
+ repeated LocatedBlockProto blocks = 2;
+ required bool underConstruction = 3;
+ optional LocatedBlockProto lastBlock = 4;
+ required bool isLastBlockComplete = 5;
+ optional FileEncryptionInfoProto fileEncryptionInfo = 6;
+}
+
+/**
+ * Status of a file, directory or symlink
+ * Optionally includes a file's block locations if requested by the client on the rpc call.
+ */
+message HdfsFileStatusProto {
+ enum FileType {
+ IS_DIR = 1;
+ IS_FILE = 2;
+ IS_SYMLINK = 3;
+ }
+ required FileType fileType = 1;
+ required bytes path = 2; // local name of inode encoded java UTF8
+ required uint64 length = 3;
+ required FsPermissionProto permission = 4;
+ required string owner = 5;
+ required string group = 6;
+ required uint64 modification_time = 7;
+ required uint64 access_time = 8;
+
+ // Optional fields for symlink
+ optional bytes symlink = 9; // if symlink, target encoded java UTF8
+
+ // Optional fields for file
+ optional uint32 block_replication = 10 [default = 0]; // only 16bits used
+ optional uint64 blocksize = 11 [default = 0];
+ optional LocatedBlocksProto locations = 12; // supplied only if asked by client
+
+ // Optional field for fileId
+ optional uint64 fileId = 13 [default = 0]; // default as an invalid id
+ optional int32 childrenNum = 14 [default = -1];
+ // Optional field for file encryption
+ optional FileEncryptionInfoProto fileEncryptionInfo = 15;
+
+ optional uint32 storagePolicy = 16 [default = 0]; // block storage policy id
+}
+
+/**
+ * Checksum algorithms/types used in HDFS
+ * Make sure this enum's integer values match enum values' id properties defined
+ * in org.apache.hadoop.util.DataChecksum.Type
+ */
+enum ChecksumTypeProto {
+ CHECKSUM_NULL = 0;
+ CHECKSUM_CRC32 = 1;
+ CHECKSUM_CRC32C = 2;
+}
+
+/**
+ * HDFS Server Defaults
+ */
+message FsServerDefaultsProto {
+ required uint64 blockSize = 1;
+ required uint32 bytesPerChecksum = 2;
+ required uint32 writePacketSize = 3;
+ required uint32 replication = 4; // Actually a short - only 16 bits used
+ required uint32 fileBufferSize = 5;
+ optional bool encryptDataTransfer = 6 [default = false];
+ optional uint64 trashInterval = 7 [default = 0];
+ optional ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
+}
+
+
+/**
+ * Directory listing
+ */
+message DirectoryListingProto {
+ repeated HdfsFileStatusProto partialListing = 1;
+ required uint32 remainingEntries = 2;
+}
+
+/**
+ * Status of a snapshottable directory: besides the normal information for
+ * a directory status, it also includes the snapshot quota, the number of
+ * snapshots, and the full path of the parent directory.
+ */
+message SnapshottableDirectoryStatusProto {
+ required HdfsFileStatusProto dirStatus = 1;
+
+ // Fields specific for snapshottable directory
+ required uint32 snapshot_quota = 2;
+ required uint32 snapshot_number = 3;
+ required bytes parent_fullpath = 4;
+}
+
+/**
+ * Snapshottable directory listing
+ */
+message SnapshottableDirectoryListingProto {
+ repeated SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
+}
+
+/**
+ * Snapshot diff report entry
+ */
+message SnapshotDiffReportEntryProto {
+ required bytes fullpath = 1;
+ required string modificationLabel = 2;
+ optional bytes targetPath = 3;
+}
+
+/**
+ * Snapshot diff report
+ */
+message SnapshotDiffReportProto {
+ // full path of the directory where snapshots were taken
+ required string snapshotRoot = 1;
+ required string fromSnapshot = 2;
+ required string toSnapshot = 3;
+ repeated SnapshotDiffReportEntryProto diffReportEntries = 4;
+}
+
+/**
+ * Common node information shared by all the nodes in the cluster
+ */
+message StorageInfoProto {
+ required uint32 layoutVersion = 1; // Layout version of the file system
+ required uint32 namespceID = 2; // File system namespace ID
+ required string clusterID = 3; // ID of the cluster
+ required uint64 cTime = 4; // File system creation time
+}
+
+/**
+ * Information sent by a namenode to identify itself to the primary namenode.
+ */
+message NamenodeRegistrationProto {
+ required string rpcAddress = 1; // host:port of the namenode RPC address
+ required string httpAddress = 2; // host:port of the namenode http server
+ enum NamenodeRoleProto {
+ NAMENODE = 1;
+ BACKUP = 2;
+ CHECKPOINT = 3;
+ }
+ required StorageInfoProto storageInfo = 3; // Node information
+ optional NamenodeRoleProto role = 4 [default = NAMENODE]; // Namenode role
+}
+
+/**
+ * Unique signature to identify checkpoint transactions.
+ */
+message CheckpointSignatureProto {
+ required string blockPoolId = 1;
+ required uint64 mostRecentCheckpointTxId = 2;
+ required uint64 curSegmentTxId = 3;
+ required StorageInfoProto storageInfo = 4;
+}
+
+/**
+ * Command sent from one namenode to another namenode.
+ */
+message NamenodeCommandProto {
+ enum Type {
+ NamenodeCommand = 0; // Base command
+ CheckPointCommand = 1; // Check point command
+ }
+ required uint32 action = 1;
+ required Type type = 2;
+ optional CheckpointCommandProto checkpointCmd = 3;
+}
+
+/**
+ * Command returned from primary to checkpointing namenode.
+ * This command carries the checkpoint signature that identifies the
+ * checkpoint transaction and is needed for further communication
+ * related to checkpointing.
+ */
+message CheckpointCommandProto {
+ // Unique signature to identify checkpoint transaction
+ required CheckpointSignatureProto signature = 1;
+
+ // If true, transfer the image back to the primary upon completion of the checkpoint
+ required bool needToReturnImage = 2;
+}
+
+/**
+ * Block information
+ *
+ * Please be wary of adding additional fields here, since INodeFiles
+ * need to fit in PB's default max message size of 64MB.
+ * We restrict the max # of blocks per file
+ * (dfs.namenode.fs-limits.max-blocks-per-file), but it's better
+ * to avoid changing this.
+ */
+message BlockProto {
+ required uint64 blockId = 1;
+ required uint64 genStamp = 2;
+ optional uint64 numBytes = 3 [default = 0];
+}
+
+/**
+ * Block and the datanodes where it is located
+ */
+message BlockWithLocationsProto {
+ required BlockProto block = 1; // Block
+ repeated string datanodeUuids = 2; // Datanodes with replicas of the block
+ repeated string storageUuids = 3; // Storages with replicas of the block
+ repeated StorageTypeProto storageTypes = 4;
+}
+
+/**
+ * List of block with locations
+ */
+message BlocksWithLocationsProto {
+ repeated BlockWithLocationsProto blocks = 1;
+}
+
+/**
+ * Editlog information with available transactions
+ */
+message RemoteEditLogProto {
+ required uint64 startTxId = 1; // Starting available edit log transaction
+ required uint64 endTxId = 2; // Ending available edit log transaction
+ optional bool isInProgress = 3 [default = false];
+}
+
+/**
+ * Enumeration of editlogs available on a remote namenode
+ */
+message RemoteEditLogManifestProto {
+ repeated RemoteEditLogProto logs = 1;
+}
+
+/**
+ * Namespace information that describes namespace on a namenode
+ */
+message NamespaceInfoProto {
+ required string buildVersion = 1; // Software revision version (e.g. an svn or git revision)
+ required uint32 unused = 2; // Retained for backward compatibility
+ required string blockPoolID = 3; // block pool used by the namespace
+ required StorageInfoProto storageInfo = 4;// Node information
+ required string softwareVersion = 5; // Software version number (e.g. 2.0.0)
+ optional uint64 capabilities = 6 [default = 0]; // feature flags
+}
+
+/**
+ * Block access token information
+ */
+message BlockKeyProto {
+ required uint32 keyId = 1; // Key identifier
+ required uint64 expiryDate = 2; // Expiry time in milliseconds
+ optional bytes keyBytes = 3; // Key secret
+}
+
+/**
+ * Current key and set of block keys at the namenode.
+ */
+message ExportedBlockKeysProto {
+ required bool isBlockTokenEnabled = 1;
+ required uint64 keyUpdateInterval = 2;
+ required uint64 tokenLifeTime = 3;
+ required BlockKeyProto currentKey = 4;
+ repeated BlockKeyProto allKeys = 5;
+}
+
+/**
+ * State of a block replica at a datanode
+ */
+enum ReplicaStateProto {
+ FINALIZED = 0; // State of a replica when it is not modified
+ RBW = 1; // State of replica that is being written to
+ RWR = 2; // State of replica that is waiting to be recovered
+ RUR = 3; // State of replica that is under recovery
+ TEMPORARY = 4; // State of replica that is created for replication
+}
+
+/**
+ * Block that needs to be recovered at a given location
+ */
+message RecoveringBlockProto {
+ required uint64 newGenStamp = 1; // New genstamp post recovery
+ required LocatedBlockProto block = 2; // Block to be recovered
+ optional BlockProto truncateBlock = 3; // New block for recovery (truncate)
+}
+
+/**
+ * void request
+ */
+message VersionRequestProto {
+}
+
+/**
+ * Version response from namenode.
+ */
+message VersionResponseProto {
+ required NamespaceInfoProto info = 1;
+}
+
+/**
+ * Information related to a snapshot
+ * TODO: add more information
+ */
+message SnapshotInfoProto {
+ required string snapshotName = 1;
+ required string snapshotRoot = 2;
+ required FsPermissionProto permission = 3;
+ required string owner = 4;
+ required string group = 5;
+ required string createTime = 6;
+ // TODO: do we need access time?
+}
+
+/**
+ * Rolling upgrade status
+ */
+message RollingUpgradeStatusProto {
+ required string blockPoolId = 1;
+ optional bool finalized = 2 [default = false];
+}
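As one concrete example from this file, an extended block is identified by its pool id plus an id local to that pool. A minimal builder sketch (all values hypothetical):

  import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;

  public class ExtendedBlockExample {
    public static void main(String[] args) {
      ExtendedBlockProto blk = ExtendedBlockProto.newBuilder()
          .setPoolId("BP-1073741825-10.0.0.1-1400000000000") // hypothetical pool id
          .setBlockId(1073741825L)       // id local to that pool
          .setGenerationStamp(1001L)
          .setNumBytes(134217728L)       // optional; kept here for historical reasons
          .build();
      System.out.println(blk);
    }
  }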
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/inotify.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/inotify.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/inotify.proto
new file mode 100644
index 0000000..5b78fe6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/inotify.proto
@@ -0,0 +1,126 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and stable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for a *stable* .proto interface.
+ */
+
+// This file contains protocol buffers used to communicate edits to clients
+// as part of the inotify system.
+
+option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+option java_outer_classname = "InotifyProtos";
+option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
+
+import "acl.proto";
+import "xattr.proto";
+import "hdfs.proto";
+
+enum EventType {
+ EVENT_CREATE = 0x0;
+ EVENT_CLOSE = 0x1;
+ EVENT_APPEND = 0x2;
+ EVENT_RENAME = 0x3;
+ EVENT_METADATA = 0x4;
+ EVENT_UNLINK = 0x5;
+}
+
+message EventProto {
+ required EventType type = 1;
+ required bytes contents = 2;
+}
+
+message EventBatchProto {
+ required int64 txid = 1;
+ repeated EventProto events = 2;
+}
+
+enum INodeType {
+ I_TYPE_FILE = 0x0;
+ I_TYPE_DIRECTORY = 0x1;
+ I_TYPE_SYMLINK = 0x2;
+}
+
+enum MetadataUpdateType {
+ META_TYPE_TIMES = 0x0;
+ META_TYPE_REPLICATION = 0x1;
+ META_TYPE_OWNER = 0x2;
+ META_TYPE_PERMS = 0x3;
+ META_TYPE_ACLS = 0x4;
+ META_TYPE_XATTRS = 0x5;
+}
+
+message CreateEventProto {
+ required INodeType type = 1;
+ required string path = 2;
+ required int64 ctime = 3;
+ required string ownerName = 4;
+ required string groupName = 5;
+ required FsPermissionProto perms = 6;
+ optional int32 replication = 7;
+ optional string symlinkTarget = 8;
+ optional bool overwrite = 9;
+ optional int64 defaultBlockSize = 10 [default=0];
+}
+
+message CloseEventProto {
+ required string path = 1;
+ required int64 fileSize = 2;
+ required int64 timestamp = 3;
+}
+
+message AppendEventProto {
+ required string path = 1;
+ optional bool newBlock = 2 [default = false];
+}
+
+message RenameEventProto {
+ required string srcPath = 1;
+ required string destPath = 2;
+ required int64 timestamp = 3;
+}
+
+message MetadataUpdateEventProto {
+ required string path = 1;
+ required MetadataUpdateType type = 2;
+ optional int64 mtime = 3;
+ optional int64 atime = 4;
+ optional int32 replication = 5;
+ optional string ownerName = 6;
+ optional string groupName = 7;
+ optional FsPermissionProto perms = 8;
+ repeated AclEntryProto acls = 9;
+ repeated XAttrProto xAttrs = 10;
+ optional bool xAttrsRemoved = 11;
+}
+
+message UnlinkEventProto {
+ required string path = 1;
+ required int64 timestamp = 2;
+}
+
+message EventsListProto {
+ repeated EventProto events = 1; // deprecated
+ required int64 firstTxid = 2;
+ required int64 lastTxid = 3;
+ required int64 syncTxid = 4;
+ repeated EventBatchProto batch = 5;
+}
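Each EventProto carries its payload as opaque bytes, so a concrete event is serialized and wrapped, and a reader switches on the type field to parse it back out. A hedged sketch for a close event (path and size hypothetical):

  import org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.CloseEventProto;
  import org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventProto;
  import org.apache.hadoop.hdfs.protocol.proto.InotifyProtos.EventType;

  public class InotifyEventExample {
    public static void main(String[] args) throws Exception {
      CloseEventProto close = CloseEventProto.newBuilder()
          .setPath("/data/app.log")      // hypothetical path
          .setFileSize(4096L)
          .setTimestamp(System.currentTimeMillis())
          .build();
      EventProto event = EventProto.newBuilder()
          .setType(EventType.EVENT_CLOSE)
          .setContents(close.toByteString()) // payload is the serialized inner event
          .build();
      // The consumer side parses contents according to the declared type.
      CloseEventProto decoded = CloseEventProto.parseFrom(event.getContents());
      System.out.println(decoded.getPath());
    }
  }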
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/xattr.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/xattr.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/xattr.proto
new file mode 100644
index 0000000..6c8b5eb
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/xattr.proto
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+option java_outer_classname = "XAttrProtos";
+option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
+
+message XAttrProto {
+ enum XAttrNamespaceProto {
+ USER = 0;
+ TRUSTED = 1;
+ SECURITY = 2;
+ SYSTEM = 3;
+ RAW = 4;
+ }
+
+ required XAttrNamespaceProto namespace = 1;
+ required string name = 2;
+ optional bytes value = 3;
+}
+
+enum XAttrSetFlagProto {
+ XATTR_CREATE = 0x01;
+ XATTR_REPLACE = 0x02;
+}
+
+message SetXAttrRequestProto {
+ required string src = 1;
+ optional XAttrProto xAttr = 2;
+ optional uint32 flag = 3; //bits set using XAttrSetFlagProto
+}
+
+message SetXAttrResponseProto {
+}
+
+message GetXAttrsRequestProto {
+ required string src = 1;
+ repeated XAttrProto xAttrs = 2;
+}
+
+message GetXAttrsResponseProto {
+ repeated XAttrProto xAttrs = 1;
+}
+
+message ListXAttrsRequestProto {
+ required string src = 1;
+}
+
+message ListXAttrsResponseProto {
+ repeated XAttrProto xAttrs = 1;
+}
+
+message RemoveXAttrRequestProto {
+ required string src = 1;
+ optional XAttrProto xAttr = 2;
+}
+
+message RemoveXAttrResponseProto {
+}
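The flag field of SetXAttrRequestProto packs XAttrSetFlagProto values as bits. A sketch of a create-only set request (attribute name and value hypothetical):

  import com.google.protobuf.ByteString;
  import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto;
  import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto;
  import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.XAttrNamespaceProto;
  import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrSetFlagProto;

  public class SetXAttrExample {
    public static void main(String[] args) {
      XAttrProto xattr = XAttrProto.newBuilder()
          .setNamespace(XAttrNamespaceProto.USER)
          .setName("checksum")                            // hypothetical attribute
          .setValue(ByteString.copyFromUtf8("sha256:..."))
          .build();
      SetXAttrRequestProto req = SetXAttrRequestProto.newBuilder()
          .setSrc("/data/file")
          .setXAttr(xattr)
          .setFlag(XAttrSetFlagProto.XATTR_CREATE.getNumber()) // 0x01: fail if present
          .build();
      System.out.println(req);
    }
  }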
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a7c9e7c..f595751 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -710,6 +710,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8712. Remove 'public' and 'abstract' modifiers in FsVolumeSpi and
FsDatasetSpi (Lei (Eddy) Xu via vinayakumarb)
+ HDFS-8726. Move protobuf files that define the client-server protocols to
+ hdfs-client. (wheat9)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index f90644c..db38851 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -328,26 +328,20 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<protocCommand>${protoc.path}</protocCommand>
<imports>
<param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
+ <param>${basedir}/../hadoop-hdfs-client/src/main/proto</param>
<param>${basedir}/src/main/proto</param>
</imports>
<source>
<directory>${basedir}/src/main/proto</directory>
<includes>
- <include>ClientDatanodeProtocol.proto</include>
- <include>ClientNamenodeProtocol.proto</include>
<include>DatanodeProtocol.proto</include>
<include>HAZKInfo.proto</include>
<include>InterDatanodeProtocol.proto</include>
<include>JournalProtocol.proto</include>
<include>NamenodeProtocol.proto</include>
<include>QJournalProtocol.proto</include>
- <include>acl.proto</include>
- <include>xattr.proto</include>
- <include>datatransfer.proto</include>
+ <include>editlog.proto</include>
<include>fsimage.proto</include>
- <include>hdfs.proto</include>
- <include>encryption.proto</include>
- <include>inotify.proto</include>
</includes>
</source>
<output>${project.build.directory}/generated-sources/java</output>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
index 3b205e4..7e58606 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
@@ -112,7 +112,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<protocCommand>${protoc.path}</protocCommand>
<imports>
<param>${basedir}/../../../../../hadoop-common-project/hadoop-common/src/main/proto</param>
- <param>${basedir}/../../../../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto</param>
+ <param>${basedir}/../../../../../hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto</param>
<param>${basedir}/src/main/proto</param>
</imports>
<source>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6182d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index c8e565e..ab36f17 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -103,8 +103,8 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
-import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEditLogProto;
-import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrEditLogProto;
+import org.apache.hadoop.hdfs.protocol.proto.EditLogProtos.AclEditLogProto;
+import org.apache.hadoop.hdfs.protocol.proto.EditLogProtos.XAttrEditLogProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
[14/21] hadoop git commit: HDFS-8729. Fix
TestFileTruncate#testTruncateWithDataNodesRestartImmediately which
occasionally failed. Contributed by Walter Su.
Posted by aw...@apache.org.
HDFS-8729. Fix TestFileTruncate#testTruncateWithDataNodesRestartImmediately which occasionally failed. Contributed by Walter Su.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4ca530c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4ca530c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4ca530c
Branch: refs/heads/HADOOP-12111
Commit: f4ca530c1cc9ece25c5ef01f99a94eb9e678e890
Parents: ac60483
Author: Jing Zhao <ji...@apache.org>
Authored: Thu Jul 9 13:17:52 2015 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Thu Jul 9 13:17:52 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java | 3 +++
2 files changed, 6 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4ca530c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f595751..e26e061 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1023,6 +1023,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8642. Make TestFileTruncate more reliable. (Rakesh R via
Arpit Agarwal)
+ HDFS-8729. Fix TestFileTruncate#testTruncateWithDataNodesRestartImmediately
+ which occasionally failed. (Walter Su via jing9)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4ca530c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index a91d6c9..8e54edc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -804,6 +804,9 @@ public class TestFileTruncate {
assertEquals(newBlock.getBlock().getGenerationStamp(),
oldBlock.getBlock().getGenerationStamp() + 1);
+ Thread.sleep(2000);
+ // trigger a second block report to delete the corrupted replica, if there is one
+ cluster.triggerBlockReports();
// Wait replicas come to 3
DFSTestUtil.waitReplication(fs, p, REPLICATION);
// Old replica is disregarded and replaced with the truncated one on dn0
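The fixed two-second sleep above still leaves a race window on slow hosts. A hedged alternative sketch polls until replication converges instead, assuming the GenericTestUtils.waitFor and DFSTestUtil.getAllBlocks(FileSystem, Path) helpers available to this test suite (timings illustrative):

  // Retry block reports until the bad replica is purged and replication
  // reaches the expected count, rather than relying on one fixed sleep.
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      try {
        cluster.triggerBlockReports();  // re-report so the bad replica is removed
        return DFSTestUtil.getAllBlocks(fs, p).get(0).getLocations().length
            == REPLICATION;
      } catch (IOException e) {
        return false;
      }
    }
  }, 500, 30000);  // poll every 500 ms, up to 30 s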
[18/21] hadoop git commit: HDFS-8749. Fix findbugs warnings in
BlockManager.java. Contributed by Brahma Reddy Battula.
Posted by aw...@apache.org.
HDFS-8749. Fix findbugs warnings in BlockManager.java. Contributed by Brahma Reddy Battula.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d66302ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d66302ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d66302ed
Branch: refs/heads/HADOOP-12111
Commit: d66302ed9b2c25b560d8319d6d755aee7cfa4d67
Parents: 5214876
Author: Akira Ajisaka <aa...@apache.org>
Authored: Fri Jul 10 15:04:06 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Fri Jul 10 15:04:06 2015 +0900
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java | 2 --
2 files changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d66302ed/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e26e061..5c1208d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1026,6 +1026,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8729. Fix TestFileTruncate#testTruncateWithDataNodesRestartImmediately
which occasionally failed. (Walter Su via jing9)
+ HDFS-8749. Fix findbugs warnings in BlockManager.java.
+ (Brahma Reddy Battula via aajisaka)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d66302ed/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 0b60a97..7dce2a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3596,8 +3596,6 @@ public class BlockManager implements BlockStatsMXBean {
String src, BlockInfo[] blocks) {
for (BlockInfo b: blocks) {
if (!b.isComplete()) {
- final BlockInfoUnderConstruction uc =
- (BlockInfoUnderConstruction)b;
final int numNodes = b.numNodes();
final int min = getMinStorageNum(b);
final BlockUCState state = b.getBlockUCState();
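The warning removed above is most likely findbugs' dead-store check: a local is assigned (here via a downcast) but never read afterwards. A minimal illustration of the flagged pattern, with hypothetical method context:

  // findbugs dead-local-store: 'uc' is computed and never used;
  // every later read goes through 'b' directly.
  void check(BlockInfo b) {
    if (!b.isComplete()) {
      final BlockInfoUnderConstruction uc = (BlockInfoUnderConstruction) b; // dead store
      final int numNodes = b.numNodes();
      // ...
    }
  }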
[08/21] hadoop git commit: YARN-2194. Addendum patch to fix failing
unit test in TestPrivilegedOperationExecutor. Contributed by Sidharta
Seethana.
Posted by aw...@apache.org.
YARN-2194. Addendum patch to fix failing unit test in TestPrivilegedOperationExecutor. Contributed by Sidharta Seethana.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63d03650
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63d03650
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63d03650
Branch: refs/heads/HADOOP-12111
Commit: 63d0365088ff9fca0baaf3c4c3c01f80c72d3281
Parents: 6b7692c
Author: Varun Vasudev <vv...@apache.org>
Authored: Thu Jul 9 11:49:28 2015 +0530
Committer: Varun Vasudev <vv...@apache.org>
Committed: Thu Jul 9 11:51:59 2015 +0530
----------------------------------------------------------------------
.../linux/privileged/TestPrivilegedOperationExecutor.java | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/63d03650/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java
index 7154d03..8f297ed 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java
@@ -217,8 +217,10 @@ public class TestPrivilegedOperationExecutor {
.squashCGroupOperations(ops);
String expected = new StringBuffer
(PrivilegedOperation.CGROUP_ARG_PREFIX)
- .append(cGroupTasks1).append(',')
- .append(cGroupTasks2).append(',')
+ .append(cGroupTasks1).append(PrivilegedOperation
+ .LINUX_FILE_PATH_SEPARATOR)
+ .append(cGroupTasks2).append(PrivilegedOperation
+ .LINUX_FILE_PATH_SEPARATOR)
.append(cGroupTasks3).toString();
// We expect exactly one argument
[09/21] hadoop git commit: MAPREDUCE-6426.
TestShuffleHandler#testGetMapOutputInfo is failing. Contributed by zhihai xu.
Posted by aw...@apache.org.
MAPREDUCE-6426. TestShuffleHandler#testGetMapOutputInfo is failing.
Contributed by zhihai xu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fffb15bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fffb15bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fffb15bb
Branch: refs/heads/HADOOP-12111
Commit: fffb15bb431fcdd7ca4ce0e249f9d45f4968497b
Parents: 63d0365
Author: Devaraj K <de...@apache.org>
Authored: Thu Jul 9 15:00:26 2015 +0530
Committer: Devaraj K <de...@apache.org>
Committed: Thu Jul 9 15:00:26 2015 +0530
----------------------------------------------------------------------
hadoop-mapreduce-project/CHANGES.txt | 3 +++
.../test/java/org/apache/hadoop/mapred/TestShuffleHandler.java | 6 +++++-
2 files changed, 8 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fffb15bb/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 874ecea..81b202b 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -548,6 +548,9 @@ Release 2.7.2 - UNRELEASED
MAPREDUCE-6425. ShuffleHandler passes wrong "base" parameter to getMapOutputInfo
if mapId is not in the cache. (zhihai xu via devaraj)
+ MAPREDUCE-6426. TestShuffleHandler#testGetMapOutputInfo is failing.
+ (zhihai xu via devaraj)
+
Release 2.7.1 - 2015-07-06
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fffb15bb/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
index 746071f..bad9b2d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
@@ -837,6 +837,9 @@ public class TestShuffleHandler {
Configuration conf = new Configuration();
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS, 3);
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+ "simple");
+ UserGroupInformation.setConfiguration(conf);
File absLogDir = new File("target", TestShuffleHandler.class.
getSimpleName() + "LocDir").getAbsoluteFile();
conf.set(YarnConfiguration.NM_LOCAL_DIRS, absLogDir.getAbsolutePath());
@@ -924,7 +927,8 @@ public class TestShuffleHandler {
} catch (EOFException e) {
// ignore
}
- Assert.assertEquals(failures.size(), 0);
+ Assert.assertEquals("sendError called due to shuffle error",
+ 0, failures.size());
} finally {
shuffleHandler.stop();
FileUtil.fullyDelete(absLogDir);
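The fix pins the authentication mode before the handler starts, so security state leaked by earlier Kerberos-enabled tests cannot bleed into this one. The same reset is commonly done once per test; a hedged sketch of that pattern:

  @Before
  public void resetSecurity() {
    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
        "simple");
    // UserGroupInformation caches its Configuration statically, so it must
    // be re-applied explicitly for each test.
    UserGroupInformation.setConfiguration(conf);
  }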
[16/21] hadoop git commit: HADOOP-12210. Collect network usage on the
node. Contributed by Robert Grandl
Posted by aw...@apache.org.
HADOOP-12210. Collect network usage on the node. Contributed by Robert Grandl
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a0752d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a0752d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a0752d8
Branch: refs/heads/HADOOP-12111
Commit: 1a0752d85a15499d120b4a79af9bd740fcd1f8e0
Parents: 0e602fa
Author: Chris Douglas <cd...@apache.org>
Authored: Mon Jul 6 17:28:20 2015 -0700
Committer: Chris Douglas <cd...@apache.org>
Committed: Thu Jul 9 17:48:43 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 2 +
.../java/org/apache/hadoop/util/SysInfo.java | 12 +++
.../org/apache/hadoop/util/SysInfoLinux.java | 93 +++++++++++++++++++-
.../org/apache/hadoop/util/SysInfoWindows.java | 15 ++++
.../apache/hadoop/util/TestSysInfoLinux.java | 40 ++++++++-
.../gridmix/DummyResourceCalculatorPlugin.java | 19 ++++
.../yarn/util/ResourceCalculatorPlugin.java | 16 ++++
7 files changed, 195 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a0752d8/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index d9a9eba..3d4f1e4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -693,6 +693,8 @@ Release 2.8.0 - UNRELEASED
HADOOP-12180. Move ResourceCalculatorPlugin from YARN to Common.
(Chris Douglas via kasha)
+ HADOOP-12210. Collect network usage on the node (Robert Grandl via cdouglas)
+
OPTIMIZATIONS
HADOOP-11785. Reduce the number of listStatus operation in distcp
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a0752d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfo.java
index ec7fb24..24b339d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfo.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfo.java
@@ -108,4 +108,16 @@ public abstract class SysInfo {
*/
public abstract float getCpuUsage();
+ /**
+ * Obtain the aggregated number of bytes read over the network.
+ * @return total number of bytes read.
+ */
+ public abstract long getNetworkBytesRead();
+
+ /**
+ * Obtain the aggregated number of bytes written to the network.
+ * @return total number of bytes written.
+ */
+ public abstract long getNetworkBytesWritten();
+
}
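A minimal sketch of how a caller might use the two new accessors (the polling loop and the one-second interval are illustrative, not part of this patch; SysInfo.newInstance() returns the platform implementation):

    import org.apache.hadoop.util.SysInfo;

    public class NetSample {
      public static void main(String[] args) throws InterruptedException {
        SysInfo sys = SysInfo.newInstance();    // platform-specific instance
        long rx0 = sys.getNetworkBytesRead();   // counters are cumulative
        long tx0 = sys.getNetworkBytesWritten();
        Thread.sleep(1000L);
        // Throughput is the delta between two samples of the counters.
        System.out.println("rx B/s: " + (sys.getNetworkBytesRead() - rx0));
        System.out.println("tx B/s: " + (sys.getNetworkBytesWritten() - tx0));
      }
    }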
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a0752d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
index 055298d..8801985 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
@@ -83,9 +83,22 @@ public class SysInfoLinux extends SysInfo {
"[ \t]*([0-9]*)[ \t]*([0-9]*)[ \t].*");
private CpuTimeTracker cpuTimeTracker;
+ /**
+ * Pattern for parsing /proc/net/dev.
+ */
+ private static final String PROCFS_NETFILE = "/proc/net/dev";
+ private static final Pattern PROCFS_NETFILE_FORMAT =
+ Pattern.compile("^[ \t]*([a-zA-Z]+[0-9]*):" +
+ "[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)" +
+ "[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)" +
+ "[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)" +
+ "[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+).*");
+
private String procfsMemFile;
private String procfsCpuFile;
private String procfsStatFile;
+ private String procfsNetFile;
private long jiffyLengthInMillis;
private long ramSize = 0;
@@ -98,6 +111,8 @@ public class SysInfoLinux extends SysInfo {
/* number of physical cores on the system. */
private int numCores = 0;
private long cpuFrequency = 0L; // CPU frequency on the system (kHz)
+ private long numNetBytesRead = 0L; // aggregated bytes read from network
+ private long numNetBytesWritten = 0L; // aggregated bytes written to network
private boolean readMemInfoFile = false;
private boolean readCpuInfoFile = false;
@@ -130,7 +145,7 @@ public class SysInfoLinux extends SysInfo {
public SysInfoLinux() {
this(PROCFS_MEMFILE, PROCFS_CPUINFO, PROCFS_STAT,
- JIFFY_LENGTH_IN_MILLIS);
+ PROCFS_NETFILE, JIFFY_LENGTH_IN_MILLIS);
}
/**
@@ -139,16 +154,19 @@ public class SysInfoLinux extends SysInfo {
* @param procfsMemFile fake file for /proc/meminfo
* @param procfsCpuFile fake file for /proc/cpuinfo
* @param procfsStatFile fake file for /proc/stat
+ * @param procfsNetFile fake file for /proc/net/dev
* @param jiffyLengthInMillis fake jiffy length value
*/
@VisibleForTesting
public SysInfoLinux(String procfsMemFile,
String procfsCpuFile,
String procfsStatFile,
+ String procfsNetFile,
long jiffyLengthInMillis) {
this.procfsMemFile = procfsMemFile;
this.procfsCpuFile = procfsCpuFile;
this.procfsStatFile = procfsStatFile;
+ this.procfsNetFile = procfsNetFile;
this.jiffyLengthInMillis = jiffyLengthInMillis;
this.cpuTimeTracker = new CpuTimeTracker(jiffyLengthInMillis);
}
@@ -338,6 +356,61 @@ public class SysInfoLinux extends SysInfo {
}
}
+ /**
+ * Read /proc/net/dev file, parse and calculate amount
+ * of bytes read and written through the network.
+ */
+ private void readProcNetInfoFile() {
+
+ numNetBytesRead = 0L;
+ numNetBytesWritten = 0L;
+
+ // Read "/proc/net/dev" file
+ BufferedReader in;
+ InputStreamReader fReader;
+ try {
+ fReader = new InputStreamReader(
+ new FileInputStream(procfsNetFile), Charset.forName("UTF-8"));
+ in = new BufferedReader(fReader);
+ } catch (FileNotFoundException f) {
+ return;
+ }
+
+ Matcher mat;
+ try {
+ String str = in.readLine();
+ while (str != null) {
+ mat = PROCFS_NETFILE_FORMAT.matcher(str);
+ if (mat.find()) {
+ assert mat.groupCount() >= 16;
+
+ // ignore loopback interfaces
+ if (mat.group(1).equals("lo")) {
+ str = in.readLine();
+ continue;
+ }
+ numNetBytesRead += Long.parseLong(mat.group(2));
+ numNetBytesWritten += Long.parseLong(mat.group(10));
+ }
+ str = in.readLine();
+ }
+ } catch (IOException io) {
+ LOG.warn("Error reading the stream " + io);
+ } finally {
+ // Close the streams
+ try {
+ fReader.close();
+ try {
+ in.close();
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + in);
+ }
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + fReader);
+ }
+ }
+ }
+
/** {@inheritDoc} */
@Override
public long getPhysicalMemorySize() {
@@ -405,6 +478,20 @@ public class SysInfoLinux extends SysInfo {
return overallCpuUsage;
}
+ /** {@inheritDoc} */
+ @Override
+ public long getNetworkBytesRead() {
+ readProcNetInfoFile();
+ return numNetBytesRead;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getNetworkBytesWritten() {
+ readProcNetInfoFile();
+ return numNetBytesWritten;
+ }
+
/**
* Test the {@link SysInfoLinux}.
*
@@ -424,6 +511,10 @@ public class SysInfoLinux extends SysInfo {
System.out.println("CPU frequency (kHz) : " + plugin.getCpuFrequency());
System.out.println("Cumulative CPU time (ms) : " +
plugin.getCumulativeCpuTime());
+ System.out.println("Total network read (bytes) : "
+ + plugin.getNetworkBytesRead());
+ System.out.println("Total network written (bytes) : "
+ + plugin.getNetworkBytesWritten());
try {
// Sleep so we can compute the CPU usage
Thread.sleep(500L);
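For reference, a data line in /proc/net/dev looks like the following (numbers are illustrative). In the pattern above, group(1) is the interface name, group(2) the cumulative receive bytes, and group(10) the cumulative transmit bytes, which is why the parser sums groups 2 and 10 and skips "lo" to avoid counting loopback traffic:

    eth0: 2097172468 3452527 0 0 0 0 0 299787 1355620114 1866280 0 0 0 0 0 0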
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a0752d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
index da4c1c5..f8542a3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
@@ -178,4 +178,19 @@ public class SysInfoWindows extends SysInfo {
refreshIfNeeded();
return cpuUsage;
}
+
+ /** {@inheritDoc} */
+ @Override
+ public long getNetworkBytesRead() {
+ // TODO unimplemented
+ return 0L;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getNetworkBytesWritten() {
+ // TODO unimplemented
+ return 0L;
+ }
+
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a0752d8/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
index 73edc77..2a31f31 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
@@ -44,8 +44,10 @@ public class TestSysInfoLinux {
public FakeLinuxResourceCalculatorPlugin(String procfsMemFile,
String procfsCpuFile,
String procfsStatFile,
+ String procfsNetFile,
long jiffyLengthInMillis) {
- super(procfsMemFile, procfsCpuFile, procfsStatFile, jiffyLengthInMillis);
+ super(procfsMemFile, procfsCpuFile, procfsStatFile, procfsNetFile,
+ jiffyLengthInMillis);
}
@Override
long getCurrentTime() {
@@ -61,14 +63,17 @@ public class TestSysInfoLinux {
private static final String FAKE_MEMFILE;
private static final String FAKE_CPUFILE;
private static final String FAKE_STATFILE;
+ private static final String FAKE_NETFILE;
private static final long FAKE_JIFFY_LENGTH = 10L;
static {
int randomNum = (new Random()).nextInt(1000000000);
FAKE_MEMFILE = TEST_ROOT_DIR + File.separator + "MEMINFO_" + randomNum;
FAKE_CPUFILE = TEST_ROOT_DIR + File.separator + "CPUINFO_" + randomNum;
FAKE_STATFILE = TEST_ROOT_DIR + File.separator + "STATINFO_" + randomNum;
+ FAKE_NETFILE = TEST_ROOT_DIR + File.separator + "NETINFO_" + randomNum;
plugin = new FakeLinuxResourceCalculatorPlugin(FAKE_MEMFILE, FAKE_CPUFILE,
FAKE_STATFILE,
+ FAKE_NETFILE,
FAKE_JIFFY_LENGTH);
}
static final String MEMINFO_FORMAT =
@@ -141,6 +146,17 @@ public class TestSysInfoLinux {
"procs_running 1\n" +
"procs_blocked 0\n";
+ static final String NETINFO_FORMAT =
+ "Inter-| Receive | Transmit\n"+
+ "face |bytes packets errs drop fifo frame compressed multicast|bytes packets"+
+ "errs drop fifo colls carrier compressed\n"+
+ " lo: 42236310 563003 0 0 0 0 0 0 42236310 563003 " +
+ "0 0 0 0 0 0\n"+
+ " eth0: %d 3452527 0 0 0 0 0 299787 %d 1866280 0 0 " +
+ "0 0 0 0\n"+
+ " eth1: %d 3152521 0 0 0 0 0 219781 %d 1866290 0 0 " +
+ "0 0 0 0\n";
+
/**
* Test parsing /proc/stat and /proc/cpuinfo
* @throws IOException
@@ -320,4 +336,26 @@ public class TestSysInfoLinux {
IOUtils.closeQuietly(fWriter);
}
}
+
+ /**
+ * Test parsing /proc/net/dev
+ * @throws IOException
+ */
+ @Test
+ public void parsingProcNetFile() throws IOException {
+ long numBytesReadIntf1 = 2097172468L;
+ long numBytesWrittenIntf1 = 1355620114L;
+ long numBytesReadIntf2 = 1097172460L;
+ long numBytesWrittenIntf2 = 1055620110L;
+ File tempFile = new File(FAKE_NETFILE);
+ tempFile.deleteOnExit();
+ FileWriter fWriter = new FileWriter(FAKE_NETFILE);
+ fWriter.write(String.format(NETINFO_FORMAT,
+ numBytesReadIntf1, numBytesWrittenIntf1,
+ numBytesReadIntf2, numBytesWrittenIntf2));
+ fWriter.close();
+ assertEquals(numBytesReadIntf1 + numBytesReadIntf2,
+ plugin.getNetworkBytesRead());
+ assertEquals(numBytesWrittenIntf1 + numBytesWrittenIntf2,
+ plugin.getNetworkBytesWritten());
+ }
+
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a0752d8/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/DummyResourceCalculatorPlugin.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/DummyResourceCalculatorPlugin.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/DummyResourceCalculatorPlugin.java
index fd4cb83..b86303b 100644
--- a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/DummyResourceCalculatorPlugin.java
+++ b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/DummyResourceCalculatorPlugin.java
@@ -48,6 +48,12 @@ public class DummyResourceCalculatorPlugin extends ResourceCalculatorPlugin {
"mapred.tasktracker.cumulativecputime.testing";
/** CPU usage percentage for testing */
public static final String CPU_USAGE = "mapred.tasktracker.cpuusage.testing";
+ /** cumulative number of bytes read over the network */
+ public static final String NETWORK_BYTES_READ =
+ "mapred.tasktracker.networkread.testing";
+ /** cumulative number of bytes written over the network */
+ public static final String NETWORK_BYTES_WRITTEN =
+ "mapred.tasktracker.networkwritten.testing";
/** process cumulative CPU usage time for testing */
public static final String PROC_CUMULATIVE_CPU_TIME =
"mapred.tasktracker.proccumulativecputime.testing";
@@ -111,4 +117,17 @@ public class DummyResourceCalculatorPlugin extends ResourceCalculatorPlugin {
public float getCpuUsage() {
return getConf().getFloat(CPU_USAGE, -1);
}
+
+ /** {@inheritDoc} */
+ @Override
+ public long getNetworkBytesRead() {
+ return getConf().getLong(NETWORK_BYTES_READ, -1);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getNetworkBytesWritten() {
+ return getConf().getLong(NETWORK_BYTES_WRITTEN, -1);
+ }
+
}
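A hedged sketch of how a test might drive the new dummy values (the concrete numbers are made up; assumes the usual JUnit static imports):

    Configuration conf = new Configuration();
    conf.setLong(DummyResourceCalculatorPlugin.NETWORK_BYTES_READ, 12345L);
    conf.setLong(DummyResourceCalculatorPlugin.NETWORK_BYTES_WRITTEN, 67890L);
    DummyResourceCalculatorPlugin plugin = new DummyResourceCalculatorPlugin();
    plugin.setConf(conf);
    // The plugin simply echoes the configured values back.
    assertEquals(12345L, plugin.getNetworkBytesRead());
    assertEquals(67890L, plugin.getNetworkBytesWritten());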
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a0752d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
index 5e5f1b4..21724a9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
@@ -124,6 +124,22 @@ public class ResourceCalculatorPlugin extends Configured {
return sys.getCpuUsage();
}
+ /**
+ * Obtain the aggregated number of bytes read over the network.
+ * @return total number of bytes read.
+ */
+ public long getNetworkBytesRead() {
+ return sys.getNetworkBytesRead();
+ }
+
+ /**
+ * Obtain the aggregated number of bytes written to the network.
+ * @return total number of bytes written.
+ */
+ public long getNetworkBytesWritten() {
+ return sys.getNetworkBytesWritten();
+ }
+
/**
* Create the ResourceCalculatorPlugin from the class name and configure it. If
* class name is null, this method will try and return a memory calculator
[10/21] hadoop git commit: YARN-1012. Report NM aggregated container
resource utilization in heartbeat. (Inigo Goiri via kasha)
Posted by aw...@apache.org.
YARN-1012. Report NM aggregated container resource utilization in heartbeat. (Inigo Goiri via kasha)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/527c40e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/527c40e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/527c40e4
Branch: refs/heads/HADOOP-12111
Commit: 527c40e4d664c721b8f32d7cd8df21b2666fea8a
Parents: fffb15b
Author: Karthik Kambatla <ka...@apache.org>
Authored: Thu Jul 9 09:35:14 2015 -0700
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Thu Jul 9 09:35:14 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +
.../yarn/client/TestResourceTrackerOnHA.java | 2 +-
.../yarn/server/api/records/NodeStatus.java | 43 +++++-
.../server/api/records/ResourceUtilization.java | 133 +++++++++++++++++++
.../api/records/impl/pb/NodeStatusPBImpl.java | 34 ++++-
.../impl/pb/ResourceUtilizationPBImpl.java | 104 +++++++++++++++
.../yarn/server/api/records/package-info.java | 19 +++
.../main/proto/yarn_server_common_protos.proto | 7 +
.../nodemanager/NodeStatusUpdaterImpl.java | 19 ++-
.../monitor/ContainersMonitor.java | 3 +-
.../monitor/ContainersMonitorImpl.java | 28 ++++
11 files changed, 387 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/527c40e4/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d1960e6..19f0854 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -131,6 +131,9 @@ Release 2.8.0 - UNRELEASED
YARN-41. The RM should handle the graceful shutdown of the NM. (Devaraj K via
junping_du)
+ YARN-1012. Report NM aggregated container resource utilization in heartbeat.
+ (Inigo Goiri via kasha)
+
IMPROVEMENTS
YARN-644. Basic null check is not performed on passed in arguments before
http://git-wip-us.apache.org/repos/asf/hadoop/blob/527c40e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java
index 8167a58..c51570c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java
@@ -68,7 +68,7 @@ public class TestResourceTrackerOnHA extends ProtocolHATestBase{
failoverThread = createAndStartFailoverThread();
NodeStatus status =
NodeStatus.newInstance(NodeId.newInstance("localhost", 0), 0, null,
- null, null);
+ null, null, null);
NodeHeartbeatRequest request2 =
NodeHeartbeatRequest.newInstance(status, null, null,null);
resourceTracker.nodeHeartbeat(request2);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/527c40e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java
index aad819d..38b0381 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java
@@ -19,24 +19,48 @@ package org.apache.hadoop.yarn.server.api.records;
import java.util.List;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.util.Records;
-
+/**
+ * {@code NodeStatus} is a summary of the status of the node.
+ * <p>
+ * It includes information such as:
+ * <ul>
+ * <li>Node information and status.</li>
+ * <li>Container status.</li>
+ * </ul>
+ */
public abstract class NodeStatus {
-
+
+ /**
+ * Create a new {@code NodeStatus}.
+ * @param nodeId Identifier for this node.
+ * @param responseId Identifier for the response.
+ * @param containerStatuses Status of the containers running in this node.
+ * @param keepAliveApplications Applications to keep alive.
+ * @param nodeHealthStatus Health status of the node.
+ * @param containersUtilization Utilization of the containers in this node.
+ * @return New {@code NodeStatus} with the provided information.
+ */
public static NodeStatus newInstance(NodeId nodeId, int responseId,
List<ContainerStatus> containerStatuses,
List<ApplicationId> keepAliveApplications,
- NodeHealthStatus nodeHealthStatus) {
+ NodeHealthStatus nodeHealthStatus,
+ ResourceUtilization containersUtilization) {
NodeStatus nodeStatus = Records.newRecord(NodeStatus.class);
nodeStatus.setResponseId(responseId);
nodeStatus.setNodeId(nodeId);
nodeStatus.setContainersStatuses(containerStatuses);
nodeStatus.setKeepAliveApplications(keepAliveApplications);
nodeStatus.setNodeHealthStatus(nodeHealthStatus);
+ nodeStatus.setContainersUtilization(containersUtilization);
return nodeStatus;
}
@@ -55,4 +79,17 @@ public abstract class NodeStatus {
public abstract void setNodeId(NodeId nodeId);
public abstract void setResponseId(int responseId);
+
+ /**
+ * Get the <em>resource utilization</em> of the containers.
+ * @return <em>resource utilization</em> of the containers
+ */
+ @Public
+ @Stable
+ public abstract ResourceUtilization getContainersUtilization();
+
+ @Private
+ @Unstable
+ public abstract void setContainersUtilization(
+ ResourceUtilization containersUtilization);
}
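An illustrative call to the widened factory (all values below are made up; containerStatuses, keepAliveApps and healthStatus stand in for whatever the caller already holds):

    ResourceUtilization util = ResourceUtilization.newInstance(1024, 2048, 1.5f);
    NodeStatus status = NodeStatus.newInstance(
        NodeId.newInstance("host1", 8041), 1,
        containerStatuses, keepAliveApps, healthStatus, util);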
http://git-wip-us.apache.org/repos/asf/hadoop/blob/527c40e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/ResourceUtilization.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/ResourceUtilization.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/ResourceUtilization.java
new file mode 100644
index 0000000..39896a3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/ResourceUtilization.java
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * <p>
+ * <code>ResourceUtilization</code> models the utilization of a set of computer
+ * resources in the cluster.
+ * </p>
+ */
+@Private
+@Evolving
+public abstract class ResourceUtilization implements
+ Comparable<ResourceUtilization> {
+ public static ResourceUtilization newInstance(int pmem, int vmem, float cpu) {
+ ResourceUtilization utilization =
+ Records.newRecord(ResourceUtilization.class);
+ utilization.setPhysicalMemory(pmem);
+ utilization.setVirtualMemory(vmem);
+ utilization.setCPU(cpu);
+ return utilization;
+ }
+
+ /**
+ * Get used <em>virtual memory</em>.
+ *
+ * @return <em>virtual memory</em> in MB
+ */
+ public abstract int getVirtualMemory();
+
+ /**
+ * Set used <em>virtual memory</em>.
+ *
+ * @param vmem <em>virtual memory</em> in MB
+ */
+ public abstract void setVirtualMemory(int vmem);
+
+ /**
+ * Get <em>physical memory</em>.
+ *
+ * @return <em>physical memory</em> in MB
+ */
+ public abstract int getPhysicalMemory();
+
+ /**
+ * Set <em>physical memory</em>.
+ *
+ * @param pmem <em>physical memory</em> in MB
+ */
+ public abstract void setPhysicalMemory(int pmem);
+
+ /**
+ * Get <em>CPU</em> utilization.
+ *
+ * @return <em>CPU utilization</em> normalized to 1 CPU
+ */
+ public abstract float getCPU();
+
+ /**
+ * Set <em>CPU</em> utilization.
+ *
+ * @param cpu <em>CPU utilization</em> normalized to 1 CPU
+ */
+ public abstract void setCPU(float cpu);
+
+ @Override
+ public int hashCode() {
+ final int prime = 263167;
+ int result = 3571;
+ result = prime * result + getVirtualMemory();
+ result = prime * result + getPhysicalMemory();
+ result = 31 * result + Float.valueOf(getCPU()).hashCode();
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null) {
+ return false;
+ }
+ if (!(obj instanceof ResourceUtilization)) {
+ return false;
+ }
+ ResourceUtilization other = (ResourceUtilization) obj;
+ if (getVirtualMemory() != other.getVirtualMemory()
+ || getPhysicalMemory() != other.getPhysicalMemory()
+ || getCPU() != other.getCPU()) {
+ return false;
+ }
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ return "<pmem:" + getPhysicalMemory() + ", vmem:" + getVirtualMemory()
+ + ", vCores:" + getCPU() + ">";
+ }
+
+ /**
+ * Add utilization to the current one.
+ * @param pmem Physical memory used to add.
+ * @param vmem Virtual memory used to add.
+ * @param cpu CPU utilization to add.
+ */
+ public void addTo(int pmem, int vmem, float cpu) {
+ this.setPhysicalMemory(this.getPhysicalMemory() + pmem);
+ this.setVirtualMemory(this.getVirtualMemory() + vmem);
+ this.setCPU(this.getCPU() + cpu);
+ }
+}
\ No newline at end of file
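A small sketch of aggregating per-container samples with addTo (the numbers are illustrative):

    ResourceUtilization total = ResourceUtilization.newInstance(0, 0, 0.0f);
    total.addTo(512, 1024, 0.25f);  // container 1: pmem MB, vmem MB, vcores
    total.addTo(256,  512, 0.50f);  // container 2
    // total.toString() -> "<pmem:768, vmem:1536, vCores:0.75>"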
http://git-wip-us.apache.org/repos/asf/hadoop/blob/527c40e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeStatusPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeStatusPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeStatusPBImpl.java
index 65376dc..fffd6a9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeStatusPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeStatusPBImpl.java
@@ -35,9 +35,10 @@ import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeHealthStatusProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.ResourceUtilizationProto;
import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;
-
+import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
public class NodeStatusPBImpl extends NodeStatus {
NodeStatusProto proto = NodeStatusProto.getDefaultInstance();
@@ -291,6 +292,28 @@ public class NodeStatusPBImpl extends NodeStatus {
this.nodeHealthStatus = healthStatus;
}
+ @Override
+ public ResourceUtilization getContainersUtilization() {
+ NodeStatusProtoOrBuilder p =
+ this.viaProto ? this.proto : this.builder;
+ if (!p.hasContainersUtilization()) {
+ return null;
+ }
+ return convertFromProtoFormat(p.getContainersUtilization());
+ }
+
+ @Override
+ public void setContainersUtilization(
+ ResourceUtilization containersUtilization) {
+ maybeInitBuilder();
+ if (containersUtilization == null) {
+ this.builder.clearContainersUtilization();
+ return;
+ }
+ this.builder
+ .setContainersUtilization(convertToProtoFormat(containersUtilization));
+ }
+
private NodeIdProto convertToProtoFormat(NodeId nodeId) {
return ((NodeIdPBImpl)nodeId).getProto();
}
@@ -323,4 +346,13 @@ public class NodeStatusPBImpl extends NodeStatus {
private ApplicationIdProto convertToProtoFormat(ApplicationId c) {
return ((ApplicationIdPBImpl)c).getProto();
}
+
+ private ResourceUtilizationProto convertToProtoFormat(ResourceUtilization r) {
+ return ((ResourceUtilizationPBImpl) r).getProto();
+ }
+
+ private ResourceUtilizationPBImpl convertFromProtoFormat(
+ ResourceUtilizationProto p) {
+ return new ResourceUtilizationPBImpl(p);
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/527c40e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/ResourceUtilizationPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/ResourceUtilizationPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/ResourceUtilizationPBImpl.java
new file mode 100644
index 0000000..01cda7a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/ResourceUtilizationPBImpl.java
@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.records.impl.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.ResourceUtilizationProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.ResourceUtilizationProtoOrBuilder;
+import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
+
+@Private
+@Unstable
+public class ResourceUtilizationPBImpl extends ResourceUtilization {
+ private ResourceUtilizationProto proto = ResourceUtilizationProto
+ .getDefaultInstance();
+ private ResourceUtilizationProto.Builder builder = null;
+ private boolean viaProto = false;
+
+ public ResourceUtilizationPBImpl() {
+ builder = ResourceUtilizationProto.newBuilder();
+ }
+
+ public ResourceUtilizationPBImpl(ResourceUtilizationProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public ResourceUtilizationProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = ResourceUtilizationProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ @Override
+ public int getPhysicalMemory() {
+ ResourceUtilizationProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getPmem());
+ }
+
+ @Override
+ public void setPhysicalMemory(int pmem) {
+ maybeInitBuilder();
+ builder.setPmem(pmem);
+ }
+
+ @Override
+ public int getVirtualMemory() {
+ ResourceUtilizationProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getVmem());
+ }
+
+ @Override
+ public void setVirtualMemory(int vmem) {
+ maybeInitBuilder();
+ builder.setVmem(vmem);
+ }
+
+ @Override
+ public float getCPU() {
+ ResourceUtilizationProtoOrBuilder p = viaProto ? proto : builder;
+ return p.getCpu();
+ }
+
+ @Override
+ public void setCPU(float cpu) {
+ maybeInitBuilder();
+ builder.setCpu(cpu);
+ }
+
+ @Override
+ public int compareTo(ResourceUtilization other) {
+ int diff = this.getPhysicalMemory() - other.getPhysicalMemory();
+ if (diff == 0) {
+ diff = this.getVirtualMemory() - other.getVirtualMemory();
+ if (diff == 0) {
+ diff = Float.compare(this.getCPU(), other.getCPU());
+ }
+ }
+ return diff;
+ }
+}
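A hedged round-trip sketch through the protobuf form (assuming the YARN record factory resolves ResourceUtilization to this PB implementation, as it does for the other records here):

    ResourceUtilization u = ResourceUtilization.newInstance(100, 200, 0.5f);
    ResourceUtilizationProto proto = ((ResourceUtilizationPBImpl) u).getProto();
    ResourceUtilization copy = new ResourceUtilizationPBImpl(proto);
    // copy.equals(u) holds: pmem, vmem and cpu survive the round trip.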
http://git-wip-us.apache.org/repos/asf/hadoop/blob/527c40e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/package-info.java
new file mode 100644
index 0000000..bf8497f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/package-info.java
@@ -0,0 +1,19 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/** Server records. */
+package org.apache.hadoop.yarn.server.api.records;
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/527c40e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto
index 99149ac..a810813 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto
@@ -36,6 +36,7 @@ message NodeStatusProto {
repeated ContainerStatusProto containersStatuses = 3;
optional NodeHealthStatusProto nodeHealthStatus = 4;
repeated ApplicationIdProto keep_alive_applications = 5;
+ optional ResourceUtilizationProto containers_utilization = 6;
}
message MasterKeyProto {
@@ -52,4 +53,10 @@ message NodeHealthStatusProto {
message VersionProto {
optional int32 major_version = 1;
optional int32 minor_version = 2;
+}
+
+message ResourceUtilizationProto {
+ optional int32 pmem = 1;
+ optional int32 vmem = 2;
+ optional float cpu = 3;
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/527c40e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index 3721b0e..30a2bd5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -73,13 +73,14 @@ import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.NodeAction;
import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;
+import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitor;
import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
import org.apache.hadoop.yarn.server.nodemanager.nodelabels.NodeLabelsProvider;
-import org.apache.hadoop.yarn.util.Records;
import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerHardwareUtils;
import org.apache.hadoop.yarn.util.YarnVersionInfo;
@@ -429,13 +430,27 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
+ ", " + nodeHealthStatus.getHealthReport());
}
List<ContainerStatus> containersStatuses = getContainerStatuses();
+ ResourceUtilization containersUtilization = getContainersUtilization();
NodeStatus nodeStatus =
NodeStatus.newInstance(nodeId, responseId, containersStatuses,
- createKeepAliveApplicationList(), nodeHealthStatus);
+ createKeepAliveApplicationList(), nodeHealthStatus,
+ containersUtilization);
return nodeStatus;
}
+ /**
+ * Get the aggregated utilization of the containers in this node.
+ * @return Resource utilization of all the containers.
+ */
+ private ResourceUtilization getContainersUtilization() {
+ ContainerManagerImpl containerManager =
+ (ContainerManagerImpl) this.context.getContainerManager();
+ ContainersMonitor containersMonitor =
+ containerManager.getContainersMonitor();
+ return containersMonitor.getContainersUtilization();
+ }
+
// Iterate through the NMContext and clone and get all the containers'
// statuses. If it's a completed container, add into the
// recentlyStoppedContainers collections.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/527c40e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitor.java
index d3e2bf2..f0dd2e7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitor.java
@@ -19,10 +19,11 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor;
import org.apache.hadoop.service.Service;
+import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.server.nodemanager.ResourceView;
public interface ContainersMonitor extends Service,
EventHandler<ContainersMonitorEvent>, ResourceView {
-
+ public ResourceUtilization getContainersUtilization();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/527c40e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
index b5f154d..57d1bad 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.server.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerKillEvent;
@@ -78,6 +79,8 @@ public class ContainersMonitorImpl extends AbstractService implements
private static final long UNKNOWN_MEMORY_LIMIT = -1L;
private int nodeCpuPercentageForYARN;
+ private ResourceUtilization containersUtilization;
+
public ContainersMonitorImpl(ContainerExecutor exec,
AsyncDispatcher dispatcher, Context context) {
super("containers-monitor");
@@ -89,6 +92,8 @@ public class ContainersMonitorImpl extends AbstractService implements
this.containersToBeAdded = new HashMap<ContainerId, ProcessTreeInfo>();
this.containersToBeRemoved = new ArrayList<ContainerId>();
this.monitoringThread = new MonitoringThread();
+
+ this.containersUtilization = ResourceUtilization.newInstance(0, 0, 0.0f);
}
@Override
@@ -384,6 +389,11 @@ public class ContainersMonitorImpl extends AbstractService implements
containersToBeRemoved.clear();
}
+ // Temporary structure to calculate the total resource utilization of
+ // the containers
+ ResourceUtilization trackedContainersUtilization =
+ ResourceUtilization.newInstance(0, 0, 0.0f);
+
// Now do the monitoring for the trackingContainers
// Check memory usage and kill any overflowing containers
long vmemUsageByAllContainers = 0;
@@ -463,6 +473,12 @@ public class ContainersMonitorImpl extends AbstractService implements
currentPmemUsage, pmemLimit));
}
+ // Add resource utilization for this container
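+ // currentPmemUsage/currentVmemUsage are in bytes; ">> 20" converts them
+ // to MB, and milliVcoresUsed / 1000.0f converts millivcores to vcores,
+ // matching the units ResourceUtilization expects.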
+ trackedContainersUtilization.addTo(
+ (int) (currentPmemUsage >> 20),
+ (int) (currentVmemUsage >> 20),
+ milliVcoresUsed / 1000.0f);
+
// Add usage to container metrics
if (containerMetricsEnabled) {
ContainerMetrics.forContainer(
@@ -542,6 +558,9 @@ public class ContainersMonitorImpl extends AbstractService implements
+ cpuUsagePercentPerCoreByAllContainers);
}
+ // Save the aggregated utilization of the containers
+ setContainersUtilization(trackedContainersUtilization);
+
try {
Thread.sleep(monitoringInterval);
} catch (InterruptedException e) {
@@ -614,6 +633,15 @@ public class ContainersMonitorImpl extends AbstractService implements
}
@Override
+ public ResourceUtilization getContainersUtilization() {
+ return this.containersUtilization;
+ }
+
+ public void setContainersUtilization(ResourceUtilization utilization) {
+ this.containersUtilization = utilization;
+ }
+
+ @Override
public void handle(ContainersMonitorEvent monitoringEvent) {
if (!isEnabled()) {
[07/21] hadoop git commit: HADOOP-12200.
TestCryptoStreamsWithOpensslAesCtrCryptoCodec should be skipped in non-native
profile. Contributed by Masatake Iwasaki.
Posted by aw...@apache.org.
HADOOP-12200. TestCryptoStreamsWithOpensslAesCtrCryptoCodec should be skipped in non-native profile. Contributed by Masatake Iwasaki.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b7692c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b7692c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b7692c4
Branch: refs/heads/HADOOP-12111
Commit: 6b7692c468931215ec0de7e11f0bdf15b80e68ce
Parents: b8832fc
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu Jul 9 13:14:53 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu Jul 9 13:14:53 2015 +0900
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java | 3 +++
2 files changed, 6 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b7692c4/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index c99fb5e..a906a63 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -948,6 +948,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-12117. Potential NPE from Configuration#loadProperty with
allowNullValueProperties set. (zhihai xu via vinayakumarb)
+ HADOOP-12200. TestCryptoStreamsWithOpensslAesCtrCryptoCodec should be
+ skipped in non-native profile. (Masatake Iwasaki via aajisaka)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b7692c4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java
index 684ec09..d5f25b8 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java
@@ -19,16 +19,19 @@ package org.apache.hadoop.crypto;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.util.NativeCodeLoader;
import org.junit.BeforeClass;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
+import static org.junit.Assume.assumeTrue;
public class TestCryptoStreamsWithOpensslAesCtrCryptoCodec
extends TestCryptoStreams {
@BeforeClass
public static void init() throws Exception {
+ assumeTrue(NativeCodeLoader.isNativeCodeLoaded());
Configuration conf = new Configuration();
conf.set(
CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_AES_CTR_NOPADDING_KEY,
[13/21] hadoop git commit: HADOOP-12180. Move
ResourceCalculatorPlugin from YARN to Common. (Chris Douglas via kasha)
Posted by aw...@apache.org.
HADOOP-12180. Move ResourceCalculatorPlugin from YARN to Common. (Chris Douglas via kasha)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac604837
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac604837
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac604837
Branch: refs/heads/HADOOP-12111
Commit: ac6048372a58b3a3b57cd5f2702b44a3d4667f3d
Parents: aa067c6
Author: Karthik Kambatla <ka...@apache.org>
Authored: Thu Jul 9 09:56:40 2015 -0700
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Thu Jul 9 09:56:40 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +
.../org/apache/hadoop/util/CpuTimeTracker.java | 115 +++++
.../java/org/apache/hadoop/util/SysInfo.java | 111 +++++
.../org/apache/hadoop/util/SysInfoLinux.java | 444 +++++++++++++++++++
.../org/apache/hadoop/util/SysInfoWindows.java | 181 ++++++++
.../apache/hadoop/util/TestSysInfoLinux.java | 323 ++++++++++++++
.../apache/hadoop/util/TestSysInfoWindows.java | 100 +++++
.../apache/hadoop/yarn/util/CpuTimeTracker.java | 100 -----
.../util/LinuxResourceCalculatorPlugin.java | 392 +---------------
.../yarn/util/ProcfsBasedProcessTree.java | 34 +-
.../yarn/util/ResourceCalculatorPlugin.java | 68 +--
.../yarn/util/WindowsBasedProcessTree.java | 2 +-
.../util/WindowsResourceCalculatorPlugin.java | 158 +------
.../util/TestLinuxResourceCalculatorPlugin.java | 324 --------------
.../util/TestResourceCalculatorProcessTree.java | 2 +-
.../TestWindowsResourceCalculatorPlugin.java | 86 ----
16 files changed, 1335 insertions(+), 1108 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac604837/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index a906a63..d9a9eba 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -690,6 +690,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-12201. Add tracing to FileSystem#createFileSystem and Globber#glob
(cmccabe)
+ HADOOP-12180. Move ResourceCalculatorPlugin from YARN to Common.
+ (Chris Douglas via kasha)
+
OPTIMIZATIONS
HADOOP-11785. Reduce the number of listStatus operation in distcp
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac604837/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CpuTimeTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CpuTimeTracker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CpuTimeTracker.java
new file mode 100644
index 0000000..3f17c9a
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CpuTimeTracker.java
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.math.BigInteger;
+
+/**
+ * Utility for sampling and computing CPU usage.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class CpuTimeTracker {
+ public static final int UNAVAILABLE = -1;
+ private final long minimumTimeInterval;
+
+ // CPU used time since system is on (ms)
+ private BigInteger cumulativeCpuTime = BigInteger.ZERO;
+
+ // CPU used time read last time (ms)
+ private BigInteger lastCumulativeCpuTime = BigInteger.ZERO;
+
+ // Unix timestamp while reading the CPU time (ms)
+ private long sampleTime;
+ private long lastSampleTime;
+ private float cpuUsage;
+ private BigInteger jiffyLengthInMillis;
+
+ public CpuTimeTracker(long jiffyLengthInMillis) {
+ this.jiffyLengthInMillis = BigInteger.valueOf(jiffyLengthInMillis);
+ this.cpuUsage = UNAVAILABLE;
+ this.sampleTime = UNAVAILABLE;
+ this.lastSampleTime = UNAVAILABLE;
+ minimumTimeInterval = 10 * jiffyLengthInMillis;
+ }
+
+ /**
+ * Return percentage of cpu time spent over the time since last update.
+ * CPU time spent is based on elapsed jiffies multiplied by amount of
+ * time for 1 core. Thus, if you use 2 cores completely you would have spent
+ * twice the actual time between updates and this will return 200%.
+ *
+ * @return Return percentage of cpu usage since last update, {@link
+ * CpuTimeTracker#UNAVAILABLE} if there haven't been 2 updates more than
+ * {@link CpuTimeTracker#minimumTimeInterval} apart
+ */
+ public float getCpuTrackerUsagePercent() {
+ if (lastSampleTime == UNAVAILABLE ||
+ lastSampleTime > sampleTime) {
+ // lastSampleTime > sampleTime may happen when the system time is changed
+ lastSampleTime = sampleTime;
+ lastCumulativeCpuTime = cumulativeCpuTime;
+ return cpuUsage;
+ }
+ // When lastSampleTime is sufficiently old, update cpuUsage.
+ // Also take a sample of the current time and cumulative CPU time for the
+ // use of the next calculation.
+ if (sampleTime > lastSampleTime + minimumTimeInterval) {
+ cpuUsage =
+ ((cumulativeCpuTime.subtract(lastCumulativeCpuTime)).floatValue())
+ * 100F / ((float) (sampleTime - lastSampleTime));
+ lastSampleTime = sampleTime;
+ lastCumulativeCpuTime = cumulativeCpuTime;
+ }
+ return cpuUsage;
+ }
+
+ /**
+ * Obtain the cumulative CPU time since the system is on.
+ * @return cumulative CPU time in milliseconds
+ */
+ public long getCumulativeCpuTime() {
+ return cumulativeCpuTime.longValue();
+ }
+
+ /**
+ * Apply delta to accumulators.
+ * @param elapsedJiffies updated jiffies
+ * @param newTime new sample time
+ */
+ public void updateElapsedJiffies(BigInteger elapsedJiffies, long newTime) {
+ cumulativeCpuTime = elapsedJiffies.multiply(jiffyLengthInMillis);
+ sampleTime = newTime;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("SampleTime " + this.sampleTime);
+ sb.append(" CummulativeCpuTime " + this.cumulativeCpuTime);
+ sb.append(" LastSampleTime " + this.lastSampleTime);
+ sb.append(" LastCummulativeCpuTime " + this.lastCumulativeCpuTime);
+ sb.append(" CpuUsage " + this.cpuUsage);
+ sb.append(" JiffyLengthMillisec " + this.jiffyLengthInMillis);
+ return sb.toString();
+ }
+}
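A worked example of the sampling contract (jiffy length and readings are made up): the first call only primes the tracker and returns UNAVAILABLE; once two samples are more than minimumTimeInterval apart, usage is the CPU-time delta over the wall-clock delta:

    CpuTimeTracker t = new CpuTimeTracker(10L);             // 10 ms jiffies
    t.updateElapsedJiffies(BigInteger.valueOf(100), 0L);    // 1000 ms CPU at t=0
    t.getCpuTrackerUsagePercent();                          // -1, primes tracker
    t.updateElapsedJiffies(BigInteger.valueOf(200), 1000L); // 2000 ms CPU at t=1s
    t.getCpuTrackerUsagePercent();                          // 100.0f: 1000 ms of
                                                            // CPU over 1000 ms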
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac604837/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfo.java
new file mode 100644
index 0000000..ec7fb24
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfo.java
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Plugin to calculate resource information on the system.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public abstract class SysInfo {
+
+ /**
+ * Return default OS instance.
+ * @throws UnsupportedOperationException If cannot determine OS.
+ * @return Default instance for the detected OS.
+ */
+ public static SysInfo newInstance() {
+ if (Shell.LINUX) {
+ return new SysInfoLinux();
+ }
+ if (Shell.WINDOWS) {
+ return new SysInfoWindows();
+ }
+ throw new UnsupportedOperationException("Could not determine OS");
+ }
+
+ /**
+ * Obtain the total size of the virtual memory present in the system.
+ *
+ * @return virtual memory size in bytes.
+ */
+ public abstract long getVirtualMemorySize();
+
+ /**
+ * Obtain the total size of the physical memory present in the system.
+ *
+ * @return physical memory size in bytes.
+ */
+ public abstract long getPhysicalMemorySize();
+
+ /**
+ * Obtain the total size of the available virtual memory present
+ * in the system.
+ *
+ * @return available virtual memory size in bytes.
+ */
+ public abstract long getAvailableVirtualMemorySize();
+
+ /**
+ * Obtain the total size of the available physical memory present
+ * in the system.
+ *
+ * @return available physical memory size in bytes.
+ */
+ public abstract long getAvailablePhysicalMemorySize();
+
+ /**
+ * Obtain the total number of logical processors present on the system.
+ *
+ * @return number of logical processors
+ */
+ public abstract int getNumProcessors();
+
+ /**
+ * Obtain total number of physical cores present on the system.
+ *
+ * @return number of physical cores
+ */
+ public abstract int getNumCores();
+
+ /**
+ * Obtain the CPU frequency of the system.
+ *
+ * @return CPU frequency in kHz
+ */
+ public abstract long getCpuFrequency();
+
+ /**
+ * Obtain the cumulative CPU time since the system was started.
+ *
+ * @return cumulative CPU time in milliseconds
+ */
+ public abstract long getCumulativeCpuTime();
+
+ /**
+ * Obtain the CPU usage % of the machine. Returns -1 if it is unavailable.
+ *
+ * @return CPU usage as a percentage of available cycles.
+ */
+ public abstract float getCpuUsage();
+
+}
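A quick sketch of what a caller of the new facade looks like; output is
machine-dependent, and getCpuUsage() typically reports -1 on the first call
since two samples are needed.

import org.apache.hadoop.util.SysInfo;

public class SysInfoDemo {
  public static void main(String[] args) {
    // newInstance() selects SysInfoLinux or SysInfoWindows for the platform
    SysInfo info = SysInfo.newInstance();
    System.out.println("physical memory (bytes): " + info.getPhysicalMemorySize());
    System.out.println("logical processors: " + info.getNumProcessors());
    System.out.println("physical cores: " + info.getNumCores());
    System.out.println("CPU usage (%): " + info.getCpuUsage());
  }
}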
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac604837/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
new file mode 100644
index 0000000..055298d
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
@@ -0,0 +1,444 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import java.io.BufferedReader;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.math.BigInteger;
+import java.nio.charset.Charset;
+import java.util.HashSet;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+
+/**
+ * Plugin to calculate resource information on Linux systems.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class SysInfoLinux extends SysInfo {
+ private static final Log LOG =
+ LogFactory.getLog(SysInfoLinux.class);
+
+ /**
+ * proc's meminfo virtual file has key-value pairs in the format
+ * "key:[ \t]*value[ \t]kB".
+ */
+ private static final String PROCFS_MEMFILE = "/proc/meminfo";
+ private static final Pattern PROCFS_MEMFILE_FORMAT =
+ Pattern.compile("^([a-zA-Z]*):[ \t]*([0-9]*)[ \t]kB");
+
+ // We need the values for the following keys in meminfo
+ private static final String MEMTOTAL_STRING = "MemTotal";
+ private static final String SWAPTOTAL_STRING = "SwapTotal";
+ private static final String MEMFREE_STRING = "MemFree";
+ private static final String SWAPFREE_STRING = "SwapFree";
+ private static final String INACTIVE_STRING = "Inactive";
+
+ /**
+ * Patterns for parsing /proc/cpuinfo.
+ */
+ private static final String PROCFS_CPUINFO = "/proc/cpuinfo";
+ private static final Pattern PROCESSOR_FORMAT =
+ Pattern.compile("^processor[ \t]:[ \t]*([0-9]*)");
+ private static final Pattern FREQUENCY_FORMAT =
+ Pattern.compile("^cpu MHz[ \t]*:[ \t]*([0-9.]*)");
+ private static final Pattern PHYSICAL_ID_FORMAT =
+ Pattern.compile("^physical id[ \t]*:[ \t]*([0-9]*)");
+ private static final Pattern CORE_ID_FORMAT =
+ Pattern.compile("^core id[ \t]*:[ \t]*([0-9]*)");
+
+ /**
+ * Pattern for parsing /proc/stat.
+ */
+ private static final String PROCFS_STAT = "/proc/stat";
+ private static final Pattern CPU_TIME_FORMAT =
+ Pattern.compile("^cpu[ \t]*([0-9]*)" +
+ "[ \t]*([0-9]*)[ \t]*([0-9]*)[ \t].*");
+ private CpuTimeTracker cpuTimeTracker;
+
+ private String procfsMemFile;
+ private String procfsCpuFile;
+ private String procfsStatFile;
+ private long jiffyLengthInMillis;
+
+ private long ramSize = 0;
+ private long swapSize = 0;
+ private long ramSizeFree = 0; // free ram space on the machine (kB)
+ private long swapSizeFree = 0; // free swap space on the machine (kB)
+ private long inactiveSize = 0; // inactive cache memory (kB)
+ /* number of logical processors on the system. */
+ private int numProcessors = 0;
+ /* number of physical cores on the system. */
+ private int numCores = 0;
+ private long cpuFrequency = 0L; // CPU frequency on the system (kHz)
+
+ private boolean readMemInfoFile = false;
+ private boolean readCpuInfoFile = false;
+
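+ // getConf(...) returns -1 off Linux or on error; 1000D / -1 rounds to
+ // -1000, which Math.max clamps back to the -1 "unavailable" sentinel.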
+ public static final long PAGE_SIZE = getConf("PAGESIZE");
+ public static final long JIFFY_LENGTH_IN_MILLIS =
+ Math.max(Math.round(1000D / getConf("CLK_TCK")), -1);
+
+ private static long getConf(String attr) {
+ if (Shell.LINUX) {
+ try {
+ ShellCommandExecutor shellExecutorClk = new ShellCommandExecutor(
+ new String[] {"getconf", attr });
+ shellExecutorClk.execute();
+ return Long.parseLong(shellExecutorClk.getOutput().replace("\n", ""));
+ } catch (IOException|NumberFormatException e) {
+ return -1;
+ }
+ }
+ return -1;
+ }
+
+ /**
+ * Get current time.
+ * @return Unix time stamp in milliseconds
+ */
+ long getCurrentTime() {
+ return System.currentTimeMillis();
+ }
+
+ public SysInfoLinux() {
+ this(PROCFS_MEMFILE, PROCFS_CPUINFO, PROCFS_STAT,
+ JIFFY_LENGTH_IN_MILLIS);
+ }
+
+ /**
+ * Constructor which allows overriding the /proc/ file paths. This will be
+ * used only in unit tests.
+ * @param procfsMemFile fake file for /proc/meminfo
+ * @param procfsCpuFile fake file for /proc/cpuinfo
+ * @param procfsStatFile fake file for /proc/stat
+ * @param jiffyLengthInMillis fake jiffy length value
+ */
+ @VisibleForTesting
+ public SysInfoLinux(String procfsMemFile,
+ String procfsCpuFile,
+ String procfsStatFile,
+ long jiffyLengthInMillis) {
+ this.procfsMemFile = procfsMemFile;
+ this.procfsCpuFile = procfsCpuFile;
+ this.procfsStatFile = procfsStatFile;
+ this.jiffyLengthInMillis = jiffyLengthInMillis;
+ this.cpuTimeTracker = new CpuTimeTracker(jiffyLengthInMillis);
+ }
+
+ /**
+ * Read /proc/meminfo, parse and compute memory information only once.
+ */
+ private void readProcMemInfoFile() {
+ readProcMemInfoFile(false);
+ }
+
+ /**
+ * Read /proc/meminfo, parse and compute memory information.
+ * @param readAgain if false, read only on the first time
+ */
+ private void readProcMemInfoFile(boolean readAgain) {
+
+ if (readMemInfoFile && !readAgain) {
+ return;
+ }
+
+ // Read "/proc/memInfo" file
+ BufferedReader in;
+ InputStreamReader fReader;
+ try {
+ fReader = new InputStreamReader(
+ new FileInputStream(procfsMemFile), Charset.forName("UTF-8"));
+ in = new BufferedReader(fReader);
+ } catch (FileNotFoundException f) {
+ // shouldn't happen....
+ LOG.warn("Couldn't read " + procfsMemFile
+ + "; can't determine memory settings");
+ return;
+ }
+
+ Matcher mat;
+
+ try {
+ String str = in.readLine();
+ while (str != null) {
+ mat = PROCFS_MEMFILE_FORMAT.matcher(str);
+ if (mat.find()) {
+ if (mat.group(1).equals(MEMTOTAL_STRING)) {
+ ramSize = Long.parseLong(mat.group(2));
+ } else if (mat.group(1).equals(SWAPTOTAL_STRING)) {
+ swapSize = Long.parseLong(mat.group(2));
+ } else if (mat.group(1).equals(MEMFREE_STRING)) {
+ ramSizeFree = Long.parseLong(mat.group(2));
+ } else if (mat.group(1).equals(SWAPFREE_STRING)) {
+ swapSizeFree = Long.parseLong(mat.group(2));
+ } else if (mat.group(1).equals(INACTIVE_STRING)) {
+ inactiveSize = Long.parseLong(mat.group(2));
+ }
+ }
+ str = in.readLine();
+ }
+ } catch (IOException io) {
+ LOG.warn("Error reading the stream " + io);
+ } finally {
+ // Close the streams
+ try {
+ fReader.close();
+ try {
+ in.close();
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + in);
+ }
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + fReader);
+ }
+ }
+
+ readMemInfoFile = true;
+ }
+
+ /**
+ * Read /proc/cpuinfo, parse and calculate CPU information.
+ */
+ private void readProcCpuInfoFile() {
+ // This file needs to be read only once
+ if (readCpuInfoFile) {
+ return;
+ }
+ HashSet<String> coreIdSet = new HashSet<>();
+ // Read "/proc/cpuinfo" file
+ BufferedReader in;
+ InputStreamReader fReader;
+ try {
+ fReader = new InputStreamReader(
+ new FileInputStream(procfsCpuFile), Charset.forName("UTF-8"));
+ in = new BufferedReader(fReader);
+ } catch (FileNotFoundException f) {
+ // shouldn't happen....
+ LOG.warn("Couldn't read " + procfsCpuFile + "; can't determine cpu info");
+ return;
+ }
+ Matcher mat;
+ try {
+ numProcessors = 0;
+ numCores = 1;
+ String currentPhysicalId = "";
+ String str = in.readLine();
+ while (str != null) {
+ mat = PROCESSOR_FORMAT.matcher(str);
+ if (mat.find()) {
+ numProcessors++;
+ }
+ mat = FREQUENCY_FORMAT.matcher(str);
+ if (mat.find()) {
+ cpuFrequency = (long)(Double.parseDouble(mat.group(1)) * 1000); // kHz
+ }
+ mat = PHYSICAL_ID_FORMAT.matcher(str);
+ if (mat.find()) {
+ currentPhysicalId = str;
+ }
+ mat = CORE_ID_FORMAT.matcher(str);
+ if (mat.find()) {
+ coreIdSet.add(currentPhysicalId + " " + str);
+ numCores = coreIdSet.size();
+ }
+ str = in.readLine();
+ }
+ } catch (IOException io) {
+ LOG.warn("Error reading the stream " + io);
+ } finally {
+ // Close the streams
+ try {
+ fReader.close();
+ try {
+ in.close();
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + in);
+ }
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + fReader);
+ }
+ }
+ readCpuInfoFile = true;
+ }
+
+ /**
+ * Read /proc/stat file, parse and calculate cumulative CPU.
+ */
+ private void readProcStatFile() {
+ // Read "/proc/stat" file
+ BufferedReader in;
+ InputStreamReader fReader;
+ try {
+ fReader = new InputStreamReader(
+ new FileInputStream(procfsStatFile), Charset.forName("UTF-8"));
+ in = new BufferedReader(fReader);
+ } catch (FileNotFoundException f) {
+ // shouldn't happen....
+ return;
+ }
+
+ Matcher mat;
+ try {
+ String str = in.readLine();
+ while (str != null) {
+ mat = CPU_TIME_FORMAT.matcher(str);
+ if (mat.find()) {
+ long uTime = Long.parseLong(mat.group(1));
+ long nTime = Long.parseLong(mat.group(2));
+ long sTime = Long.parseLong(mat.group(3));
+ cpuTimeTracker.updateElapsedJiffies(
+ BigInteger.valueOf(uTime + nTime + sTime),
+ getCurrentTime());
+ break;
+ }
+ str = in.readLine();
+ }
+ } catch (IOException io) {
+ LOG.warn("Error reading the stream " + io);
+ } finally {
+ // Close the streams
+ try {
+ fReader.close();
+ try {
+ in.close();
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + in);
+ }
+ } catch (IOException i) {
+ LOG.warn("Error closing the stream " + fReader);
+ }
+ }
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getPhysicalMemorySize() {
+ readProcMemInfoFile();
+ return ramSize * 1024;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getVirtualMemorySize() {
+ readProcMemInfoFile();
+ return (ramSize + swapSize) * 1024;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getAvailablePhysicalMemorySize() {
+ readProcMemInfoFile(true);
+ return (ramSizeFree + inactiveSize) * 1024;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getAvailableVirtualMemorySize() {
+ readProcMemInfoFile(true);
+ return (ramSizeFree + swapSizeFree + inactiveSize) * 1024;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public int getNumProcessors() {
+ readProcCpuInfoFile();
+ return numProcessors;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public int getNumCores() {
+ readProcCpuInfoFile();
+ return numCores;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getCpuFrequency() {
+ readProcCpuInfoFile();
+ return cpuFrequency;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getCumulativeCpuTime() {
+ readProcStatFile();
+ return cpuTimeTracker.getCumulativeCpuTime();
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public float getCpuUsage() {
+ readProcStatFile();
+ float overallCpuUsage = cpuTimeTracker.getCpuTrackerUsagePercent();
+ if (overallCpuUsage != CpuTimeTracker.UNAVAILABLE) {
+ overallCpuUsage = overallCpuUsage / getNumProcessors();
+ }
+ return overallCpuUsage;
+ }
+
+ /**
+ * Test the {@link SysInfoLinux}.
+ *
+ * @param args - arguments to this calculator test
+ */
+ public static void main(String[] args) {
+ SysInfoLinux plugin = new SysInfoLinux();
+ System.out.println("Physical memory Size (bytes) : "
+ + plugin.getPhysicalMemorySize());
+ System.out.println("Total Virtual memory Size (bytes) : "
+ + plugin.getVirtualMemorySize());
+ System.out.println("Available Physical memory Size (bytes) : "
+ + plugin.getAvailablePhysicalMemorySize());
+ System.out.println("Total Available Virtual memory Size (bytes) : "
+ + plugin.getAvailableVirtualMemorySize());
+ System.out.println("Number of Processors : " + plugin.getNumProcessors());
+ System.out.println("CPU frequency (kHz) : " + plugin.getCpuFrequency());
+ System.out.println("Cumulative CPU time (ms) : " +
+ plugin.getCumulativeCpuTime());
+ try {
+ // Sleep so we can compute the CPU usage
+ Thread.sleep(500L);
+ } catch (InterruptedException e) {
+ // do nothing
+ }
+ System.out.println("CPU usage % : " + plugin.getCpuUsage());
+ }
+
+ @VisibleForTesting
+ void setReadCpuInfoFile(boolean readCpuInfoFileValue) {
+ this.readCpuInfoFile = readCpuInfoFileValue;
+ }
+
+ public long getJiffyLengthInMillis() {
+ return this.jiffyLengthInMillis;
+ }
+}
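In getCpuUsage() above, the tracker's single-core percentage is normalized by
the processor count:

    usage% = (delta_jiffies * jiffyLengthInMillis * 100 / delta_wall_clock_ms)
             / numProcessors

For example, 100 jiffies of 10 ms burned over 2000 ms of wall clock on an
8-processor box works out to 50% / 8 = 6.25%, the figure the unit test below
asserts.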
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac604837/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
new file mode 100644
index 0000000..da4c1c5
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
@@ -0,0 +1,181 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import java.io.IOException;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+
+/**
+ * Plugin to calculate resource information on Windows systems.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class SysInfoWindows extends SysInfo {
+
+ private static final Log LOG = LogFactory.getLog(SysInfoWindows.class);
+
+ private long vmemSize;
+ private long memSize;
+ private long vmemAvailable;
+ private long memAvailable;
+ private int numProcessors;
+ private long cpuFrequencyKhz;
+ private long cumulativeCpuTimeMs;
+ private float cpuUsage;
+
+ private long lastRefreshTime;
+ static final int REFRESH_INTERVAL_MS = 1000;
+
+ public SysInfoWindows() {
+ lastRefreshTime = 0;
+ reset();
+ }
+
+ @VisibleForTesting
+ long now() {
+ // monotonic milliseconds, so the delta below is comparable to
+ // REFRESH_INTERVAL_MS (System.nanoTime() would be off by a factor of 1e6)
+ return Time.monotonicNow();
+ }
+
+ void reset() {
+ vmemSize = -1;
+ memSize = -1;
+ vmemAvailable = -1;
+ memAvailable = -1;
+ numProcessors = -1;
+ cpuFrequencyKhz = -1;
+ cumulativeCpuTimeMs = -1;
+ cpuUsage = -1;
+ }
+
+ String getSystemInfoInfoFromShell() {
+ ShellCommandExecutor shellExecutor = new ShellCommandExecutor(
+ new String[] {Shell.WINUTILS, "systeminfo" });
+ try {
+ shellExecutor.execute();
+ return shellExecutor.getOutput();
+ } catch (IOException e) {
+ LOG.error(StringUtils.stringifyException(e));
+ }
+ return null;
+ }
+
+ void refreshIfNeeded() {
+ long now = now();
+ if (now - lastRefreshTime > REFRESH_INTERVAL_MS) {
+ long refreshInterval = now - lastRefreshTime;
+ lastRefreshTime = now;
+ long lastCumCpuTimeMs = cumulativeCpuTimeMs;
+ reset();
+ String sysInfoStr = getSystemInfoInfoFromShell();
+ if (sysInfoStr != null) {
+ final int sysInfoSplitCount = 7;
+ String[] sysInfo = sysInfoStr.substring(0, sysInfoStr.indexOf("\r\n"))
+ .split(",");
+ if (sysInfo.length == sysInfoSplitCount) {
+ try {
+ vmemSize = Long.parseLong(sysInfo[0]);
+ memSize = Long.parseLong(sysInfo[1]);
+ vmemAvailable = Long.parseLong(sysInfo[2]);
+ memAvailable = Long.parseLong(sysInfo[3]);
+ numProcessors = Integer.parseInt(sysInfo[4]);
+ cpuFrequencyKhz = Long.parseLong(sysInfo[5]);
+ cumulativeCpuTimeMs = Long.parseLong(sysInfo[6]);
+ if (lastCumCpuTimeMs != -1) {
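+ // ratio of CPU ms consumed to wall-clock ms since the last refresh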
+ cpuUsage = (cumulativeCpuTimeMs - lastCumCpuTimeMs)
+ / (refreshInterval * 1.0f);
+ }
+ } catch (NumberFormatException nfe) {
+ LOG.warn("Error parsing sysInfo", nfe);
+ }
+ } else {
+ LOG.warn("Expected split length of sysInfo to be "
+ + sysInfoSplitCount + ". Got " + sysInfo.length);
+ }
+ }
+ }
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getVirtualMemorySize() {
+ refreshIfNeeded();
+ return vmemSize;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getPhysicalMemorySize() {
+ refreshIfNeeded();
+ return memSize;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getAvailableVirtualMemorySize() {
+ refreshIfNeeded();
+ return vmemAvailable;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getAvailablePhysicalMemorySize() {
+ refreshIfNeeded();
+ return memAvailable;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public int getNumProcessors() {
+ refreshIfNeeded();
+ return numProcessors;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public int getNumCores() {
+ return getNumProcessors();
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getCpuFrequency() {
+ refreshIfNeeded();
+ return cpuFrequencyKhz;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getCumulativeCpuTime() {
+ refreshIfNeeded();
+ return cumulativeCpuTimeMs;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public float getCpuUsage() {
+ refreshIfNeeded();
+ return cpuUsage;
+ }
+}
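For reference, winutils systeminfo is expected to emit a single
CRLF-terminated line of seven comma-separated counters, which
refreshIfNeeded() reads positionally (sample values taken from the test
below):

    17177038848,8589467648,15232745472,6400417792,1,2805000,6261812

in the order vmemSize, memSize, vmemAvailable, memAvailable, numProcessors,
cpuFrequencyKhz, cumulativeCpuTimeMs.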
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac604837/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
new file mode 100644
index 0000000..73edc77
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
@@ -0,0 +1,323 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.util.Random;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.fs.Path;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * A JUnit test for {@link SysInfoLinux}.
+ * Creates fake /proc/ files and verifies the parsing and calculations.
+ */
+public class TestSysInfoLinux {
+ /**
+ * SysInfoLinux with a fake timer.
+ */
+ static class FakeLinuxResourceCalculatorPlugin extends
+ SysInfoLinux {
+
+ long currentTime = 0;
+ public FakeLinuxResourceCalculatorPlugin(String procfsMemFile,
+ String procfsCpuFile,
+ String procfsStatFile,
+ long jiffyLengthInMillis) {
+ super(procfsMemFile, procfsCpuFile, procfsStatFile, jiffyLengthInMillis);
+ }
+ @Override
+ long getCurrentTime() {
+ return currentTime;
+ }
+ public void advanceTime(long adv) {
+ currentTime += adv * this.getJiffyLengthInMillis();
+ }
+ }
+ private static final FakeLinuxResourceCalculatorPlugin plugin;
+ private static String TEST_ROOT_DIR = new Path(System.getProperty(
+ "test.build.data", "/tmp")).toString().replace(' ', '+');
+ private static final String FAKE_MEMFILE;
+ private static final String FAKE_CPUFILE;
+ private static final String FAKE_STATFILE;
+ private static final long FAKE_JIFFY_LENGTH = 10L;
+ static {
+ int randomNum = (new Random()).nextInt(1000000000);
+ FAKE_MEMFILE = TEST_ROOT_DIR + File.separator + "MEMINFO_" + randomNum;
+ FAKE_CPUFILE = TEST_ROOT_DIR + File.separator + "CPUINFO_" + randomNum;
+ FAKE_STATFILE = TEST_ROOT_DIR + File.separator + "STATINFO_" + randomNum;
+ plugin = new FakeLinuxResourceCalculatorPlugin(FAKE_MEMFILE, FAKE_CPUFILE,
+ FAKE_STATFILE,
+ FAKE_JIFFY_LENGTH);
+ }
+ static final String MEMINFO_FORMAT =
+ "MemTotal: %d kB\n" +
+ "MemFree: %d kB\n" +
+ "Buffers: 138244 kB\n" +
+ "Cached: 947780 kB\n" +
+ "SwapCached: 142880 kB\n" +
+ "Active: 3229888 kB\n" +
+ "Inactive: %d kB\n" +
+ "SwapTotal: %d kB\n" +
+ "SwapFree: %d kB\n" +
+ "Dirty: 122012 kB\n" +
+ "Writeback: 0 kB\n" +
+ "AnonPages: 2710792 kB\n" +
+ "Mapped: 24740 kB\n" +
+ "Slab: 132528 kB\n" +
+ "SReclaimable: 105096 kB\n" +
+ "SUnreclaim: 27432 kB\n" +
+ "PageTables: 11448 kB\n" +
+ "NFS_Unstable: 0 kB\n" +
+ "Bounce: 0 kB\n" +
+ "CommitLimit: 4125904 kB\n" +
+ "Committed_AS: 4143556 kB\n" +
+ "VmallocTotal: 34359738367 kB\n" +
+ "VmallocUsed: 1632 kB\n" +
+ "VmallocChunk: 34359736375 kB\n" +
+ "HugePages_Total: 0\n" +
+ "HugePages_Free: 0\n" +
+ "HugePages_Rsvd: 0\n" +
+ "Hugepagesize: 2048 kB";
+
+ static final String CPUINFO_FORMAT =
+ "processor : %s\n" +
+ "vendor_id : AuthenticAMD\n" +
+ "cpu family : 15\n" +
+ "model : 33\n" +
+ "model name : Dual Core AMD Opteron(tm) Processor 280\n" +
+ "stepping : 2\n" +
+ "cpu MHz : %f\n" +
+ "cache size : 1024 KB\n" +
+ "physical id : %s\n" +
+ "siblings : 2\n" +
+ "core id : %s\n" +
+ "cpu cores : 2\n" +
+ "fpu : yes\n" +
+ "fpu_exception : yes\n" +
+ "cpuid level : 1\n" +
+ "wp : yes\n" +
+ "flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov " +
+ "pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt lm " +
+ "3dnowext 3dnow pni lahf_lm cmp_legacy\n" +
+ "bogomips : 4792.41\n" +
+ "TLB size : 1024 4K pages\n" +
+ "clflush size : 64\n" +
+ "cache_alignment : 64\n" +
+ "address sizes : 40 bits physical, 48 bits virtual\n" +
+ "power management: ts fid vid ttp";
+
+ static final String STAT_FILE_FORMAT =
+ "cpu %d %d %d 1646495089 831319 48713 164346 0\n" +
+ "cpu0 15096055 30805 3823005 411456015 206027 13 14269 0\n" +
+ "cpu1 14760561 89890 6432036 408707910 456857 48074 130857 0\n" +
+ "cpu2 12761169 20842 3758639 413976772 98028 411 10288 0\n" +
+ "cpu3 12355207 47322 5789691 412354390 70406 213 8931 0\n" +
+ "intr 114648668 20010764 2 0 945665 2 0 0 0 0 0 0 0 4 0 0 0 0 0 0\n" +
+ "ctxt 242017731764\n" +
+ "btime 1257808753\n" +
+ "processes 26414943\n" +
+ "procs_running 1\n" +
+ "procs_blocked 0\n";
+
+ /**
+ * Test parsing /proc/stat and /proc/cpuinfo
+ * @throws IOException
+ */
+ @Test
+ public void parsingProcStatAndCpuFile() throws IOException {
+ // Write fake /proc/cpuinfo file.
+ long numProcessors = 8;
+ long cpuFrequencyKHz = 2392781;
+ String fileContent = "";
+ for (int i = 0; i < numProcessors; i++) {
+ fileContent +=
+ String.format(CPUINFO_FORMAT, i, cpuFrequencyKHz / 1000D, 0, 0)
+ + "\n";
+ }
+ File tempFile = new File(FAKE_CPUFILE);
+ tempFile.deleteOnExit();
+ FileWriter fWriter = new FileWriter(FAKE_CPUFILE);
+ fWriter.write(fileContent);
+ fWriter.close();
+ assertEquals(plugin.getNumProcessors(), numProcessors);
+ assertEquals(plugin.getCpuFrequency(), cpuFrequencyKHz);
+
+ // Write fake /proc/stat file.
+ long uTime = 54972994;
+ long nTime = 188860;
+ long sTime = 19803373;
+ tempFile = new File(FAKE_STATFILE);
+ tempFile.deleteOnExit();
+ updateStatFile(uTime, nTime, sTime);
+ assertEquals(plugin.getCumulativeCpuTime(),
+ FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
+ assertEquals(plugin.getCpuUsage(), (float)(CpuTimeTracker.UNAVAILABLE), 0.0);
+
+ // Advance the time and sample again to test the CPU usage calculation
+ uTime += 100L;
+ plugin.advanceTime(200L);
+ updateStatFile(uTime, nTime, sTime);
+ assertEquals(plugin.getCumulativeCpuTime(),
+ FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
+ assertEquals(plugin.getCpuUsage(), 6.25F, 0.0);
+
+ // Advance the time and sample again. This time, we call getCpuUsage() only.
+ uTime += 600L;
+ plugin.advanceTime(300L);
+ updateStatFile(uTime, nTime, sTime);
+ assertEquals(plugin.getCpuUsage(), 25F, 0.0);
+
+ // Advance very short period of time (one jiffy length).
+ // In this case, CPU usage should not be updated.
+ uTime += 1L;
+ plugin.advanceTime(1L);
+ updateStatFile(uTime, nTime, sTime);
+ assertEquals(plugin.getCumulativeCpuTime(),
+ FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
+ assertEquals(plugin.getCpuUsage(), 25F, 0.0); // CPU usage is not updated.
+ }
+
+ /**
+ * Write information to fake /proc/stat file
+ */
+ private void updateStatFile(long uTime, long nTime, long sTime)
+ throws IOException {
+ FileWriter fWriter = new FileWriter(FAKE_STATFILE);
+ fWriter.write(String.format(STAT_FILE_FORMAT, uTime, nTime, sTime));
+ fWriter.close();
+ }
+
+ /**
+ * Test parsing /proc/meminfo
+ * @throws IOException
+ */
+ @Test
+ public void parsingProcMemFile() throws IOException {
+ long memTotal = 4058864L;
+ long memFree = 99632L;
+ long inactive = 567732L;
+ long swapTotal = 2096472L;
+ long swapFree = 1818480L;
+ File tempFile = new File(FAKE_MEMFILE);
+ tempFile.deleteOnExit();
+ FileWriter fWriter = new FileWriter(FAKE_MEMFILE);
+ fWriter.write(String.format(MEMINFO_FORMAT,
+ memTotal, memFree, inactive, swapTotal, swapFree));
+
+ fWriter.close();
+ assertEquals(plugin.getAvailablePhysicalMemorySize(),
+ 1024L * (memFree + inactive));
+ assertEquals(plugin.getAvailableVirtualMemorySize(),
+ 1024L * (memFree + inactive + swapFree));
+ assertEquals(plugin.getPhysicalMemorySize(), 1024L * memTotal);
+ assertEquals(plugin.getVirtualMemorySize(), 1024L * (memTotal + swapTotal));
+ }
+
+ @Test
+ public void testCoreCounts() throws IOException {
+
+ String fileContent = "";
+ // single core, hyper threading
+ long numProcessors = 2;
+ long cpuFrequencyKHz = 2392781;
+ for (int i = 0; i < numProcessors; i++) {
+ fileContent =
+ fileContent.concat(String.format(CPUINFO_FORMAT, i,
+ cpuFrequencyKHz / 1000D, 0, 0));
+ fileContent = fileContent.concat("\n");
+ }
+ writeFakeCPUInfoFile(fileContent);
+ plugin.setReadCpuInfoFile(false);
+ assertEquals(numProcessors, plugin.getNumProcessors());
+ assertEquals(1, plugin.getNumCores());
+
+ // single socket quad core, no hyper threading
+ fileContent = "";
+ numProcessors = 4;
+ for (int i = 0; i < numProcessors; i++) {
+ fileContent =
+ fileContent.concat(String.format(CPUINFO_FORMAT, i,
+ cpuFrequencyKHz / 1000D, 0, i));
+ fileContent = fileContent.concat("\n");
+ }
+ writeFakeCPUInfoFile(fileContent);
+ plugin.setReadCpuInfoFile(false);
+ assertEquals(numProcessors, plugin.getNumProcessors());
+ assertEquals(4, plugin.getNumCores());
+
+ // dual socket single core, hyper threading
+ fileContent = "";
+ numProcessors = 4;
+ for (int i = 0; i < numProcessors; i++) {
+ fileContent =
+ fileContent.concat(String.format(CPUINFO_FORMAT, i,
+ cpuFrequencyKHz / 1000D, i / 2, 0));
+ fileContent = fileContent.concat("\n");
+ }
+ writeFakeCPUInfoFile(fileContent);
+ plugin.setReadCpuInfoFile(false);
+ assertEquals(numProcessors, plugin.getNumProcessors());
+ assertEquals(2, plugin.getNumCores());
+
+ // dual socket, dual core, no hyper threading
+ fileContent = "";
+ numProcessors = 4;
+ for (int i = 0; i < numProcessors; i++) {
+ fileContent =
+ fileContent.concat(String.format(CPUINFO_FORMAT, i,
+ cpuFrequencyKHz / 1000D, i / 2, i % 2));
+ fileContent = fileContent.concat("\n");
+ }
+ writeFakeCPUInfoFile(fileContent);
+ plugin.setReadCpuInfoFile(false);
+ assertEquals(numProcessors, plugin.getNumProcessors());
+ assertEquals(4, plugin.getNumCores());
+
+ // dual socket, dual core, hyper threading
+ fileContent = "";
+ numProcessors = 8;
+ for (int i = 0; i < numProcessors; i++) {
+ fileContent =
+ fileContent.concat(String.format(CPUINFO_FORMAT, i,
+ cpuFrequencyKHz / 1000D, i / 4, (i % 4) / 2));
+ fileContent = fileContent.concat("\n");
+ }
+ writeFakeCPUInfoFile(fileContent);
+ plugin.setReadCpuInfoFile(false);
+ assertEquals(numProcessors, plugin.getNumProcessors());
+ assertEquals(4, plugin.getNumCores());
+ }
+
+ private void writeFakeCPUInfoFile(String content) throws IOException {
+ File tempFile = new File(FAKE_CPUFILE);
+ FileWriter fWriter = new FileWriter(FAKE_CPUFILE);
+ tempFile.deleteOnExit();
+ try {
+ fWriter.write(content);
+ } finally {
+ IOUtils.closeQuietly(fWriter);
+ }
+ }
+}
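To see where the asserted usage figures come from (FAKE_JIFFY_LENGTH = 10 ms,
8 fake processors; jiffy lengths cancel between CPU time and wall clock):

    second sample: 100 jiffies CPU over 200 jiffies wall -> 50% -> /8 = 6.25%
    third sample:  600 jiffies CPU over 300 jiffies wall -> 200% -> /8 = 25%

The fourth sample advances only a single jiffy, below the tracker's minimum
update interval, so getCpuUsage() keeps returning the cached 25%.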
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac604837/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoWindows.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoWindows.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoWindows.java
new file mode 100644
index 0000000..7924c02
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoWindows.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+public class TestSysInfoWindows {
+
+ static class SysInfoWindowsMock extends SysInfoWindows {
+ private long time = SysInfoWindows.REFRESH_INTERVAL_MS + 1;
+ private String infoStr = null;
+ void setSysinfoString(String infoStr) {
+ this.infoStr = infoStr;
+ }
+ void advance(long dur) {
+ time += dur;
+ }
+ @Override
+ String getSystemInfoInfoFromShell() {
+ return infoStr;
+ }
+ @Override
+ long now() {
+ return time;
+ }
+ }
+
+ @Test(timeout = 10000)
+ public void parseSystemInfoString() {
+ SysInfoWindowsMock tester = new SysInfoWindowsMock();
+ tester.setSysinfoString(
+ "17177038848,8589467648,15232745472,6400417792,1,2805000,6261812\r\n");
+ // info str derived from windows shell command has \r\n termination
+ assertEquals(17177038848L, tester.getVirtualMemorySize());
+ assertEquals(8589467648L, tester.getPhysicalMemorySize());
+ assertEquals(15232745472L, tester.getAvailableVirtualMemorySize());
+ assertEquals(6400417792L, tester.getAvailablePhysicalMemorySize());
+ assertEquals(1, tester.getNumProcessors());
+ assertEquals(1, tester.getNumCores());
+ assertEquals(2805000L, tester.getCpuFrequency());
+ assertEquals(6261812L, tester.getCumulativeCpuTime());
+ // undef on first call
+ assertEquals(-1.0, tester.getCpuUsage(), 0.0);
+ }
+
+ @Test(timeout = 10000)
+ public void refreshAndCpuUsage() throws InterruptedException {
+ SysInfoWindowsMock tester = new SysInfoWindowsMock();
+ tester.setSysinfoString(
+ "17177038848,8589467648,15232745472,6400417792,1,2805000,6261812\r\n");
+ // info str derived from windows shell command has \r\n termination
+ tester.getAvailablePhysicalMemorySize();
+ // verify information has been refreshed
+ assertEquals(6400417792L, tester.getAvailablePhysicalMemorySize());
+ assertEquals(-1.0, tester.getCpuUsage(), 0.0);
+
+ tester.setSysinfoString(
+ "17177038848,8589467648,15232745472,5400417792,1,2805000,6263012\r\n");
+ tester.getAvailablePhysicalMemorySize();
+ // verify information has not been refreshed
+ assertEquals(6400417792L, tester.getAvailablePhysicalMemorySize());
+ assertEquals(-1.0, tester.getCpuUsage(), 0.0);
+
+ // advance clock
+ tester.advance(SysInfoWindows.REFRESH_INTERVAL_MS + 1);
+
+ // verify information has been refreshed
+ assertEquals(5400417792L, tester.getAvailablePhysicalMemorySize());
+ assertEquals((6263012 - 6261812) / (SysInfoWindows.REFRESH_INTERVAL_MS + 1f),
+ tester.getCpuUsage(), 0.0);
+ }
+
+ @Test(timeout = 10000)
+ public void errorInGetSystemInfo() {
+ SysInfoWindowsMock tester = new SysInfoWindowsMock();
+ // info str derived from windows shell command has \r\n termination
+ tester.setSysinfoString(null);
+ // call a method to refresh values
+ tester.getAvailablePhysicalMemorySize();
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac604837/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java
deleted file mode 100644
index b09a4b6..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.util;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-import java.math.BigInteger;
-
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class CpuTimeTracker {
- public static final int UNAVAILABLE =
- ResourceCalculatorProcessTree.UNAVAILABLE;
- final long MINIMUM_UPDATE_INTERVAL;
-
- // CPU used time since system is on (ms)
- BigInteger cumulativeCpuTime = BigInteger.ZERO;
-
- // CPU used time read last time (ms)
- BigInteger lastCumulativeCpuTime = BigInteger.ZERO;
-
- // Unix timestamp while reading the CPU time (ms)
- long sampleTime;
- long lastSampleTime;
- float cpuUsage;
- BigInteger jiffyLengthInMillis;
-
- public CpuTimeTracker(long jiffyLengthInMillis) {
- this.jiffyLengthInMillis = BigInteger.valueOf(jiffyLengthInMillis);
- this.cpuUsage = UNAVAILABLE;
- this.sampleTime = UNAVAILABLE;
- this.lastSampleTime = UNAVAILABLE;
- MINIMUM_UPDATE_INTERVAL = 10 * jiffyLengthInMillis;
- }
-
- /**
- * Return percentage of cpu time spent over the time since last update.
- * CPU time spent is based on elapsed jiffies multiplied by amount of
- * time for 1 core. Thus, if you use 2 cores completely you would have spent
- * twice the actual time between updates and this will return 200%.
- *
- * @return Return percentage of cpu usage since last update, {@link
- * CpuTimeTracker#UNAVAILABLE} if there haven't been 2 updates more than
- * {@link CpuTimeTracker#MINIMUM_UPDATE_INTERVAL} apart
- */
- public float getCpuTrackerUsagePercent() {
- if (lastSampleTime == UNAVAILABLE ||
- lastSampleTime > sampleTime) {
- // lastSampleTime > sampleTime may happen when the system time is changed
- lastSampleTime = sampleTime;
- lastCumulativeCpuTime = cumulativeCpuTime;
- return cpuUsage;
- }
- // When lastSampleTime is sufficiently old, update cpuUsage.
- // Also take a sample of the current time and cumulative CPU time for the
- // use of the next calculation.
- if (sampleTime > lastSampleTime + MINIMUM_UPDATE_INTERVAL) {
- cpuUsage =
- ((cumulativeCpuTime.subtract(lastCumulativeCpuTime)).floatValue())
- * 100F / ((float) (sampleTime - lastSampleTime));
- lastSampleTime = sampleTime;
- lastCumulativeCpuTime = cumulativeCpuTime;
- }
- return cpuUsage;
- }
-
- public void updateElapsedJiffies(BigInteger elapedJiffies, long sampleTime) {
- this.cumulativeCpuTime = elapedJiffies.multiply(jiffyLengthInMillis);
- this.sampleTime = sampleTime;
- }
-
- @Override
- public String toString() {
- StringBuilder sb = new StringBuilder();
- sb.append("SampleTime " + this.sampleTime);
- sb.append(" CummulativeCpuTime " + this.cumulativeCpuTime);
- sb.append(" LastSampleTime " + this.lastSampleTime);
- sb.append(" LastCummulativeCpuTime " + this.lastCumulativeCpuTime);
- sb.append(" CpuUsage " + this.cpuUsage);
- sb.append(" JiffyLengthMillisec " + this.jiffyLengthInMillis);
- return sb.toString();
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac604837/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/LinuxResourceCalculatorPlugin.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/LinuxResourceCalculatorPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/LinuxResourceCalculatorPlugin.java
index bf4cfa4..f458f16 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/LinuxResourceCalculatorPlugin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/LinuxResourceCalculatorPlugin.java
@@ -15,25 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
package org.apache.hadoop.yarn.util;
-import java.io.BufferedReader;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.InputStreamReader;
-import java.io.IOException;
-import java.math.BigInteger;
-import java.nio.charset.Charset;
-import java.util.HashSet;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.util.SysInfoLinux;
/**
* Plugin to calculate resource information on Linux systems.
@@ -41,383 +27,9 @@ import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class LinuxResourceCalculatorPlugin extends ResourceCalculatorPlugin {
- private static final Log LOG =
- LogFactory.getLog(LinuxResourceCalculatorPlugin.class);
-
- /**
- * proc's meminfo virtual file has keys-values in the format
- * "key:[ \t]*value[ \t]kB".
- */
- private static final String PROCFS_MEMFILE = "/proc/meminfo";
- private static final Pattern PROCFS_MEMFILE_FORMAT =
- Pattern.compile("^([a-zA-Z]*):[ \t]*([0-9]*)[ \t]kB");
-
- // We need the values for the following keys in meminfo
- private static final String MEMTOTAL_STRING = "MemTotal";
- private static final String SWAPTOTAL_STRING = "SwapTotal";
- private static final String MEMFREE_STRING = "MemFree";
- private static final String SWAPFREE_STRING = "SwapFree";
- private static final String INACTIVE_STRING = "Inactive";
-
- /**
- * Patterns for parsing /proc/cpuinfo.
- */
- private static final String PROCFS_CPUINFO = "/proc/cpuinfo";
- private static final Pattern PROCESSOR_FORMAT =
- Pattern.compile("^processor[ \t]:[ \t]*([0-9]*)");
- private static final Pattern FREQUENCY_FORMAT =
- Pattern.compile("^cpu MHz[ \t]*:[ \t]*([0-9.]*)");
- private static final Pattern PHYSICAL_ID_FORMAT =
- Pattern.compile("^physical id[ \t]*:[ \t]*([0-9]*)");
- private static final Pattern CORE_ID_FORMAT =
- Pattern.compile("^core id[ \t]*:[ \t]*([0-9]*)");
-
- /**
- * Pattern for parsing /proc/stat.
- */
- private static final String PROCFS_STAT = "/proc/stat";
- private static final Pattern CPU_TIME_FORMAT =
- Pattern.compile("^cpu[ \t]*([0-9]*)" +
- "[ \t]*([0-9]*)[ \t]*([0-9]*)[ \t].*");
- private CpuTimeTracker cpuTimeTracker;
-
- private String procfsMemFile;
- private String procfsCpuFile;
- private String procfsStatFile;
- private long jiffyLengthInMillis;
-
- private long ramSize = 0;
- private long swapSize = 0;
- private long ramSizeFree = 0; // free ram space on the machine (kB)
- private long swapSizeFree = 0; // free swap space on the machine (kB)
- private long inactiveSize = 0; // inactive cache memory (kB)
- /* number of logical processors on the system. */
- private int numProcessors = 0;
- /* number of physical cores on the system. */
- private int numCores = 0;
- private long cpuFrequency = 0L; // CPU frequency on the system (kHz)
-
- private boolean readMemInfoFile = false;
- private boolean readCpuInfoFile = false;
-
- /**
- * Get current time.
- * @return Unix time stamp in millisecond
- */
- long getCurrentTime() {
- return System.currentTimeMillis();
- }
public LinuxResourceCalculatorPlugin() {
- this(PROCFS_MEMFILE, PROCFS_CPUINFO, PROCFS_STAT,
- ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS);
- }
-
- /**
- * Constructor which allows assigning the /proc/ directories. This will be
- * used only in unit tests.
- * @param procfsMemFile fake file for /proc/meminfo
- * @param procfsCpuFile fake file for /proc/cpuinfo
- * @param procfsStatFile fake file for /proc/stat
- * @param jiffyLengthInMillis fake jiffy length value
- */
- public LinuxResourceCalculatorPlugin(String procfsMemFile,
- String procfsCpuFile,
- String procfsStatFile,
- long jiffyLengthInMillis) {
- this.procfsMemFile = procfsMemFile;
- this.procfsCpuFile = procfsCpuFile;
- this.procfsStatFile = procfsStatFile;
- this.jiffyLengthInMillis = jiffyLengthInMillis;
- this.cpuTimeTracker = new CpuTimeTracker(jiffyLengthInMillis);
- }
-
- /**
- * Read /proc/meminfo, parse and compute memory information only once.
- */
- private void readProcMemInfoFile() {
- readProcMemInfoFile(false);
- }
-
- /**
- * Read /proc/meminfo, parse and compute memory information.
- * @param readAgain if false, read only on the first time
- */
- private void readProcMemInfoFile(boolean readAgain) {
-
- if (readMemInfoFile && !readAgain) {
- return;
- }
-
- // Read "/proc/memInfo" file
- BufferedReader in;
- InputStreamReader fReader;
- try {
- fReader = new InputStreamReader(
- new FileInputStream(procfsMemFile), Charset.forName("UTF-8"));
- in = new BufferedReader(fReader);
- } catch (FileNotFoundException f) {
- // shouldn't happen....
- LOG.warn("Couldn't read " + procfsMemFile
- + "; can't determine memory settings");
- return;
- }
-
- Matcher mat;
-
- try {
- String str = in.readLine();
- while (str != null) {
- mat = PROCFS_MEMFILE_FORMAT.matcher(str);
- if (mat.find()) {
- if (mat.group(1).equals(MEMTOTAL_STRING)) {
- ramSize = Long.parseLong(mat.group(2));
- } else if (mat.group(1).equals(SWAPTOTAL_STRING)) {
- swapSize = Long.parseLong(mat.group(2));
- } else if (mat.group(1).equals(MEMFREE_STRING)) {
- ramSizeFree = Long.parseLong(mat.group(2));
- } else if (mat.group(1).equals(SWAPFREE_STRING)) {
- swapSizeFree = Long.parseLong(mat.group(2));
- } else if (mat.group(1).equals(INACTIVE_STRING)) {
- inactiveSize = Long.parseLong(mat.group(2));
- }
- }
- str = in.readLine();
- }
- } catch (IOException io) {
- LOG.warn("Error reading the stream " + io);
- } finally {
- // Close the streams
- try {
- fReader.close();
- try {
- in.close();
- } catch (IOException i) {
- LOG.warn("Error closing the stream " + in);
- }
- } catch (IOException i) {
- LOG.warn("Error closing the stream " + fReader);
- }
- }
-
- readMemInfoFile = true;
- }
-
- /**
- * Read /proc/cpuinfo, parse and calculate CPU information.
- */
- private void readProcCpuInfoFile() {
- // This directory needs to be read only once
- if (readCpuInfoFile) {
- return;
- }
- HashSet<String> coreIdSet = new HashSet<>();
- // Read "/proc/cpuinfo" file
- BufferedReader in;
- InputStreamReader fReader;
- try {
- fReader = new InputStreamReader(
- new FileInputStream(procfsCpuFile), Charset.forName("UTF-8"));
- in = new BufferedReader(fReader);
- } catch (FileNotFoundException f) {
- // shouldn't happen....
- LOG.warn("Couldn't read " + procfsCpuFile + "; can't determine cpu info");
- return;
- }
- Matcher mat;
- try {
- numProcessors = 0;
- numCores = 1;
- String currentPhysicalId = "";
- String str = in.readLine();
- while (str != null) {
- mat = PROCESSOR_FORMAT.matcher(str);
- if (mat.find()) {
- numProcessors++;
- }
- mat = FREQUENCY_FORMAT.matcher(str);
- if (mat.find()) {
- cpuFrequency = (long)(Double.parseDouble(mat.group(1)) * 1000); // kHz
- }
- mat = PHYSICAL_ID_FORMAT.matcher(str);
- if (mat.find()) {
- currentPhysicalId = str;
- }
- mat = CORE_ID_FORMAT.matcher(str);
- if (mat.find()) {
- coreIdSet.add(currentPhysicalId + " " + str);
- numCores = coreIdSet.size();
- }
- str = in.readLine();
- }
- } catch (IOException io) {
- LOG.warn("Error reading the stream " + io);
- } finally {
- // Close the streams
- try {
- fReader.close();
- try {
- in.close();
- } catch (IOException i) {
- LOG.warn("Error closing the stream " + in);
- }
- } catch (IOException i) {
- LOG.warn("Error closing the stream " + fReader);
- }
- }
- readCpuInfoFile = true;
- }
-
- /**
- * Read /proc/stat file, parse and calculate cumulative CPU.
- */
- private void readProcStatFile() {
- // Read "/proc/stat" file
- BufferedReader in;
- InputStreamReader fReader;
- try {
- fReader = new InputStreamReader(
- new FileInputStream(procfsStatFile), Charset.forName("UTF-8"));
- in = new BufferedReader(fReader);
- } catch (FileNotFoundException f) {
- // shouldn't happen....
- return;
- }
-
- Matcher mat;
- try {
- String str = in.readLine();
- while (str != null) {
- mat = CPU_TIME_FORMAT.matcher(str);
- if (mat.find()) {
- long uTime = Long.parseLong(mat.group(1));
- long nTime = Long.parseLong(mat.group(2));
- long sTime = Long.parseLong(mat.group(3));
- cpuTimeTracker.updateElapsedJiffies(
- BigInteger.valueOf(uTime + nTime + sTime),
- getCurrentTime());
- break;
- }
- str = in.readLine();
- }
- } catch (IOException io) {
- LOG.warn("Error reading the stream " + io);
- } finally {
- // Close the streams
- try {
- fReader.close();
- try {
- in.close();
- } catch (IOException i) {
- LOG.warn("Error closing the stream " + in);
- }
- } catch (IOException i) {
- LOG.warn("Error closing the stream " + fReader);
- }
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public long getPhysicalMemorySize() {
- readProcMemInfoFile();
- return ramSize * 1024;
+ super(new SysInfoLinux());
}
- /** {@inheritDoc} */
- @Override
- public long getVirtualMemorySize() {
- readProcMemInfoFile();
- return (ramSize + swapSize) * 1024;
- }
-
- /** {@inheritDoc} */
- @Override
- public long getAvailablePhysicalMemorySize() {
- readProcMemInfoFile(true);
- return (ramSizeFree + inactiveSize) * 1024;
- }
-
- /** {@inheritDoc} */
- @Override
- public long getAvailableVirtualMemorySize() {
- readProcMemInfoFile(true);
- return (ramSizeFree + swapSizeFree + inactiveSize) * 1024;
- }
-
- /** {@inheritDoc} */
- @Override
- public int getNumProcessors() {
- readProcCpuInfoFile();
- return numProcessors;
- }
-
- /** {@inheritDoc} */
- @Override
- public int getNumCores() {
- readProcCpuInfoFile();
- return numCores;
- }
-
- /** {@inheritDoc} */
- @Override
- public long getCpuFrequency() {
- readProcCpuInfoFile();
- return cpuFrequency;
- }
-
- /** {@inheritDoc} */
- @Override
- public long getCumulativeCpuTime() {
- readProcStatFile();
- return cpuTimeTracker.cumulativeCpuTime.longValue();
- }
-
- /** {@inheritDoc} */
- @Override
- public float getCpuUsage() {
- readProcStatFile();
- float overallCpuUsage = cpuTimeTracker.getCpuTrackerUsagePercent();
- if (overallCpuUsage != CpuTimeTracker.UNAVAILABLE) {
- overallCpuUsage = overallCpuUsage / getNumProcessors();
- }
- return overallCpuUsage;
- }
-
- /**
- * Test the {@link LinuxResourceCalculatorPlugin}.
- *
- * @param args - arguments to this calculator test
- */
- public static void main(String[] args) {
- LinuxResourceCalculatorPlugin plugin = new LinuxResourceCalculatorPlugin();
- System.out.println("Physical memory Size (bytes) : "
- + plugin.getPhysicalMemorySize());
- System.out.println("Total Virtual memory Size (bytes) : "
- + plugin.getVirtualMemorySize());
- System.out.println("Available Physical memory Size (bytes) : "
- + plugin.getAvailablePhysicalMemorySize());
- System.out.println("Total Available Virtual memory Size (bytes) : "
- + plugin.getAvailableVirtualMemorySize());
- System.out.println("Number of Processors : " + plugin.getNumProcessors());
- System.out.println("CPU frequency (kHz) : " + plugin.getCpuFrequency());
- System.out.println("Cumulative CPU time (ms) : " +
- plugin.getCumulativeCpuTime());
- try {
- // Sleep so we can compute the CPU usage
- Thread.sleep(500L);
- } catch (InterruptedException e) {
- // do nothing
- }
- System.out.println("CPU usage % : " + plugin.getCpuUsage());
- }
-
- @VisibleForTesting
- void setReadCpuInfoFile(boolean readCpuInfoFileValue) {
- this.readCpuInfoFile = readCpuInfoFileValue;
- }
-
- public long getJiffyLengthInMillis() {
- return this.jiffyLengthInMillis;
- }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac604837/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index df9d28a..2345c62 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -40,9 +40,9 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.CpuTimeTracker;
import org.apache.hadoop.util.Shell;
-import org.apache.hadoop.util.Shell.ShellCommandExecutor;
-import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.SysInfoLinux;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
/**
@@ -64,8 +64,9 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
public static final String PROCFS_STAT_FILE = "stat";
public static final String PROCFS_CMDLINE_FILE = "cmdline";
- public static final long PAGE_SIZE;
- public static final long JIFFY_LENGTH_IN_MILLIS; // in millisecond
+ public static final long PAGE_SIZE = SysInfoLinux.PAGE_SIZE;
+ public static final long JIFFY_LENGTH_IN_MILLIS =
+ SysInfoLinux.JIFFY_LENGTH_IN_MILLIS; // in millisecond
private final CpuTimeTracker cpuTimeTracker;
private Clock clock;
@@ -108,31 +109,6 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
protected Map<String, ProcessTreeSmapMemInfo> processSMAPTree =
new HashMap<String, ProcessTreeSmapMemInfo>();
- static {
- long jiffiesPerSecond = -1;
- long pageSize = -1;
- try {
- if(Shell.LINUX) {
- ShellCommandExecutor shellExecutorClk = new ShellCommandExecutor(
- new String[] { "getconf", "CLK_TCK" });
- shellExecutorClk.execute();
- jiffiesPerSecond = Long.parseLong(shellExecutorClk.getOutput().replace("\n", ""));
-
- ShellCommandExecutor shellExecutorPage = new ShellCommandExecutor(
- new String[] { "getconf", "PAGESIZE" });
- shellExecutorPage.execute();
- pageSize = Long.parseLong(shellExecutorPage.getOutput().replace("\n", ""));
-
- }
- } catch (IOException e) {
- LOG.error(StringUtils.stringifyException(e));
- } finally {
- JIFFY_LENGTH_IN_MILLIS = jiffiesPerSecond != -1 ?
- Math.round(1000D / jiffiesPerSecond) : -1;
- PAGE_SIZE = pageSize;
- }
- }
-
// to enable testing, using this variable which can be configured
// to a test directory.
private String procfsDir;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac604837/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
index 40bd44e..5e5f1b4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
@@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
package org.apache.hadoop.yarn.util;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -23,29 +22,42 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.SysInfo;
/**
* Plugin to calculate resource information on the system.
- *
*/
@InterfaceAudience.LimitedPrivate({"YARN", "MAPREDUCE"})
@InterfaceStability.Unstable
-public abstract class ResourceCalculatorPlugin extends Configured {
+public class ResourceCalculatorPlugin extends Configured {
+
+ private final SysInfo sys;
+
+ protected ResourceCalculatorPlugin() {
+ this(SysInfo.newInstance());
+ }
+
+ public ResourceCalculatorPlugin(SysInfo sys) {
+ this.sys = sys;
+ }
/**
* Obtain the total size of the virtual memory present in the system.
*
* @return virtual memory size in bytes.
*/
- public abstract long getVirtualMemorySize();
+ public long getVirtualMemorySize() {
+ return sys.getVirtualMemorySize();
+ }
/**
* Obtain the total size of the physical memory present in the system.
*
 * @return physical memory size in bytes.
*/
- public abstract long getPhysicalMemorySize();
+ public long getPhysicalMemorySize() {
+ return sys.getPhysicalMemorySize();
+ }
/**
* Obtain the total size of the available virtual memory present
@@ -53,7 +65,9 @@ public abstract class ResourceCalculatorPlugin extends Configured {
*
* @return available virtual memory size in bytes.
*/
- public abstract long getAvailableVirtualMemorySize();
+ public long getAvailableVirtualMemorySize() {
+ return sys.getAvailableVirtualMemorySize();
+ }
/**
* Obtain the total size of the available physical memory present
@@ -61,42 +75,54 @@ public abstract class ResourceCalculatorPlugin extends Configured {
*
 * @return available physical memory size in bytes.
*/
- public abstract long getAvailablePhysicalMemorySize();
+ public long getAvailablePhysicalMemorySize() {
+ return sys.getAvailablePhysicalMemorySize();
+ }
/**
* Obtain the total number of logical processors present on the system.
*
* @return number of logical processors
*/
- public abstract int getNumProcessors();
+ public int getNumProcessors() {
+ return sys.getNumProcessors();
+ }
/**
* Obtain total number of physical cores present on the system.
*
* @return number of physical cores
*/
- public abstract int getNumCores();
+ public int getNumCores() {
+ return sys.getNumCores();
+ }
/**
 * Obtain the CPU frequency of the system.
*
* @return CPU frequency in kHz
*/
- public abstract long getCpuFrequency();
+ public long getCpuFrequency() {
+ return sys.getCpuFrequency();
+ }
/**
* Obtain the cumulative CPU time since the system is on.
*
* @return cumulative CPU time in milliseconds
*/
- public abstract long getCumulativeCpuTime();
+ public long getCumulativeCpuTime() {
+ return sys.getCumulativeCpuTime();
+ }
/**
* Obtain the CPU usage % of the machine. Return -1 if it is unavailable
*
* @return CPU usage in %
*/
- public abstract float getCpuUsage();
+ public float getCpuUsage() {
+ return sys.getCpuUsage();
+ }
/**
* Create the ResourceCalculatorPlugin from the class name and configure it. If
@@ -114,21 +140,11 @@ public abstract class ResourceCalculatorPlugin extends Configured {
if (clazz != null) {
return ReflectionUtils.newInstance(clazz, conf);
}
-
- // No class given, try a os specific class
try {
- if (Shell.LINUX) {
- return new LinuxResourceCalculatorPlugin();
- }
- if (Shell.WINDOWS) {
- return new WindowsResourceCalculatorPlugin();
- }
- } catch (SecurityException se) {
- // Failed to get Operating System name.
+ return new ResourceCalculatorPlugin();
+ } catch (SecurityException e) {
return null;
}
-
- // Not supported on this system.
- return null;
}
+
}
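
A hedged usage sketch (not part of the commit): with ResourceCalculatorPlugin now a concrete class delegating to SysInfo, callers no longer need to name an OS-specific subclass. This assumes the getResourceCalculatorPlugin(Class, Configuration) factory whose body appears in the hunk above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;

public class PluginUsageExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Passing null for the class falls through to the generic plugin,
    // which delegates every query to SysInfo.newInstance().
    ResourceCalculatorPlugin plugin =
        ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf);
    if (plugin != null) {
      System.out.println("physical memory: "
          + plugin.getPhysicalMemorySize() + " bytes");
      System.out.println("logical processors: " + plugin.getNumProcessors());
    }
  }
}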
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac604837/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
index 7d9c7d3..ebe8df1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
@@ -229,7 +229,7 @@ public class WindowsBasedProcessTree extends ResourceCalculatorProcessTree {
@Override
public float getCpuUsagePercent() {
- return CpuTimeTracker.UNAVAILABLE;
+ return UNAVAILABLE;
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac604837/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsResourceCalculatorPlugin.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsResourceCalculatorPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsResourceCalculatorPlugin.java
index cdbf525..f817b7a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsResourceCalculatorPlugin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsResourceCalculatorPlugin.java
@@ -15,162 +15,18 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
package org.apache.hadoop.yarn.util;
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
-import org.apache.hadoop.util.Shell;
-import org.apache.hadoop.util.Shell.ShellCommandExecutor;
-import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.util.SysInfoWindows;
-@Private
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
public class WindowsResourceCalculatorPlugin extends ResourceCalculatorPlugin {
-
- static final Log LOG = LogFactory
- .getLog(WindowsResourceCalculatorPlugin.class);
-
- long vmemSize;
- long memSize;
- long vmemAvailable;
- long memAvailable;
- int numProcessors;
- long cpuFrequencyKhz;
- long cumulativeCpuTimeMs;
- float cpuUsage;
-
- long lastRefreshTime;
- private final int refreshIntervalMs = 1000;
-
- WindowsBasedProcessTree pTree = null;
-
- public WindowsResourceCalculatorPlugin() {
- lastRefreshTime = 0;
- reset();
- }
-
- void reset() {
- vmemSize = -1;
- memSize = -1;
- vmemAvailable = -1;
- memAvailable = -1;
- numProcessors = -1;
- cpuFrequencyKhz = -1;
- cumulativeCpuTimeMs = -1;
- cpuUsage = -1;
- }
-
- String getSystemInfoInfoFromShell() {
- ShellCommandExecutor shellExecutor = new ShellCommandExecutor(
- new String[] { Shell.WINUTILS, "systeminfo" });
- try {
- shellExecutor.execute();
- return shellExecutor.getOutput();
- } catch (IOException e) {
- LOG.error(StringUtils.stringifyException(e));
- }
- return null;
- }
-
- void refreshIfNeeded() {
- long now = System.currentTimeMillis();
- if (now - lastRefreshTime > refreshIntervalMs) {
- long refreshInterval = now - lastRefreshTime;
- lastRefreshTime = now;
- long lastCumCpuTimeMs = cumulativeCpuTimeMs;
- reset();
- String sysInfoStr = getSystemInfoInfoFromShell();
- if (sysInfoStr != null) {
- final int sysInfoSplitCount = 7;
- String[] sysInfo = sysInfoStr.substring(0, sysInfoStr.indexOf("\r\n"))
- .split(",");
- if (sysInfo.length == sysInfoSplitCount) {
- try {
- vmemSize = Long.parseLong(sysInfo[0]);
- memSize = Long.parseLong(sysInfo[1]);
- vmemAvailable = Long.parseLong(sysInfo[2]);
- memAvailable = Long.parseLong(sysInfo[3]);
- numProcessors = Integer.parseInt(sysInfo[4]);
- cpuFrequencyKhz = Long.parseLong(sysInfo[5]);
- cumulativeCpuTimeMs = Long.parseLong(sysInfo[6]);
- if (lastCumCpuTimeMs != -1) {
- cpuUsage = (cumulativeCpuTimeMs - lastCumCpuTimeMs)
- / (refreshInterval * 1.0f);
- }
-
- } catch (NumberFormatException nfe) {
- LOG.warn("Error parsing sysInfo." + nfe);
- }
- } else {
- LOG.warn("Expected split length of sysInfo to be "
- + sysInfoSplitCount + ". Got " + sysInfo.length);
- }
- }
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public long getVirtualMemorySize() {
- refreshIfNeeded();
- return vmemSize;
- }
-
- /** {@inheritDoc} */
- @Override
- public long getPhysicalMemorySize() {
- refreshIfNeeded();
- return memSize;
- }
- /** {@inheritDoc} */
- @Override
- public long getAvailableVirtualMemorySize() {
- refreshIfNeeded();
- return vmemAvailable;
- }
-
- /** {@inheritDoc} */
- @Override
- public long getAvailablePhysicalMemorySize() {
- refreshIfNeeded();
- return memAvailable;
- }
-
- /** {@inheritDoc} */
- @Override
- public int getNumProcessors() {
- refreshIfNeeded();
- return numProcessors;
- }
-
- /** {@inheritDoc} */
- @Override
- public int getNumCores() {
- return getNumProcessors();
- }
-
- /** {@inheritDoc} */
- @Override
- public long getCpuFrequency() {
- refreshIfNeeded();
- return cpuFrequencyKhz;
- }
-
- /** {@inheritDoc} */
- @Override
- public long getCumulativeCpuTime() {
- refreshIfNeeded();
- return cumulativeCpuTimeMs;
+ public WindowsResourceCalculatorPlugin() {
+ super(new SysInfoWindows());
}
- /** {@inheritDoc} */
- @Override
- public float getCpuUsage() {
- refreshIfNeeded();
- return cpuUsage;
- }
}
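
With the systeminfo parsing and refresh caching moved into SysInfoWindows, the Windows plugin reduces to handing the base class a backend. Any SysInfo implementation could be plugged in the same way; a hypothetical sketch (MyCustomSysInfo is not a real class):

// Illustrative only: a plugin backed by a hypothetical SysInfo subclass,
// mirroring the constructor-delegation in WindowsResourceCalculatorPlugin.
public class MySysInfoPlugin extends ResourceCalculatorPlugin {
  public MySysInfoPlugin() {
    super(new MyCustomSysInfo()); // hypothetical SysInfo implementation
  }
}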
[06/21] hadoop git commit: HADOOP-12201. Add tracing to
FileSystem#createFileSystem and Globber#glob (cmccabe)
Posted by aw...@apache.org.
HADOOP-12201. Add tracing to FileSystem#createFileSystem and Globber#glob (cmccabe)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8832fcf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8832fcf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8832fcf
Branch: refs/heads/HADOOP-12111
Commit: b8832fcf1e2ae1e43d5e4523016731af40ab58d7
Parents: 2e3d83f
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Wed Jul 8 20:07:21 2015 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Wed Jul 8 20:07:21 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../java/org/apache/hadoop/fs/FileSystem.java | 20 ++++++++++++++++----
.../main/java/org/apache/hadoop/fs/Globber.java | 17 +++++++++++++++++
3 files changed, 36 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8832fcf/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index eb18e6c..c99fb5e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -687,6 +687,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-12195. Add annotation to package-info.java file to workaround
MCOMPILER-205. (wang)
+ HADOOP-12201. Add tracing to FileSystem#createFileSystem and Globber#glob
+ (cmccabe)
+
OPTIMIZATIONS
HADOOP-11785. Reduce the number of listStatus operation in distcp
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8832fcf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index c73caf7..5e03e88 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -67,6 +67,9 @@ import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.util.StringUtils;
+import org.apache.htrace.Span;
+import org.apache.htrace.Trace;
+import org.apache.htrace.TraceScope;
import com.google.common.annotations.VisibleForTesting;
@@ -2675,10 +2678,19 @@ public abstract class FileSystem extends Configured implements Closeable {
private static FileSystem createFileSystem(URI uri, Configuration conf
) throws IOException {
- Class<?> clazz = getFileSystemClass(uri.getScheme(), conf);
- FileSystem fs = (FileSystem)ReflectionUtils.newInstance(clazz, conf);
- fs.initialize(uri, conf);
- return fs;
+ TraceScope scope = Trace.startSpan("FileSystem#createFileSystem");
+ Span span = scope.getSpan();
+ if (span != null) {
+ span.addKVAnnotation("scheme", uri.getScheme());
+ }
+ try {
+ Class<?> clazz = getFileSystemClass(uri.getScheme(), conf);
+ FileSystem fs = (FileSystem)ReflectionUtils.newInstance(clazz, conf);
+ fs.initialize(uri, conf);
+ return fs;
+ } finally {
+ scope.close();
+ }
}
/** Caching FileSystem objects */
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8832fcf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
index 9cb810f..48639b4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
@@ -28,6 +28,10 @@ import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.htrace.Span;
+import org.apache.htrace.Trace;
+import org.apache.htrace.TraceScope;
+
@InterfaceAudience.Private
@InterfaceStability.Unstable
class Globber {
@@ -136,6 +140,19 @@ class Globber {
}
public FileStatus[] glob() throws IOException {
+ TraceScope scope = Trace.startSpan("Globber#glob");
+ Span span = scope.getSpan();
+ if (span != null) {
+ span.addKVAnnotation("pattern", pathPattern.toUri().getPath());
+ }
+ try {
+ return doGlob();
+ } finally {
+ scope.close();
+ }
+ }
+
+ private FileStatus[] doGlob() throws IOException {
// First we get the scheme and authority of the pattern that was passed
// in.
String scheme = schemeFromPath(pathPattern);
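
Both hunks follow the same htrace idiom: start a span, annotate it only when it is actually being sampled (getSpan() returns null otherwise), and close the scope in a finally block so the span ends even on exceptions. A minimal sketch using the htrace 3.x API the diff imports; the method name and annotation key below are made up for illustration:

import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;

public class TracingExample {
  public static String tracedOperation(String input) {
    TraceScope scope = Trace.startSpan("TracingExample#tracedOperation");
    Span span = scope.getSpan();
    if (span != null) {
      // Only tag the span when this trace is being sampled.
      span.addKVAnnotation("input", input);
    }
    try {
      return input.toUpperCase(); // the work being traced
    } finally {
      scope.close();              // always end the span
    }
  }
}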
[19/21] hadoop git commit: HDFS-2956. calling fetchdt without a
--renewer argument throws NPE (Contributed by Vinayakumar B)
Posted by aw...@apache.org.
HDFS-2956. calling fetchdt without a --renewer argument throws NPE (Contributed by Vinayakumar B)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b4890803
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b4890803
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b4890803
Branch: refs/heads/HADOOP-12111
Commit: b48908033fcac7a4bd4313c1fd1457999fba08e1
Parents: d66302e
Author: Vinayakumar B <vi...@apache.org>
Authored: Fri Jul 10 15:47:04 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Fri Jul 10 15:47:04 2015 +0530
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
.../ClientNamenodeProtocolTranslatorPB.java | 2 +-
.../hdfs/tools/TestDelegationTokenFetcher.java | 39 ++++++++++++++++++++
3 files changed, 43 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4890803/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5c1208d..13b2621 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1029,6 +1029,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8749. Fix findbugs warnings in BlockManager.java.
(Brahma Reddy Battula via aajisaka)
+ HDFS-2956. calling fetchdt without a --renewer argument throws NPE
+ (vinayakumarb)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4890803/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 4ec6f9e..566d54f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -929,7 +929,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
throws IOException {
GetDelegationTokenRequestProto req = GetDelegationTokenRequestProto
.newBuilder()
- .setRenewer(renewer.toString())
+ .setRenewer(renewer == null ? "" : renewer.toString())
.build();
try {
GetDelegationTokenResponseProto resp = rpcProxy.getDelegationToken(null, req);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4890803/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDelegationTokenFetcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDelegationTokenFetcher.java
index ab3933b..80a1a6c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDelegationTokenFetcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDelegationTokenFetcher.java
@@ -18,7 +18,10 @@
package org.apache.hadoop.hdfs.tools;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
@@ -28,12 +31,18 @@ import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.tools.FakeRenewer;
import org.junit.Assert;
import org.junit.Rule;
@@ -105,4 +114,34 @@ public class TestDelegationTokenFetcher {
Assert.assertFalse(p.getFileSystem(conf).exists(p));
}
+
+ @Test
+ public void testDelegationTokenWithoutRenewerViaRPC() throws Exception {
+ conf.setBoolean(DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+ .build();
+ try {
+ cluster.waitActive();
+ DistributedFileSystem fs = cluster.getFileSystem();
+ // Should be able to fetch token without renewer.
+ LocalFileSystem localFileSystem = FileSystem.getLocal(conf);
+ Path p = new Path(f.getRoot().getAbsolutePath(), tokenFile);
+ p = localFileSystem.makeQualified(p);
+ DelegationTokenFetcher.saveDelegationToken(conf, fs, null, p);
+ Credentials creds = Credentials.readTokenStorageFile(p, conf);
+ Iterator<Token<?>> itr = creds.getAllTokens().iterator();
+ assertTrue("token not exist error", itr.hasNext());
+ assertNotNull("Token should be there without renewer", itr.next());
+ try {
+ // Without renewer renewal of token should fail.
+ DelegationTokenFetcher.renewTokens(conf, p);
+ fail("Should have failed to renew");
+ } catch (AccessControlException e) {
+ GenericTestUtils.assertExceptionContains(
+ "tried to renew a token without a renewer", e);
+ }
+ } finally {
+ cluster.shutdown();
+ }
+ }
}
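
The one-line translator fix matters because protobuf builder setters reject null: setRenewer(null) throws exactly the NPE this JIRA describes, so the null renewer is normalized to the empty string before the request is built. A generic sketch of the pattern, with a hypothetical proto name standing in for the real request type:

// Illustration only (SomeRequestProto is hypothetical): normalize optional
// values before handing them to a protobuf builder, whose setters throw
// NullPointerException on null input.
String renewerText = (renewer == null) ? "" : renewer.toString();
SomeRequestProto req = SomeRequestProto.newBuilder()
    .setRenewer(renewerText) // safe: never null
    .build();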
[05/21] hadoop git commit: HADOOP-11878. FileContext#fixRelativePart
should check for not null for a more informative exception. (Brahma Reddy
Battula via kasha)
Posted by aw...@apache.org.
HADOOP-11878. FileContext#fixRelativePart should check for not null for a more informative exception. (Brahma Reddy Battula via kasha)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e3d83f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e3d83f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e3d83f9
Branch: refs/heads/HADOOP-12111
Commit: 2e3d83f97b85c015f16f1c162b8ef0e7fee1ddf2
Parents: 625d7ed
Author: Karthik Kambatla <ka...@apache.org>
Authored: Wed Jul 8 11:34:34 2015 -0700
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Wed Jul 8 11:34:34 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../src/main/java/org/apache/hadoop/fs/FileContext.java | 3 +++
2 files changed, 6 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e3d83f9/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 6cc6b71..eb18e6c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -722,6 +722,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-12194. Support for incremental generation in the protoc plugin.
(wang)
+ HADOOP-11878. FileContext#fixRelativePart should check for not null for a
+ more informative exception. (Brahma Reddy Battula via kasha)
+
BUG FIXES
HADOOP-11802: DomainSocketWatcher thread terminates sometimes after there
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e3d83f9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 122ddf6..0f21a61 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -59,6 +59,8 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ShutdownHookManager;
+import com.google.common.base.Preconditions;
+
/**
* The FileContext class provides an interface for users of the Hadoop
* file system. It exposes a number of file system operations, e.g. create,
@@ -262,6 +264,7 @@ public class FileContext {
* has been deliberately declared private.
*/
Path fixRelativePart(Path p) {
+ Preconditions.checkNotNull(p, "path cannot be null");
if (p.isUriPathAbsolute()) {
return p;
} else {