Posted to common-commits@hadoop.apache.org by xy...@apache.org on 2018/05/15 23:58:08 UTC
[01/50] [abbrv] hadoop git commit: HDDS-37. Remove dependency of
hadoop-hdds-common and hadoop-hdds-server-scm from
hadoop-ozone/tools/pom.xml. Contributed by Sandeep Nemuri. [Forced Update!]
Repository: hadoop
Updated Branches:
refs/heads/HDDS-4 c9c79f775 -> 938baa219 (forced update)
HDDS-37. Remove dependency of hadoop-hdds-common and hadoop-hdds-server-scm from hadoop-ozone/tools/pom.xml.
Contributed by Sandeep Nemuri.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db1ab0fc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db1ab0fc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db1ab0fc
Branch: refs/heads/HDDS-4
Commit: db1ab0fc1674177fdbe8f50c557aa4052ce77efc
Parents: 7369f41
Author: Anu Engineer <ae...@apache.org>
Authored: Thu May 10 16:27:21 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Thu May 10 16:27:21 2018 -0700
----------------------------------------------------------------------
hadoop-ozone/tools/pom.xml | 12 ------------
1 file changed, 12 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/db1ab0fc/hadoop-ozone/tools/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml
index e586f1b..a78565a 100644
--- a/hadoop-ozone/tools/pom.xml
+++ b/hadoop-ozone/tools/pom.xml
@@ -49,18 +49,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>metrics-core</artifactId>
<version>3.2.4</version>
</dependency>
-
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-hdds-server-scm</artifactId>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-hdds-common</artifactId>
- <scope>provided</scope>
- </dependency>
-
<dependency>
<groupId>org.openjdk.jmh</groupId>
<artifactId>jmh-core</artifactId>
---------------------------------------------------------------------
[42/50] [abbrv] hadoop git commit: HADOOP-15466. Correct units in
adl.http.timeout. Contributed by Sean Mackrory.
Posted by xy...@apache.org.
HADOOP-15466. Correct units in adl.http.timeout.
Contributed by Sean Mackrory.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07d8505f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07d8505f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07d8505f
Branch: refs/heads/HDDS-4
Commit: 07d8505f75ec401e5847fe158dad765ce5175fab
Parents: b670837
Author: Steve Loughran <st...@apache.org>
Authored: Tue May 15 16:19:03 2018 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Tue May 15 16:19:03 2018 +0100
----------------------------------------------------------------------
.../hadoop-common/src/main/resources/core-default.xml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/07d8505f/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 98b0ddf..7ba23d4 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2933,8 +2933,8 @@
<name>adl.http.timeout</name>
<value>-1</value>
<description>
- Base timeout (in seconds) for HTTP requests from the ADL SDK. Values of
- zero or less cause the SDK default to be used instead.
+ Base timeout (in milliseconds) for HTTP requests from the ADL SDK. Values
+ of zero or less cause the SDK default to be used instead.
</description>
</property>
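For illustration, a minimal sketch of overriding this timeout programmatically through the standard Hadoop Configuration API; the property name comes from the diff above, while the 60000 ms value is only an example, not a recommendation:

import org.apache.hadoop.conf.Configuration;

public class AdlTimeoutExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Per HADOOP-15466 the unit is milliseconds; 60000 ms = 60 s (example value).
    conf.setInt("adl.http.timeout", 60000);
    // Values of zero or less fall back to the ADL SDK default.
    System.out.println(conf.getInt("adl.http.timeout", -1));
  }
}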
---------------------------------------------------------------------
[46/50] [abbrv] hadoop git commit: HDFS-13567.
TestNameNodeMetrics#testGenerateEDEKTime,
TestNameNodeMetrics#testResourceCheck should use a different cluster basedir.
Contributed by Anbang Hu.
Posted by xy...@apache.org.
HDFS-13567. TestNameNodeMetrics#testGenerateEDEKTime, TestNameNodeMetrics#testResourceCheck should use a different cluster basedir. Contributed by Anbang Hu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63480976
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63480976
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63480976
Branch: refs/heads/HDDS-4
Commit: 63480976a008bab0b84f4da08b1099867e5a9a6b
Parents: 2d6195c
Author: Inigo Goiri <in...@apache.org>
Authored: Tue May 15 15:34:54 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Tue May 15 15:34:54 2018 -0700
----------------------------------------------------------------------
.../hdfs/server/namenode/metrics/TestNameNodeMetrics.java | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/63480976/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index db9adbe..92bc51c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -992,8 +992,10 @@ public class TestNameNodeMetrics {
.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
2);
+ File basedir = new File(MiniDFSCluster.getBaseDirectory(),
+ GenericTestUtils.getMethodName());
- try (MiniDFSCluster clusterEDEK = new MiniDFSCluster.Builder(conf)
+ try (MiniDFSCluster clusterEDEK = new MiniDFSCluster.Builder(conf, basedir)
.numDataNodes(1).build()) {
DistributedFileSystem fsEDEK =
@@ -1029,7 +1031,9 @@ public class TestNameNodeMetrics {
@Test
public void testResourceCheck() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration();
- MiniDFSCluster tmpCluster = new MiniDFSCluster.Builder(conf)
+ File basedir = new File(MiniDFSCluster.getBaseDirectory(),
+ GenericTestUtils.getMethodName());
+ MiniDFSCluster tmpCluster = new MiniDFSCluster.Builder(conf, basedir)
.numDataNodes(0)
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.build();
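The same isolation pattern generalizes to other MiniDFSCluster-based tests; a hedged sketch using only the APIs visible in the diff (the enclosing test class, imports, and conf are assumed):

// Give each test method its own base directory so tests running in
// parallel do not collide on the default cluster storage location.
File basedir = new File(MiniDFSCluster.getBaseDirectory(),
    GenericTestUtils.getMethodName());
try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, basedir)
    .numDataNodes(1).build()) {
  cluster.waitActive();
  // ... exercise the NameNode under test ...
}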
---------------------------------------------------------------------
[23/50] [abbrv] hadoop git commit: HDDS-53. Fix
TestKey#testPutAndGetKeyWithDnRestart. Contributed by Mukul Kumar Singh.
Posted by xy...@apache.org.
HDDS-53. Fix TestKey#testPutAndGetKeyWithDnRestart.
Contributed by Mukul Kumar Singh.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cd8b9e91
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cd8b9e91
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cd8b9e91
Branch: refs/heads/HDDS-4
Commit: cd8b9e913e9a27196d5622feab68d679c0b552c5
Parents: 7e26e1f
Author: Anu Engineer <ae...@apache.org>
Authored: Sat May 12 10:13:13 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Sat May 12 10:24:05 2018 -0700
----------------------------------------------------------------------
.../apache/hadoop/ozone/MiniOzoneClusterImpl.java | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd8b9e91/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 17872f4..08d7176 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -62,6 +62,14 @@ import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
.HEALTHY;
import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+ .DFS_CONTAINER_IPC_PORT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+ .DFS_CONTAINER_IPC_RANDOM_PORT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+ .DFS_CONTAINER_RATIS_IPC_PORT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+ .DFS_CONTAINER_RATIS_IPC_RANDOM_PORT;
/**
* MiniOzoneCluster creates a complete in-process Ozone cluster suitable for
@@ -211,6 +219,14 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
HddsDatanodeService datanodeService = hddsDatanodes.get(i);
datanodeService.stop();
datanodeService.join();
+ // Ensure the same ports are used across restarts.
+ Configuration conf = datanodeService.getConf();
+ int currentPort = datanodeService.getDatanodeDetails().getContainerPort();
+ conf.setInt(DFS_CONTAINER_IPC_PORT, currentPort);
+ conf.setBoolean(DFS_CONTAINER_IPC_RANDOM_PORT, false);
+ int ratisPort = datanodeService.getDatanodeDetails().getRatisPort();
+ conf.setInt(DFS_CONTAINER_RATIS_IPC_PORT, ratisPort);
+ conf.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, false);
datanodeService.start(null);
}
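A condensed sketch of the restart sequence this change makes deterministic, assembled only from the methods and configuration keys visible in the diff:

// Pin the ports the datanode is currently bound to and disable random
// port selection, so the restarted datanode rebinds to the same
// endpoints and addresses cached by clients stay valid.
HddsDatanodeService dn = hddsDatanodes.get(0);
int containerPort = dn.getDatanodeDetails().getContainerPort();
dn.stop();
dn.join();
Configuration conf = dn.getConf();
conf.setInt(DFS_CONTAINER_IPC_PORT, containerPort);
conf.setBoolean(DFS_CONTAINER_IPC_RANDOM_PORT, false);
dn.start(null);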
---------------------------------------------------------------------
[13/50] [abbrv] hadoop git commit: HDDS-25. Simple async event
processing for SCM. Contributed by Elek, Marton.
Posted by xy...@apache.org.
HDDS-25. Simple async event processing for SCM.
Contributed by Elek, Marton.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba12e880
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba12e880
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba12e880
Branch: refs/heads/HDDS-4
Commit: ba12e8805e2ae6f125042bfb1d6b3cfc10faf9ed
Parents: 1f10a36
Author: Anu Engineer <ae...@apache.org>
Authored: Fri May 11 11:35:21 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Fri May 11 11:36:52 2018 -0700
----------------------------------------------------------------------
.../apache/hadoop/hdds/server/events/Event.java | 42 ++++
.../hdds/server/events/EventExecutor.java | 68 ++++++
.../hadoop/hdds/server/events/EventHandler.java | 33 +++
.../hdds/server/events/EventPublisher.java | 28 +++
.../hadoop/hdds/server/events/EventQueue.java | 213 +++++++++++++++++++
.../server/events/SingleThreadExecutor.java | 103 +++++++++
.../hadoop/hdds/server/events/TypedEvent.java | 51 +++++
.../hadoop/hdds/server/events/package-info.java | 23 ++
.../hdds/server/events/TestEventQueue.java | 113 ++++++++++
.../hdds/server/events/TestEventQueueChain.java | 79 +++++++
10 files changed, 753 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba12e880/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/Event.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/Event.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/Event.java
new file mode 100644
index 0000000..810c8b3
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/Event.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+/**
+ * Identifier of an async event.
+ *
+ * @param <PAYLOAD> The message payload type of this event.
+ */
+public interface Event<PAYLOAD> {
+
+ /**
+ * The type of the event payload. Payload contains all the required data
+ * to process the event.
+ *
+ */
+ Class<PAYLOAD> getPayloadType();
+
+ /**
+ * The human-readable name of the event.
+ *
+ * Used for display in thread names
+ * and monitoring.
+ *
+ */
+ String getName();
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba12e880/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventExecutor.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventExecutor.java
new file mode 100644
index 0000000..4257839
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventExecutor.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+/**
+ * Executors define how an EventHandler should be called.
+ * <p>
+ * Executors are used only by the EventQueue; they provide the thread
+ * separation between the caller and the EventHandler.
+ * <p>
+ * Executors should guarantee that only one thread is executing one
+ * EventHandler at the same time.
+ *
+ * @param <PAYLOAD> the payload type of the event.
+ */
+public interface EventExecutor<PAYLOAD> extends AutoCloseable {
+
+ /**
+ * Process an event payload.
+ *
+ * @param handler the handler to process the payload
+ * @param eventPayload to be processed.
+ * @param publisher to send response/other message forward to the chain.
+ */
+ void onMessage(EventHandler<PAYLOAD> handler,
+ PAYLOAD eventPayload,
+ EventPublisher
+ publisher);
+
+ /**
+ * Return the number of failed events.
+ */
+ long failedEvents();
+
+
+ /**
+ * Return the number of successfully processed events.
+ */
+ long successfulEvents();
+
+ /**
+ * Return the number of not-yet-processed events.
+ */
+ long queuedEvents();
+
+ /**
+ * The human-readable name of the event executor.
+ * <p>
+ * Used in monitoring and logging.
+ *
+ */
+ String getName();
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba12e880/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventHandler.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventHandler.java
new file mode 100644
index 0000000..f40fc9e
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventHandler.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+/**
+ * Processor that reacts to an event.
+ *
+ * EventExecutors should guarantee that the implementations are called only
+ * from one thread.
+ *
+ * @param <PAYLOAD>
+ */
+@FunctionalInterface
+public interface EventHandler<PAYLOAD> {
+
+ void onMessage(PAYLOAD payload, EventPublisher publisher);
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba12e880/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventPublisher.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventPublisher.java
new file mode 100644
index 0000000..a47fb57
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventPublisher.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+/**
+ * Client interface to send a new event.
+ */
+public interface EventPublisher {
+
+ <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void
+ fireEvent(EVENT_TYPE event, PAYLOAD payload);
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba12e880/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
new file mode 100644
index 0000000..44d85f5
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
@@ -0,0 +1,213 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+/**
+ * Simple async event processing utility.
+ * <p>
+ * The event queue manages a collection of event handlers and routes
+ * incoming events to one or more of them.
+ */
+public class EventQueue implements EventPublisher, AutoCloseable {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(EventQueue.class);
+
+ private final Map<Event, Map<EventExecutor, List<EventHandler>>> executors =
+ new HashMap<>();
+
+ private final AtomicLong queuedCount = new AtomicLong(0);
+
+ private final AtomicLong eventCount = new AtomicLong(0);
+
+ public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
+ EVENT_TYPE event, EventHandler<PAYLOAD> handler) {
+
+ this.addHandler(event, new SingleThreadExecutor<>(
+ event.getName()), handler);
+ }
+
+ public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
+ EVENT_TYPE event,
+ EventExecutor<PAYLOAD> executor,
+ EventHandler<PAYLOAD> handler) {
+
+ executors.putIfAbsent(event, new HashMap<>());
+ executors.get(event).putIfAbsent(executor, new ArrayList<>());
+
+ executors.get(event)
+ .get(executor)
+ .add(handler);
+ }
+
+ /**
+ * Creates one executor with multiple event handlers.
+ */
+ public void addHandlerGroup(String name, HandlerForEvent<?>...
+ eventsAndHandlers) {
+ SingleThreadExecutor sharedExecutor =
+ new SingleThreadExecutor(name);
+ for (HandlerForEvent handlerForEvent : eventsAndHandlers) {
+ addHandler(handlerForEvent.event, sharedExecutor,
+ handlerForEvent.handler);
+ }
+
+ }
+
+ /**
+ * Route an event with payload to the right listener(s).
+ *
+ * @param event The event identifier
+ * @param payload The payload of the event.
+ * @throws IllegalArgumentException If there is no EventHandler for
+ * the specific event.
+ */
+ public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
+ EVENT_TYPE event, PAYLOAD payload) {
+
+ Map<EventExecutor, List<EventHandler>> eventExecutorListMap =
+ this.executors.get(event);
+
+ eventCount.incrementAndGet();
+ if (eventExecutorListMap != null) {
+
+ for (Map.Entry<EventExecutor, List<EventHandler>> executorAndHandlers :
+ eventExecutorListMap.entrySet()) {
+
+ for (EventHandler handler : executorAndHandlers.getValue()) {
+ queuedCount.incrementAndGet();
+
+ executorAndHandlers.getKey()
+ .onMessage(handler, payload, this);
+
+ }
+ }
+
+ } else {
+ throw new IllegalArgumentException(
+ "No event handler registered for event " + event);
+ }
+
+ }
+
+ /**
+ * This is just for unit testing, don't use it for production code.
+ * <p>
+ * It waits for all messages to be processed. If one event handler invokes
+ * another one, the latter also has to finish.
+ * <p>
+ * Long counter overflow is not handled, therefore it's safe only for unit
+ * testing.
+ * <p>
+ * This method is only eventually consistent: in some cases it may return
+ * even though new messages remain queued in some of the handlers. But in
+ * the simple case (one message) it returns only after the message and all
+ * dependent messages (messages sent by the current handlers) have been
+ * processed.
+ *
+ * @param timeout Timeout in milliseconds to wait for the processing.
+ */
+ @VisibleForTesting
+ public void processAll(long timeout) {
+ long currentTime = Time.now();
+ while (true) {
+
+ long processed = 0;
+
+ Stream<EventExecutor> allExecutor = this.executors.values().stream()
+ .flatMap(handlerMap -> handlerMap.keySet().stream());
+
+ boolean allIdle =
+ allExecutor.allMatch(executor -> executor.queuedEvents() == executor
+ .successfulEvents() + executor.failedEvents());
+
+ if (allIdle) {
+ return;
+ }
+
+ try {
+ Thread.sleep(100);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+
+ if (Time.now() > currentTime + timeout) {
+ throw new AssertionError(
+ "Messages are not processed in the given timeframe. Queued: "
+ + queuedCount.get() + " Processed: " + processed);
+ }
+ }
+ }
+
+ public void close() {
+
+ Set<EventExecutor> allExecutors = this.executors.values().stream()
+ .flatMap(handlerMap -> handlerMap.keySet().stream())
+ .collect(Collectors.toSet());
+
+ allExecutors.forEach(executor -> {
+ try {
+ executor.close();
+ } catch (Exception ex) {
+ LOG.error("Can't close the executor " + executor.getName(), ex);
+ }
+ });
+ }
+
+ /**
+ * Event identifier together with the handler.
+ *
+ * @param <PAYLOAD>
+ */
+ public static class HandlerForEvent<PAYLOAD> {
+
+ private final Event<PAYLOAD> event;
+
+ private final EventHandler<PAYLOAD> handler;
+
+ public HandlerForEvent(
+ Event<PAYLOAD> event,
+ EventHandler<PAYLOAD> handler) {
+ this.event = event;
+ this.handler = handler;
+ }
+
+ public Event<PAYLOAD> getEvent() {
+ return event;
+ }
+
+ public EventHandler<PAYLOAD> getHandler() {
+ return handler;
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba12e880/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
new file mode 100644
index 0000000..a64e3d7
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Simple EventExecutor that calls all the event handlers one by one.
+ *
+ * @param <T>
+ */
+public class SingleThreadExecutor<T> implements EventExecutor<T> {
+
+ public static final String THREAD_NAME_PREFIX = "EventQueue";
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(SingleThreadExecutor.class);
+
+ private final String name;
+
+ private final ThreadPoolExecutor executor;
+
+ private final AtomicLong queuedCount = new AtomicLong(0);
+
+ private final AtomicLong successfulCount = new AtomicLong(0);
+
+ private final AtomicLong failedCount = new AtomicLong(0);
+
+ public SingleThreadExecutor(String name) {
+ this.name = name;
+
+ LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>();
+ executor =
+ new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, workQueue,
+ runnable -> {
+ Thread thread = new Thread(runnable);
+ thread.setName(THREAD_NAME_PREFIX + "-" + name);
+ return thread;
+ });
+
+ }
+
+ @Override
+ public void onMessage(EventHandler<T> handler, T message, EventPublisher
+ publisher) {
+ queuedCount.incrementAndGet();
+ executor.execute(() -> {
+ try {
+ handler.onMessage(message, publisher);
+ successfulCount.incrementAndGet();
+ } catch (Exception ex) {
+ LOG.error("Error on execution message {}", message, ex);
+ failedCount.incrementAndGet();
+ }
+ });
+ }
+
+ @Override
+ public long failedEvents() {
+ return failedCount.get();
+ }
+
+ @Override
+ public long successfulEvents() {
+ return successfulCount.get();
+ }
+
+ @Override
+ public long queuedEvents() {
+ return queuedCount.get();
+ }
+
+ @Override
+ public void close() {
+ executor.shutdown();
+ }
+
+ @Override
+ public String getName() {
+ return name;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba12e880/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java
new file mode 100644
index 0000000..c2159ad
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+/**
+ * Basic Event implementation for defining custom events.
+ *
+ * @param <T>
+ */
+public class TypedEvent<T> implements Event<T> {
+
+ private final Class<T> payloadType;
+
+ private final String name;
+
+ public TypedEvent(Class<T> payloadType, String name) {
+ this.payloadType = payloadType;
+ this.name = name;
+ }
+
+ public TypedEvent(Class<T> payloadType) {
+ this.payloadType = payloadType;
+ this.name = payloadType.getSimpleName();
+ }
+
+ @Override
+ public Class<T> getPayloadType() {
+ return payloadType;
+ }
+
+ @Override
+ public String getName() {
+ return name;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba12e880/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/package-info.java
new file mode 100644
index 0000000..89999ee
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.server.events;
+
+/**
+ * Simple event queue implementation for hdds/ozone components.
+ */
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba12e880/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
new file mode 100644
index 0000000..3944409
--- /dev/null
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * Testing the basic functionality of the event queue.
+ */
+public class TestEventQueue {
+
+ private static final Event<Long> EVENT1 =
+ new TypedEvent<>(Long.class, "SCM_EVENT1");
+ private static final Event<Long> EVENT2 =
+ new TypedEvent<>(Long.class, "SCM_EVENT2");
+
+ private static final Event<Long> EVENT3 =
+ new TypedEvent<>(Long.class, "SCM_EVENT3");
+ private static final Event<Long> EVENT4 =
+ new TypedEvent<>(Long.class, "SCM_EVENT4");
+
+ private EventQueue queue;
+
+ @Before
+ public void startEventQueue() {
+ queue = new EventQueue();
+ }
+
+ @After
+ public void stopEventQueue() {
+ queue.close();
+ }
+
+ @Test
+ public void simpleEvent() {
+
+ final long[] result = new long[2];
+
+ queue.addHandler(EVENT1, (payload, publisher) -> result[0] = payload);
+
+ queue.fireEvent(EVENT1, 11L);
+ queue.processAll(1000);
+ Assert.assertEquals(11, result[0]);
+
+ }
+
+ @Test
+ public void multipleSubscriber() {
+ final long[] result = new long[2];
+ queue.addHandler(EVENT2, (payload, publisher) -> result[0] = payload);
+
+ queue.addHandler(EVENT2, (payload, publisher) -> result[1] = payload);
+
+ queue.fireEvent(EVENT2, 23L);
+ queue.processAll(1000);
+ Assert.assertEquals(23, result[0]);
+ Assert.assertEquals(23, result[1]);
+
+ }
+
+ @Test
+ public void handlerGroup() {
+ final long[] result = new long[2];
+ queue.addHandlerGroup(
+ "group",
+ new EventQueue.HandlerForEvent<>(EVENT3, (payload, publisher) ->
+ result[0] = payload),
+ new EventQueue.HandlerForEvent<>(EVENT4, (payload, publisher) ->
+ result[1] = payload)
+ );
+
+ queue.fireEvent(EVENT3, 23L);
+ queue.fireEvent(EVENT4, 42L);
+
+ queue.processAll(1000);
+
+ Assert.assertEquals(23, result[0]);
+ Assert.assertEquals(42, result[1]);
+
+ Set<String> eventQueueThreadNames =
+ Thread.getAllStackTraces().keySet()
+ .stream()
+ .filter(t -> t.getName().startsWith(SingleThreadExecutor
+ .THREAD_NAME_PREFIX))
+ .map(Thread::getName)
+ .collect(Collectors.toSet());
+ System.out.println(eventQueueThreadNames);
+ Assert.assertEquals(1, eventQueueThreadNames.size());
+
+ }
+
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba12e880/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java
new file mode 100644
index 0000000..bb05ef4
--- /dev/null
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+import org.junit.Test;
+
+/**
+ * A more realistic event test that sends an event from one listener.
+ */
+public class TestEventQueueChain {
+
+ private static final Event<FailedNode> DECOMMISSION =
+ new TypedEvent<>(FailedNode.class);
+
+ private static final Event<FailedNode> DECOMMISSION_START =
+ new TypedEvent<>(FailedNode.class);
+
+ @Test
+ public void simpleEvent() {
+ EventQueue queue = new EventQueue();
+
+ queue.addHandler(DECOMMISSION, new PipelineManager());
+ queue.addHandler(DECOMMISSION_START, new NodeWatcher());
+
+ queue.fireEvent(DECOMMISSION, new FailedNode("node1"));
+
+ queue.processAll(5000);
+ }
+
+
+ static class FailedNode {
+ private final String nodeId;
+
+ FailedNode(String nodeId) {
+ this.nodeId = nodeId;
+ }
+
+ String getNodeId() {
+ return nodeId;
+ }
+ }
+
+ private static class PipelineManager implements EventHandler<FailedNode> {
+
+ @Override
+ public void onMessage(FailedNode message, EventPublisher publisher) {
+
+ System.out.println(
+ "Closing pipelines for all pipelines including node: " + message
+ .getNodeId());
+
+ publisher.fireEvent(DECOMMISSION_START, message);
+ }
+
+ }
+
+ private static class NodeWatcher implements EventHandler<FailedNode> {
+
+ @Override
+ public void onMessage(FailedNode message, EventPublisher publisher) {
+ System.out.println("Clear timer");
+ }
+ }
+}
\ No newline at end of file
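Putting the pieces together, a hedged usage sketch composed only of the classes added in this commit (the event name and payload are illustrative):

import org.apache.hadoop.hdds.server.events.EventQueue;
import org.apache.hadoop.hdds.server.events.TypedEvent;

public class EventQueueExample {
  // An event is just a typed identifier; here the payload is a plain String.
  private static final TypedEvent<String> NODE_FAILED =
      new TypedEvent<>(String.class, "NODE_FAILED");

  public static void main(String[] args) {
    EventQueue queue = new EventQueue();
    // Without an explicit executor, addHandler wraps the handler in its own
    // SingleThreadExecutor named after the event.
    queue.addHandler(NODE_FAILED,
        (nodeId, publisher) -> System.out.println("Failed node: " + nodeId));
    queue.fireEvent(NODE_FAILED, "datanode-17");
    queue.processAll(1000); // test-only helper: wait up to 1000 ms
    queue.close();
  }
}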
---------------------------------------------------------------------
[20/50] [abbrv] hadoop git commit: HDDS-21. Add support for rename
key within a bucket for rest client. Contributed by Lokesh Jain.
Posted by xy...@apache.org.
HDDS-21. Add support for rename key within a bucket for rest client. Contributed by Lokesh Jain.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41328556
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41328556
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41328556
Branch: refs/heads/HDDS-4
Commit: 413285569a897a4747880ee2c1f23a38609d1362
Parents: 0ff9456
Author: Mukul Kumar Singh <ms...@apache.org>
Authored: Sat May 12 17:40:29 2018 +0530
Committer: Mukul Kumar Singh <ms...@apache.org>
Committed: Sat May 12 17:40:29 2018 +0530
----------------------------------------------------------------------
.../hadoop/ozone/client/rest/RestClient.java | 16 ++++++-
.../ozone/client/rest/headers/Header.java | 2 +
.../ozone/client/rest/TestOzoneRestClient.java | 30 +++++++++++++
.../hadoop/ozone/web/handlers/KeyHandler.java | 45 ++++++++++++++++++++
.../hadoop/ozone/web/interfaces/Keys.java | 31 ++++++++++++++
5 files changed, 123 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/41328556/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
index 1fd2091..ac71abe 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
@@ -678,7 +678,21 @@ public class RestClient implements ClientProtocol {
@Override
public void renameKey(String volumeName, String bucketName,
String fromKeyName, String toKeyName) throws IOException {
- throw new UnsupportedOperationException("Not yet implemented.");
+ try {
+ Preconditions.checkNotNull(volumeName);
+ Preconditions.checkNotNull(bucketName);
+ Preconditions.checkNotNull(fromKeyName);
+ Preconditions.checkNotNull(toKeyName);
+ URIBuilder builder = new URIBuilder(ozoneRestUri);
+ builder.setPath(PATH_SEPARATOR + volumeName + PATH_SEPARATOR + bucketName
+ + PATH_SEPARATOR + fromKeyName);
+ builder.addParameter(Header.OZONE_RENAME_TO_KEY_PARAM_NAME, toKeyName);
+ HttpPost httpPost = new HttpPost(builder.build());
+ addOzoneHeaders(httpPost);
+ EntityUtils.consume(executeHttpRequest(httpPost));
+ } catch (URISyntaxException e) {
+ throw new IOException(e);
+ }
}
@Override
http://git-wip-us.apache.org/repos/asf/hadoop/blob/41328556/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java
index 00d4857..ebfc0a9 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java
@@ -65,6 +65,8 @@ public final class Header {
public static final String OZONE_LIST_QUERY_PREVKEY="prev-key";
public static final String OZONE_LIST_QUERY_ROOTSCAN="root-scan";
+ public static final String OZONE_RENAME_TO_KEY_PARAM_NAME = "toKey";
+
private Header() {
// Never constructed.
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/41328556/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
index a94ee6c..9918d63 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
@@ -389,6 +389,36 @@ public class TestOzoneRestClient {
bucket.getKey(keyName);
}
+ @Test
+ public void testRenameKey()
+ throws IOException, OzoneException {
+ String volumeName = UUID.randomUUID().toString();
+ String bucketName = UUID.randomUUID().toString();
+ String fromKeyName = UUID.randomUUID().toString();
+ String value = "sample value";
+ store.createVolume(volumeName);
+ OzoneVolume volume = store.getVolume(volumeName);
+ volume.createBucket(bucketName);
+ OzoneBucket bucket = volume.getBucket(bucketName);
+ OzoneOutputStream out = bucket.createKey(fromKeyName,
+ value.getBytes().length, ReplicationType.STAND_ALONE,
+ ReplicationFactor.ONE);
+ out.write(value.getBytes());
+ out.close();
+ OzoneKey key = bucket.getKey(fromKeyName);
+ Assert.assertEquals(fromKeyName, key.getName());
+
+ String toKeyName = UUID.randomUUID().toString();
+ bucket.renameKey(fromKeyName, toKeyName);
+
+ key = bucket.getKey(toKeyName);
+ Assert.assertEquals(toKeyName, key.getName());
+
+ // Lookup for old key should fail.
+ thrown.expectMessage("Lookup key failed, error");
+ bucket.getKey(fromKeyName);
+ }
+
/**
* Close OzoneClient and shutdown MiniDFSCluster.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/41328556/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyHandler.java
index d4c5a79..8c0b103 100644
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyHandler.java
+++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyHandler.java
@@ -242,4 +242,49 @@ public class KeyHandler implements Keys {
}
}.handleCall(volume, bucket, keys, req, headers, info, null);
}
+
+ /**
+ * Renames an existing key within a bucket.
+ *
+ * @param volume Storage Volume Name
+ * @param bucket Name of the bucket
+ * @param key Name of the Object
+ * @param toKeyName New name of the Object
+ * @param req http Request
+ * @param info UriInfo
+ * @param headers HttpHeaders
+ * @return Response
+ * @throws OzoneException
+ */
+ @Override
+ public Response renameKey(String volume, String bucket, String key,
+ String toKeyName, Request req, UriInfo info, HttpHeaders headers)
+ throws OzoneException {
+ return new KeyProcessTemplate() {
+ /**
+ * Abstract function that gets implemented in the KeyHandler functions.
+ * This function deals only with the core file-system-related logic
+ * and relies on the handleCall function for repetitive error checks.
+ *
+ * @param args - parsed bucket args, name, userName, ACLs etc
+ * @param input - The body as an Input Stream
+ * @param request - Http request
+ * @param headers - Parsed http Headers.
+ * @param info - UriInfo
+ *
+ * @return Response
+ *
+ * @throws IOException - From the file system operations
+ */
+ @Override
+ public Response doProcess(KeyArgs args, InputStream input,
+ Request request, HttpHeaders headers,
+ UriInfo info)
+ throws IOException, OzoneException, NoSuchAlgorithmException {
+ StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
+ fs.renameKey(args, toKeyName);
+ return OzoneRestUtils.getResponse(args, HTTP_OK, "");
+ }
+ }.handleCall(volume, bucket, key, req, headers, info, null);
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/41328556/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Keys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Keys.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Keys.java
index f9255f2..1ce81c2 100644
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Keys.java
+++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/Keys.java
@@ -29,6 +29,7 @@ import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.PUT;
+import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.QueryParam;
@@ -142,5 +143,35 @@ public interface Keys {
@PathParam("bucket") String bucket, @PathParam("keys") String keys,
@Context Request req, @Context UriInfo info, @Context HttpHeaders headers)
throws OzoneException;
+
+ /**
+ * Renames an existing key within a bucket.
+ *
+ * @param volume Storage Volume Name
+ * @param bucket Name of the bucket
+ * @param keys Name of the Object
+ * @param req http Request
+ * @param headers HttpHeaders
+ *
+ * @return Response
+ *
+ * @throws OzoneException
+ */
+ @POST
+ @ApiOperation("Renames an existing key within a bucket")
+ @ApiImplicitParams({
+ @ApiImplicitParam(name = "x-ozone-version", example = "v1", required =
+ true, paramType = "header"),
+ @ApiImplicitParam(name = "x-ozone-user", example = "user", required =
+ true, paramType = "header"),
+ @ApiImplicitParam(name = "Date", example = "Date: Mon, 26 Jun 2017 "
+ + "04:23:30 GMT", required = true, paramType = "header"),
+ @ApiImplicitParam(name = "Authorization", example = "OZONE", required =
+ true, paramType = "header")})
+ Response renameKey(@PathParam("volume") String volume,
+ @PathParam("bucket") String bucket, @PathParam("keys") String keys,
+ @QueryParam(Header.OZONE_RENAME_TO_KEY_PARAM_NAME) String toKeyName,
+ @Context Request req, @Context UriInfo info, @Context HttpHeaders headers)
+ throws OzoneException;
}
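Reading the JAX-RS annotations above, the rename maps onto a plain HTTP POST against the key path with a toKey query parameter; a hedged sketch of the wire request, where the host, port, volume, bucket, and key names are all placeholders (the header examples are lifted from the @ApiImplicitParam annotations):

POST /volume1/bucket1/oldKey?toKey=newKey HTTP/1.1
Host: datanode.example.com:9880
x-ozone-version: v1
x-ozone-user: user
Date: Mon, 26 Jun 2017 04:23:30 GMT
Authorization: OZONE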
---------------------------------------------------------------------
[12/50] [abbrv] hadoop git commit: YARN-8268. Fair scheduler:
reservable queue is configured both as parent and leaf queue. (Gergo Repas
via Haibo Chen)
Posted by xy...@apache.org.
YARN-8268. Fair scheduler: reservable queue is configured both as parent and leaf queue. (Gergo Repas via Haibo Chen)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f10a360
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f10a360
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f10a360
Branch: refs/heads/HDDS-4
Commit: 1f10a360219c91ac13d31bdb5c8d302b1b45afc3
Parents: 8f7912e
Author: Haibo Chen <ha...@apache.org>
Authored: Fri May 11 11:28:05 2018 -0700
Committer: Haibo Chen <ha...@apache.org>
Committed: Fri May 11 11:34:00 2018 -0700
----------------------------------------------------------------------
.../fair/allocation/AllocationFileQueueParser.java | 5 ++++-
.../scheduler/fair/TestAllocationFileLoaderService.java | 10 ++++++++++
2 files changed, 14 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f10a360/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
index ec7e4a4..d5a436e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
@@ -217,7 +217,10 @@ public class AllocationFileQueueParser {
// if a leaf in the alloc file is marked as type='parent'
// then store it as a parent queue
if (isLeaf && !"parent".equals(element.getAttribute("type"))) {
- builder.configuredQueues(FSQueueType.LEAF, queueName);
+ // a reservable queue has already been configured as a parent
+ if (!isReservable) {
+ builder.configuredQueues(FSQueueType.LEAF, queueName);
+ }
} else {
if (isReservable) {
throw new AllocationConfigurationException("The configuration settings"
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f10a360/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
index 5522333..8591d67 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
@@ -42,6 +42,9 @@ import java.net.URISyntaxException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
@@ -801,6 +804,13 @@ public class TestAllocationFileLoaderService {
String nonreservableQueueName = "root.other";
assertFalse(allocConf.isReservable(nonreservableQueueName));
assertTrue(allocConf.isReservable(reservableQueueName));
+ Map<FSQueueType, Set<String>> configuredQueues =
+ allocConf.getConfiguredQueues();
+ assertTrue("reservable queue is expected be to a parent queue",
+ configuredQueues.get(FSQueueType.PARENT).contains(reservableQueueName));
+ assertFalse("reservable queue should not be a leaf queue",
+ configuredQueues.get(FSQueueType.LEAF)
+ .contains(reservableQueueName));
assertTrue(allocConf.getMoveOnExpiry(reservableQueueName));
assertEquals(ReservationSchedulerConfiguration.DEFAULT_RESERVATION_WINDOW,
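For context, a queue becomes reservable when its definition in the fair scheduler allocation file contains a reservation element; a hedged sketch of such a file, with queue names following the root.reservable / root.other names used in the test:

<?xml version="1.0"?>
<allocations>
  <queue name="reservable">
    <!-- A <reservation> element marks the queue reservable; with this fix
         such a queue is registered only as a PARENT queue, not also as a
         LEAF. -->
    <reservation></reservation>
  </queue>
  <queue name="other"/>
</allocations>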
---------------------------------------------------------------------
[15/50] [abbrv] hadoop git commit: YARN-8243. Flex down should remove
instance with largest component instance ID first. Contributed by Gour Saha
Posted by xy...@apache.org.
YARN-8243. Flex down should remove instance with largest component instance ID first. Contributed by Gour Saha
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca612e35
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca612e35
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca612e35
Branch: refs/heads/HDDS-4
Commit: ca612e353fc3e3766868ec0816de035e48b1f5b4
Parents: dc91299
Author: Billie Rinaldi <bi...@apache.org>
Authored: Fri May 11 07:27:35 2018 -0700
Committer: Billie Rinaldi <bi...@apache.org>
Committed: Fri May 11 12:49:05 2018 -0700
----------------------------------------------------------------------
.../hadoop/yarn/service/ServiceManager.java | 5 +--
.../hadoop/yarn/service/ServiceMaster.java | 39 +++++++++----------
.../yarn/service/component/Component.java | 26 ++++++++-----
.../component/instance/ComponentInstance.java | 9 +----
.../yarn/service/TestYarnNativeServices.java | 40 +++++++++++++++++++-
5 files changed, 75 insertions(+), 44 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca612e35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java
index 869d7f3..e6a38dc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java
@@ -237,12 +237,11 @@ public class ServiceManager implements EventHandler<ServiceEvent> {
* ServiceMaster.checkAndUpdateServiceState here to make it easy to fix
* this in future.
*/
- public void checkAndUpdateServiceState(boolean isIncrement) {
+ public void checkAndUpdateServiceState() {
writeLock.lock();
try {
if (!getState().equals(State.UPGRADING)) {
- ServiceMaster.checkAndUpdateServiceState(this.scheduler,
- isIncrement);
+ ServiceMaster.checkAndUpdateServiceState(this.scheduler);
}
} finally {
writeLock.unlock();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca612e35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
index 0383a65..28881ac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
@@ -264,30 +264,25 @@ public class ServiceMaster extends CompositeService {
// This method should be called whenever there is an increment or decrement
// of a READY state component of a service
public static synchronized void checkAndUpdateServiceState(
- ServiceScheduler scheduler, boolean isIncrement) {
+ ServiceScheduler scheduler) {
ServiceState curState = scheduler.getApp().getState();
- if (!isIncrement) {
- // set it to STARTED every time a component moves out of STABLE state
- scheduler.getApp().setState(ServiceState.STARTED);
- } else {
- // otherwise check the state of all components
- boolean isStable = true;
- for (org.apache.hadoop.yarn.service.api.records.Component comp : scheduler
- .getApp().getComponents()) {
- if (comp.getState() !=
- org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE) {
- isStable = false;
- break;
- }
+ // Check the state of all components
+ boolean isStable = true;
+ for (org.apache.hadoop.yarn.service.api.records.Component comp : scheduler
+ .getApp().getComponents()) {
+ if (comp.getState() !=
+ org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE) {
+ isStable = false;
+ break;
}
- if (isStable) {
- scheduler.getApp().setState(ServiceState.STABLE);
- } else {
- // mark new state as started only if current state is stable, otherwise
- // leave it as is
- if (curState == ServiceState.STABLE) {
- scheduler.getApp().setState(ServiceState.STARTED);
- }
+ }
+ if (isStable) {
+ scheduler.getApp().setState(ServiceState.STABLE);
+ } else {
+ // mark new state as started only if current state is stable, otherwise
+ // leave it as is
+ if (curState == ServiceState.STABLE) {
+ scheduler.getApp().setState(ServiceState.STARTED);
}
}
if (curState != scheduler.getApp().getState()) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca612e35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
index e115841..7979c19 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
@@ -323,7 +323,7 @@ public class Component implements EventHandler<ComponentEvent> {
org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING);
component.getScheduler().getApp().setState(ServiceState.STARTED);
return FLEXING;
- } else if (delta < 0){
+ } else if (delta < 0) {
delta = 0 - delta;
// scale down
LOG.info("[FLEX DOWN COMPONENT " + component.getName()
@@ -343,7 +343,9 @@ public class Component implements EventHandler<ComponentEvent> {
instance.destroy();
}
checkAndUpdateComponentState(component, false);
- return STABLE;
+ return component.componentSpec.getState()
+ == org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE
+ ? STABLE : FLEXING;
} else {
LOG.info("[FLEX COMPONENT " + component.getName() + "]: already has " +
event.getDesired() + " instances, ignoring");
@@ -440,7 +442,7 @@ public class Component implements EventHandler<ComponentEvent> {
component.componentSpec.getState());
}
// component state change will trigger re-check of service state
- component.context.getServiceManager().checkAndUpdateServiceState(true);
+ component.context.getServiceManager().checkAndUpdateServiceState();
}
} else {
// container moving out of READY state could be because of FLEX down so
@@ -449,14 +451,18 @@ public class Component implements EventHandler<ComponentEvent> {
.value() < component.componentMetrics.containersDesired.value()) {
component.componentSpec.setState(
org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING);
- if (curState != component.componentSpec.getState()) {
- LOG.info("[COMPONENT {}] state changed from {} -> {}",
- component.componentSpec.getName(), curState,
- component.componentSpec.getState());
- }
- // component state change will trigger re-check of service state
- component.context.getServiceManager().checkAndUpdateServiceState(false);
+ } else if (component.componentMetrics.containersReady
+ .value() == component.componentMetrics.containersDesired.value()) {
+ component.componentSpec.setState(
+ org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE);
+ }
+ if (curState != component.componentSpec.getState()) {
+ LOG.info("[COMPONENT {}] state changed from {} -> {}",
+ component.componentSpec.getName(), curState,
+ component.componentSpec.getState());
}
+ // component state change will trigger re-check of service state
+ component.context.getServiceManager().checkAndUpdateServiceState();
}
// when the service is stable then the state of the component needs to
// transition to stable
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca612e35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
index 9d0a56b..4aca0ea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
@@ -581,14 +581,7 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
@Override
public int compareTo(ComponentInstance to) {
- long delta = containerStartedTime - to.containerStartedTime;
- if (delta == 0) {
- return getCompInstanceId().compareTo(to.getCompInstanceId());
- } else if (delta < 0) {
- return -1;
- } else {
- return 1;
- }
+ return getCompInstanceId().compareTo(to.getCompInstanceId());
}
@Override public boolean equals(Object o) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca612e35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
index 5b608e3..ae209b9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
@@ -484,8 +484,37 @@ public class TestYarnNativeServices extends ServiceTestUtils {
}
}
- // Flex compa up to 4, which is more containers than the no of NMs
+ // Flex compa up to 5, which is more containers than the no of NMs
Map<String, Long> compCounts = new HashMap<>();
+ compCounts.put("compa", 5L);
+ exampleApp.getComponent("compa").setNumberOfContainers(5L);
+ client.flexByRestService(exampleApp.getName(), compCounts);
+ try {
+ // 10 secs is enough for the container to be started. The downside of
+ // this test is that it has to wait that long. Setting a higher wait time
+ // will add to the total time taken by tests to run.
+ waitForServiceToBeStable(client, exampleApp, 10000);
+ Assert.fail("Service should not be in a stable state. It should throw "
+ + "a timeout exception.");
+ } catch (Exception e) {
+ // Check that the service state is not STABLE, that only 3 containers
+ // are running, and that the fourth one does not get allocated.
+ service = client.getStatus(exampleApp.getName());
+ component = service.getComponent("compa");
+ Assert.assertNotEquals("Service state should not be STABLE",
+ ServiceState.STABLE, service.getState());
+ Assert.assertEquals("Component state should be FLEXING",
+ ComponentState.FLEXING, component.getState());
+ Assert.assertEquals("3 containers are expected to be running", 3,
+ component.getContainers().size());
+ }
+
+ // Flex compa down to 4 now, which is still more containers than the no of
+ // NMs. This tests the use case that flex down does not kill any of the
+ // currently running containers, since the required number of containers
+ // is still higher than the number currently running. However,
+ // component state will still be FLEXING and service state not STABLE.
+ compCounts = new HashMap<>();
compCounts.put("compa", 4L);
exampleApp.getComponent("compa").setNumberOfContainers(4L);
client.flexByRestService(exampleApp.getName(), compCounts);
@@ -509,6 +538,15 @@ public class TestYarnNativeServices extends ServiceTestUtils {
component.getContainers().size());
}
+ // Finally flex compa down to 3, which is exactly the number of containers
+ // currently running. This will bring the component and service states to
+ // STABLE.
+ compCounts = new HashMap<>();
+ compCounts.put("compa", 3L);
+ exampleApp.getComponent("compa").setNumberOfContainers(3L);
+ client.flexByRestService(exampleApp.getName(), compCounts);
+ waitForServiceToBeStable(client, exampleApp);
+
LOG.info("Stop/destroy service {}", exampleApp);
client.actionStop(exampleApp.getName(), true);
client.actionDestroy(exampleApp.getName());
[26/50] [abbrv] hadoop git commit: HDDS-51. Fix
TestDeletedBlockLog#testDeletedBlockTransactions. Contributed by Mukul Kumar
Singh.
Posted by xy...@apache.org.
HDDS-51. Fix TestDeletedBlockLog#testDeletedBlockTransactions.
Contributed by Mukul Kumar Singh.
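The flakiness came from the final transaction reusing a container ID that one of the datanodes had already been assigned; the fix picks a fresh random ID (see the RandomUtils.nextLong() line in the hunk below). A minimal sketch of that idea; the usedIds set and the retry loop are illustrative assumptions, the patch itself calls RandomUtils.nextLong() once:

    import java.util.Set;
    import org.apache.commons.lang3.RandomUtils;

    // Hedged sketch: choose a container ID that cannot collide with the
    // IDs already in use, so the new TX is not rejected as a duplicate.
    static long freshContainerId(Set<Long> usedIds) {
      long id;
      do {
        id = RandomUtils.nextLong(); // same utility the patch imports
      } while (usedIds.contains(id));
      return id;
    }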
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32cbd0cf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32cbd0cf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32cbd0cf
Branch: refs/heads/HDDS-4
Commit: 32cbd0cfd696a8293ac1c726081da9191cacecee
Parents: 53c8ebc
Author: Anu Engineer <ae...@apache.org>
Authored: Sat May 12 13:37:34 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Sat May 12 13:37:34 2018 -0700
----------------------------------------------------------------------
.../apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/32cbd0cf/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
index f872e23..8c12806 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdds.scm.block;
import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.scm.container.ContainerMapping;
import org.apache.hadoop.hdds.scm.container.Mapping;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
@@ -88,12 +89,10 @@ public class TestDeletedBlockLog {
int continerIDBase = random.nextInt(100);
int localIDBase = random.nextInt(1000);
for (int i = 0; i < dataSize; i++) {
- //String containerName = "container-" + UUID.randomUUID().toString();
long containerID = continerIDBase + i;
List<Long> blocks = new ArrayList<>();
int blockSize = random.nextInt(30) + 1;
for (int j = 0; j < blockSize; j++) {
- //blocks.add("block-" + UUID.randomUUID().toString());
long localID = localIDBase + j;
blocks.add(localID);
}
@@ -266,7 +265,7 @@ public class TestDeletedBlockLog {
int count = 0;
long containerID = 0L;
- DatanodeDetails dnDd1 = DatanodeDetails.newBuilder()
+ DatanodeDetails dnId1 = DatanodeDetails.newBuilder()
.setUuid(UUID.randomUUID().toString())
.setIpAddress("127.0.0.1")
.setHostName("localhost")
@@ -293,7 +292,7 @@ public class TestDeletedBlockLog {
// make TX[1-6] for datanode1; TX[7-10] for datanode2
if (count <= (maximumAllowedTXNum + 1)) {
- mockContainerInfo(mappingService, containerID, dnDd1);
+ mockContainerInfo(mappingService, containerID, dnId1);
} else {
mockContainerInfo(mappingService, containerID, dnId2);
}
@@ -323,7 +322,7 @@ public class TestDeletedBlockLog {
Assert.assertFalse(transactions.isFull());
// The number of TXs in dnId1 won't exceed the maximum value.
Assert.assertEquals(maximumAllowedTXNum,
- transactions.getDatanodeTransactions(dnDd1.getUuid()).size());
+ transactions.getDatanodeTransactions(dnId1.getUuid()).size());
int size = transactions.getDatanodeTransactions(dnId2.getUuid()).size();
// add a duplicated container in dnId2; this should fail.
@@ -339,6 +338,7 @@ public class TestDeletedBlockLog {
transactions.getDatanodeTransactions(dnId2.getUuid()).size());
// Add a new TX in dnId2; then dnId2 will reach the maximum value.
+ containerID = RandomUtils.nextLong();
builder = DeletedBlocksTransaction.newBuilder();
builder.setTxID(12);
builder.setContainerID(containerID);
[35/50] [abbrv] hadoop git commit: HDFS-13544. Improve logging for
JournalNode in federated cluster.
Posted by xy...@apache.org.
HDFS-13544. Improve logging for JournalNode in federated cluster.
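Every message in the hunks below gains a " ; journal id: <id>" suffix so that a JournalNode serving several namespaces in a federated cluster produces logs that can be told apart. A hedged sketch of the same idea factored into a helper; jidSuffix is hypothetical, the patch inlines the concatenation at each call site:

    // Hedged sketch: centralize the suffix the patch appends inline.
    // jidSuffix is a hypothetical helper, not part of the patch.
    private String jidSuffix() {
      return " ; journal id: " + journalId;
    }

    // Usage, mirroring the first hunk:
    // LOG.info("Latest log is " + latestLog + jidSuffix());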
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6beb25ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6beb25ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6beb25ab
Branch: refs/heads/HDDS-4
Commit: 6beb25ab7e4f5454dba0315a296081e61753f301
Parents: 6653f4b
Author: Hanisha Koneru <ha...@apache.org>
Authored: Mon May 14 10:12:08 2018 -0700
Committer: Hanisha Koneru <ha...@apache.org>
Committed: Mon May 14 10:12:08 2018 -0700
----------------------------------------------------------------------
.../hadoop/hdfs/qjournal/server/Journal.java | 115 +++++++++++--------
1 file changed, 64 insertions(+), 51 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6beb25ab/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 408ce76..452664a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -208,11 +208,12 @@ public class Journal implements Closeable {
while (!files.isEmpty()) {
EditLogFile latestLog = files.remove(files.size() - 1);
latestLog.scanLog(Long.MAX_VALUE, false);
- LOG.info("Latest log is " + latestLog);
+ LOG.info("Latest log is " + latestLog + " ; journal id: " + journalId);
if (latestLog.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
// the log contains no transactions
LOG.warn("Latest log " + latestLog + " has no transactions. " +
- "moving it aside and looking for previous log");
+ "moving it aside and looking for previous log"
+ + " ; journal id: " + journalId);
latestLog.moveAsideEmptyFile();
} else {
return latestLog;
@@ -230,7 +231,7 @@ public class Journal implements Closeable {
Preconditions.checkState(nsInfo.getNamespaceID() != 0,
"can't format with uninitialized namespace info: %s",
nsInfo);
- LOG.info("Formatting " + this + " with namespace info: " +
+ LOG.info("Formatting journal id : " + journalId + " with namespace info: " +
nsInfo);
storage.format(nsInfo);
refreshCachedData();
@@ -323,7 +324,7 @@ public class Journal implements Closeable {
// any other that we've promised.
if (epoch <= getLastPromisedEpoch()) {
throw new IOException("Proposed epoch " + epoch + " <= last promise " +
- getLastPromisedEpoch());
+ getLastPromisedEpoch() + " ; journal id: " + journalId);
}
updateLastPromisedEpoch(epoch);
@@ -343,7 +344,8 @@ public class Journal implements Closeable {
private void updateLastPromisedEpoch(long newEpoch) throws IOException {
LOG.info("Updating lastPromisedEpoch from " + lastPromisedEpoch.get() +
- " to " + newEpoch + " for client " + Server.getRemoteIp());
+ " to " + newEpoch + " for client " + Server.getRemoteIp() +
+ " ; journal id: " + journalId);
lastPromisedEpoch.set(newEpoch);
// Since we have a new writer, reset the IPC serial - it will start
@@ -378,7 +380,7 @@ public class Journal implements Closeable {
}
checkSync(curSegment != null,
- "Can't write, no segment open");
+ "Can't write, no segment open" + " ; journal id: " + journalId);
if (curSegmentTxId != segmentTxId) {
// Sanity check: it is possible that the writer will fail IPCs
@@ -389,17 +391,20 @@ public class Journal implements Closeable {
// and throw an exception.
JournalOutOfSyncException e = new JournalOutOfSyncException(
"Writer out of sync: it thinks it is writing segment " + segmentTxId
- + " but current segment is " + curSegmentTxId);
+ + " but current segment is " + curSegmentTxId
+ + " ; journal id: " + journalId);
abortCurSegment();
throw e;
}
checkSync(nextTxId == firstTxnId,
- "Can't write txid " + firstTxnId + " expecting nextTxId=" + nextTxId);
+ "Can't write txid " + firstTxnId + " expecting nextTxId=" + nextTxId
+ + " ; journal id: " + journalId);
long lastTxnId = firstTxnId + numTxns - 1;
if (LOG.isTraceEnabled()) {
- LOG.trace("Writing txid " + firstTxnId + "-" + lastTxnId);
+ LOG.trace("Writing txid " + firstTxnId + "-" + lastTxnId +
+ " ; journal id: " + journalId);
}
// If the edit has already been marked as committed, we know
@@ -423,7 +428,7 @@ public class Journal implements Closeable {
if (milliSeconds > WARN_SYNC_MILLIS_THRESHOLD) {
LOG.warn("Sync of transaction range " + firstTxnId + "-" + lastTxnId +
- " took " + milliSeconds + "ms");
+ " took " + milliSeconds + "ms" + " ; journal id: " + journalId);
}
if (isLagging) {
@@ -455,7 +460,7 @@ public class Journal implements Closeable {
if (reqInfo.getEpoch() < lastPromisedEpoch.get()) {
throw new IOException("IPC's epoch " + reqInfo.getEpoch() +
" is less than the last promised epoch " +
- lastPromisedEpoch.get());
+ lastPromisedEpoch.get() + " ; journal id: " + journalId);
} else if (reqInfo.getEpoch() > lastPromisedEpoch.get()) {
// A newer client has arrived. Fence any previous writers by updating
// the promise.
@@ -465,16 +470,16 @@ public class Journal implements Closeable {
// Ensure that the IPCs are arriving in-order as expected.
checkSync(reqInfo.getIpcSerialNumber() > currentEpochIpcSerial,
"IPC serial %s from client %s was not higher than prior highest " +
- "IPC serial %s", reqInfo.getIpcSerialNumber(),
- Server.getRemoteIp(),
- currentEpochIpcSerial);
+ "IPC serial %s ; journal id: %s", reqInfo.getIpcSerialNumber(),
+ Server.getRemoteIp(), currentEpochIpcSerial, journalId);
currentEpochIpcSerial = reqInfo.getIpcSerialNumber();
if (reqInfo.hasCommittedTxId()) {
Preconditions.checkArgument(
reqInfo.getCommittedTxId() >= committedTxnId.get(),
"Client trying to move committed txid backward from " +
- committedTxnId.get() + " to " + reqInfo.getCommittedTxId());
+ committedTxnId.get() + " to " + reqInfo.getCommittedTxId() +
+ " ; journal id: " + journalId);
committedTxnId.set(reqInfo.getCommittedTxId());
}
@@ -486,7 +491,7 @@ public class Journal implements Closeable {
if (reqInfo.getEpoch() != lastWriterEpoch.get()) {
throw new IOException("IPC's epoch " + reqInfo.getEpoch() +
" is not the current writer epoch " +
- lastWriterEpoch.get());
+ lastWriterEpoch.get() + " ; journal id: " + journalId);
}
}
@@ -497,7 +502,8 @@ public class Journal implements Closeable {
private void checkFormatted() throws JournalNotFormattedException {
if (!isFormatted()) {
throw new JournalNotFormattedException("Journal " +
- storage.getSingularStorageDir() + " not formatted");
+ storage.getSingularStorageDir() + " not formatted" +
+ " ; journal id: " + journalId);
}
}
@@ -542,7 +548,8 @@ public class Journal implements Closeable {
if (curSegment != null) {
LOG.warn("Client is requesting a new log segment " + txid +
" though we are already writing " + curSegment + ". " +
- "Aborting the current segment in order to begin the new one.");
+ "Aborting the current segment in order to begin the new one." +
+ " ; journal id: " + journalId);
// The writer may have lost a connection to us and is now
// re-connecting after the connection came back.
// We should abort our own old segment.
@@ -556,7 +563,7 @@ public class Journal implements Closeable {
if (existing != null) {
if (!existing.isInProgress()) {
throw new IllegalStateException("Already have a finalized segment " +
- existing + " beginning at " + txid);
+ existing + " beginning at " + txid + " ; journal id: " + journalId);
}
// If it's in-progress, it should only contain one transaction,
@@ -565,7 +572,8 @@ public class Journal implements Closeable {
existing.scanLog(Long.MAX_VALUE, false);
if (existing.getLastTxId() != existing.getFirstTxId()) {
throw new IllegalStateException("The log file " +
- existing + " seems to contain valid transactions");
+ existing + " seems to contain valid transactions" +
+ " ; journal id: " + journalId);
}
}
@@ -573,7 +581,7 @@ public class Journal implements Closeable {
if (curLastWriterEpoch != reqInfo.getEpoch()) {
LOG.info("Updating lastWriterEpoch from " + curLastWriterEpoch +
" to " + reqInfo.getEpoch() + " for client " +
- Server.getRemoteIp());
+ Server.getRemoteIp() + " ; journal id: " + journalId);
lastWriterEpoch.set(reqInfo.getEpoch());
}
@@ -608,8 +616,8 @@ public class Journal implements Closeable {
checkSync(nextTxId == endTxId + 1,
"Trying to finalize in-progress log segment %s to end at " +
- "txid %s but only written up to txid %s",
- startTxId, endTxId, nextTxId - 1);
+ "txid %s but only written up to txid %s ; journal id: %s",
+ startTxId, endTxId, nextTxId - 1, journalId);
// No need to validate the edit log if the client is finalizing
// the log segment that it was just writing to.
needsValidation = false;
@@ -618,25 +626,27 @@ public class Journal implements Closeable {
FileJournalManager.EditLogFile elf = fjm.getLogFile(startTxId);
if (elf == null) {
throw new JournalOutOfSyncException("No log file to finalize at " +
- "transaction ID " + startTxId);
+ "transaction ID " + startTxId + " ; journal id: " + journalId);
}
if (elf.isInProgress()) {
if (needsValidation) {
LOG.info("Validating log segment " + elf.getFile() + " about to be " +
- "finalized");
+ "finalized ; journal id: " + journalId);
elf.scanLog(Long.MAX_VALUE, false);
checkSync(elf.getLastTxId() == endTxId,
"Trying to finalize in-progress log segment %s to end at " +
- "txid %s but log %s on disk only contains up to txid %s",
- startTxId, endTxId, elf.getFile(), elf.getLastTxId());
+ "txid %s but log %s on disk only contains up to txid %s " +
+ "; journal id: %s",
+ startTxId, endTxId, elf.getFile(), elf.getLastTxId(), journalId);
}
fjm.finalizeLogSegment(startTxId, endTxId);
} else {
Preconditions.checkArgument(endTxId == elf.getLastTxId(),
"Trying to re-finalize already finalized log " +
- elf + " with different endTxId " + endTxId);
+ elf + " with different endTxId " + endTxId +
+ " ; journal id: " + journalId);
}
// Once logs are finalized, a different length will never be decided.
@@ -667,7 +677,8 @@ public class Journal implements Closeable {
File paxosFile = storage.getPaxosFile(segmentTxId);
if (paxosFile.exists()) {
if (!paxosFile.delete()) {
- throw new IOException("Unable to delete paxos file " + paxosFile);
+ throw new IOException("Unable to delete paxos file " + paxosFile +
+ " ; journal id: " + journalId);
}
}
}
@@ -717,7 +728,7 @@ public class Journal implements Closeable {
}
if (elf.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
LOG.info("Edit log file " + elf + " appears to be empty. " +
- "Moving it aside...");
+ "Moving it aside..." + " ; journal id: " + journalId);
elf.moveAsideEmptyFile();
return null;
}
@@ -727,7 +738,7 @@ public class Journal implements Closeable {
.setIsInProgress(elf.isInProgress())
.build();
LOG.info("getSegmentInfo(" + segmentTxId + "): " + elf + " -> " +
- TextFormat.shortDebugString(ret));
+ TextFormat.shortDebugString(ret) + " ; journal id: " + journalId);
return ret;
}
@@ -771,7 +782,7 @@ public class Journal implements Closeable {
PrepareRecoveryResponseProto resp = builder.build();
LOG.info("Prepared recovery for segment " + segmentTxId + ": " +
- TextFormat.shortDebugString(resp));
+ TextFormat.shortDebugString(resp) + " ; journal id: " + journalId);
return resp;
}
@@ -792,8 +803,8 @@ public class Journal implements Closeable {
// at least one transaction.
Preconditions.checkArgument(segment.getEndTxId() > 0 &&
segment.getEndTxId() >= segmentTxId,
- "bad recovery state for segment %s: %s",
- segmentTxId, TextFormat.shortDebugString(segment));
+ "bad recovery state for segment %s: %s ; journal id: %s",
+ segmentTxId, TextFormat.shortDebugString(segment), journalId);
PersistedRecoveryPaxosData oldData = getPersistedPaxosData(segmentTxId);
PersistedRecoveryPaxosData newData = PersistedRecoveryPaxosData.newBuilder()
@@ -806,8 +817,9 @@ public class Journal implements Closeable {
// checkRequest() call above should filter non-increasing epoch numbers.
if (oldData != null) {
alwaysAssert(oldData.getAcceptedInEpoch() <= reqInfo.getEpoch(),
- "Bad paxos transition, out-of-order epochs.\nOld: %s\nNew: %s\n",
- oldData, newData);
+ "Bad paxos transition, out-of-order epochs.\nOld: %s\nNew: " +
+ "%s\nJournalId: %s\n",
+ oldData, newData, journalId);
}
File syncedFile = null;
@@ -817,7 +829,7 @@ public class Journal implements Closeable {
currentSegment.getEndTxId() != segment.getEndTxId()) {
if (currentSegment == null) {
LOG.info("Synchronizing log " + TextFormat.shortDebugString(segment) +
- ": no current segment in place");
+ ": no current segment in place ; journal id: " + journalId);
// Update the highest txid for lag metrics
updateHighestWrittenTxId(Math.max(segment.getEndTxId(),
@@ -825,7 +837,7 @@ public class Journal implements Closeable {
} else {
LOG.info("Synchronizing log " + TextFormat.shortDebugString(segment) +
": old segment " + TextFormat.shortDebugString(currentSegment) +
- " is not the right length");
+ " is not the right length ; journal id: " + journalId);
// Paranoid sanity check: if the new log is shorter than the log we
// currently have, we should not end up discarding any transactions
@@ -838,14 +850,15 @@ public class Journal implements Closeable {
" with new segment " +
TextFormat.shortDebugString(segment) +
": would discard already-committed txn " +
- committedTxnId.get());
+ committedTxnId.get() +
+ " ; journal id: " + journalId);
}
// Another paranoid check: we should not be asked to synchronize a log
// on top of a finalized segment.
alwaysAssert(currentSegment.getIsInProgress(),
- "Should never be asked to synchronize a different log on top of an " +
- "already-finalized segment");
+ "Should never be asked to synchronize a different log on top of " +
+ "an already-finalized segment ; journal id: " + journalId);
// If we're shortening the log, update our highest txid
// used for lag metrics.
@@ -858,7 +871,7 @@ public class Journal implements Closeable {
} else {
LOG.info("Skipping download of log " +
TextFormat.shortDebugString(segment) +
- ": already have up-to-date logs");
+ ": already have up-to-date logs ; journal id: " + journalId);
}
// This is one of the few places in the protocol where we have a single
@@ -890,12 +903,12 @@ public class Journal implements Closeable {
}
LOG.info("Accepted recovery for segment " + segmentTxId + ": " +
- TextFormat.shortDebugString(newData));
+ TextFormat.shortDebugString(newData) + " ; journal id: " + journalId);
}
private LongRange txnRange(SegmentStateProto seg) {
Preconditions.checkArgument(seg.hasEndTxId(),
- "invalid segment: %s", seg);
+ "invalid segment: %s ; journal id: %s", seg, journalId);
return new LongRange(seg.getStartTxId(), seg.getEndTxId());
}
@@ -970,7 +983,7 @@ public class Journal implements Closeable {
if (tmp.exists()) {
File dst = storage.getInProgressEditLog(segmentId);
LOG.info("Rolling forward previously half-completed synchronization: " +
- tmp + " -> " + dst);
+ tmp + " -> " + dst + " ; journal id: " + journalId);
FileUtil.replaceFile(tmp, dst);
}
}
@@ -991,8 +1004,8 @@ public class Journal implements Closeable {
PersistedRecoveryPaxosData ret = PersistedRecoveryPaxosData.parseDelimitedFrom(in);
Preconditions.checkState(ret != null &&
ret.getSegmentState().getStartTxId() == segmentTxId,
- "Bad persisted data for segment %s: %s",
- segmentTxId, ret);
+ "Bad persisted data for segment %s: %s ; journal id: %s",
+ segmentTxId, ret, journalId);
return ret;
} finally {
IOUtils.closeStream(in);
@@ -1041,7 +1054,7 @@ public class Journal implements Closeable {
storage.cTime = sInfo.cTime;
int oldLV = storage.getLayoutVersion();
storage.layoutVersion = sInfo.layoutVersion;
- LOG.info("Starting upgrade of edits directory: "
+ LOG.info("Starting upgrade of edits directory: " + storage.getRoot()
+ ".\n old LV = " + oldLV
+ "; old CTime = " + oldCTime
+ ".\n new LV = " + storage.getLayoutVersion()
@@ -1112,7 +1125,7 @@ public class Journal implements Closeable {
if (endTxId <= committedTxnId.get()) {
if (!finalFile.getParentFile().exists()) {
LOG.error(finalFile.getParentFile() + " doesn't exist. Aborting tmp " +
- "segment move to current directory");
+ "segment move to current directory ; journal id: " + journalId);
return false;
}
Files.move(tmpFile.toPath(), finalFile.toPath(),
@@ -1122,13 +1135,13 @@ public class Journal implements Closeable {
} else {
success = false;
LOG.warn("Unable to move edits file from " + tmpFile + " to " +
- finalFile);
+ finalFile + " ; journal id: " + journalId);
}
} else {
success = false;
LOG.error("The endTxId of the temporary file is not less than the " +
"last committed transaction id. Aborting move to final file" +
- finalFile);
+ finalFile + " ; journal id: " + journalId);
}
return success;
[16/50] [abbrv] hadoop git commit: YARN-3610. FairScheduler: Add
steady-fair-shares to the REST API documentation. (Ray Chiang via Haibo Chen)
Posted by xy...@apache.org.
YARN-3610. FairScheduler: Add steady-fair-shares to the REST API documentation. (Ray Chiang via Haibo Chen)
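The fields documented below (steadyFairResources, demandResources, pendingContainers, and so on) all come back from the scheduler endpoint this page describes. A small self-contained sketch of fetching them, assuming a ResourceManager on the default web address localhost:8088; adjust host and port for a real cluster:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class SchedulerInfoFetch {
      public static void main(String[] args) throws Exception {
        // /ws/v1/cluster/scheduler is the endpoint documented below
        URL url = new URL("http://localhost:8088/ws/v1/cluster/scheduler");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Accept", "application/json");
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream()))) {
          String line;
          while ((line = in.readLine()) != null) {
            System.out.println(line); // JSON with steadyFairResources etc.
          }
        }
      }
    }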
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/50408cfc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/50408cfc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/50408cfc
Branch: refs/heads/HDDS-4
Commit: 50408cfc6987b554f8f8f3d6711f7fa61c6e6d6f
Parents: ca612e3
Author: Haibo Chen <ha...@apache.org>
Authored: Fri May 11 14:07:09 2018 -0700
Committer: Haibo Chen <ha...@apache.org>
Committed: Fri May 11 14:07:55 2018 -0700
----------------------------------------------------------------------
.../src/site/markdown/ResourceManagerRest.md | 118 +++++++++++++++++--
1 file changed, 110 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/50408cfc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
index caeaf3e..a30677c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
@@ -1130,7 +1130,7 @@ Response Body:
| type | string | Scheduler type - fairScheduler |
| rootQueue | The root queue object | A collection of root queue resources |
-### Elements of the root queue object
+### Elements of all queue objects
| Item | Data Type | Description |
|:---- |:---- |:---- |
@@ -1142,17 +1142,23 @@ Response Body:
| clusterResources | A single resource object | The capacity of the cluster |
| queueName | string | The name of the queue |
| schedulingPolicy | string | The name of the scheduling policy used by the queue |
-| childQueues | array of queues(JSON)/queue objects(XML) | A collection of sub-queue information. Omitted if the queue has no childQueues. |
+| childQueues | array of queues(JSON)/queue objects(XML) | A collection of sub-queue information. Omitted if the queue has no childQueues or is a leaf queue. |
+| allocatedContainers | int | The number of allocated containers |
+| demandResources | A single resource object | The resources that have been requested by containers in this queue which have not been fulfilled by the scheduler |
+| pendingContainers | int | The number of pending containers |
+| preemptable | boolean | true if containers in this queue can be preempted |
+| reservedContainers | int | The number of reserved containers |
+| steadyFairResources | A single resource object | The steady fair share for the queue |
-### Elements of the queues object for a Leaf queue - contains all the elements in parent except 'childQueues' plus the following
+### Additional elements of leaf queue objects (with the exception of the 'childQueues' property)
| Item | Data Type | Description |
|:---- |:---- |:---- |
-| type | string | type of the queue - fairSchedulerLeafQueueInfo |
+| type | string | The type of the queue - fairSchedulerLeafQueueInfo |
| numActiveApps | int | The number of active applications in this queue |
| numPendingApps | int | The number of pending applications in this queue |
-### Elements of the resource object for resourcesUsed in queues
+### Elements of the (cluster/demand/fair/max/min/used/*)Resources object in queues
| Item | Data Type | Description |
|:---- |:---- |:---- |
@@ -1181,13 +1187,19 @@ Response Body:
"scheduler": {
"schedulerInfo": {
"rootQueue": {
+ "allocatedContainers": 0,
"childQueues": {
"queue": [
{
+ "allocatedContainers": 0,
"clusterResources": {
"memory": 8192,
"vCores": 8
},
+ "demandResources": {
+ "memory": 0,
+ "vCores": 0
+ },
"fairResources": {
"memory": 0,
"vCores": 0
@@ -1203,8 +1215,15 @@ Response Body:
},
"numActiveApps": 0,
"numPendingApps": 0,
+ "pendingContainers": 0,
+ "preemptable": true,
"queueName": "root.default",
+ "reservedContainers": 0,
"schedulingPolicy": "fair",
+ "steadyFairResources": {
+ "memory": 4096,
+ "vCores": 0
+ },
"type": "fairSchedulerLeafQueueInfo",
"usedResources": {
"memory": 0,
@@ -1212,12 +1231,18 @@ Response Body:
}
},
{
+ "allocatedContainers": 0,
"childQueues": {
"queue": [
{
+ "allocatedContainers": 0,
"clusterResources": {
"memory": 8192,
- "vCores": 8
+ "vCores": 8
+ },
+ "demandResources": {
+ "memory": 0,
+ "vCores": 0
},
"fairResources": {
"memory": 10000,
@@ -1234,8 +1259,15 @@ Response Body:
},
"numActiveApps": 0,
"numPendingApps": 0,
+ "pendingContainers": 0,
+ "preemptable": true,
"queueName": "root.sample_queue.sample_sub_queue",
+ "reservedContainers": 0,
"schedulingPolicy": "fair",
+ "steadyFairResources": {
+ "memory": 4096,
+ "vCores": 0
+ },
"type": "fairSchedulerLeafQueueInfo",
"usedResources": {
"memory": 0,
@@ -1248,6 +1280,10 @@ Response Body:
"memory": 8192,
"vCores": 8
},
+ "demandResources": {
+ "memory": 0,
+ "vCores": 0
+ },
"fairResources": {
"memory": 10000,
"vCores": 0
@@ -1261,19 +1297,30 @@ Response Body:
"memory": 10000,
"vCores": 0
},
+ "pendingContainers": 0,
+ "preemptable": true,
"queueName": "root.sample_queue",
+ "reservedContainers": 0,
"schedulingPolicy": "fair",
+ "steadyFairResources": {
+ "memory": 4096,
+ "vCores": 0
+ },
"usedResources": {
"memory": 0,
"vCores": 0
}
}
- ],
+ ]
},
"clusterResources": {
"memory": 8192,
"vCores": 8
},
+ "demandResources": {
+ "memory": 0,
+ "vCores": 0
+ },
"fairResources": {
"memory": 8192,
"vCores": 8
@@ -1287,8 +1334,15 @@ Response Body:
"memory": 0,
"vCores": 0
},
+ "pendingContainers": 0,
+ "preemptable": true,
"queueName": "root",
+ "reservedContainers": 0,
"schedulingPolicy": "fair",
+ "steadyFairResources": {
+ "memory": 8192,
+ "vCores": 8
+ },
"usedResources": {
"memory": 0,
"vCores": 0
@@ -1334,6 +1388,14 @@ Response Body:
<memory>0</memory>
<vCores>0</vCores>
</usedResources>
+ <demandResources>
+ <memory>0</memory>
+ <vCores>0</vCores>
+ </demandResources>
+ <steadyFairResources>
+ <memory>8192</memory>
+ <vCores>8</vCores>
+ </steadyFairResources>
<fairResources>
<memory>8192</memory>
<vCores>8</vCores>
@@ -1342,8 +1404,12 @@ Response Body:
<memory>8192</memory>
<vCores>8</vCores>
</clusterResources>
+ <pendingContainers>0</pendingContainers>
+ <allocatedContainers>0</allocatedContainers>
+ <reservedContainers>0</reservedContainers>
<queueName>root</queueName>
<schedulingPolicy>fair</schedulingPolicy>
+ <preemptable>true</preemptable>
<childQueues>
<queue xsi:type="fairSchedulerLeafQueueInfo">
<maxApps>2147483647</maxApps>
@@ -1359,6 +1425,14 @@ Response Body:
<memory>0</memory>
<vCores>0</vCores>
</usedResources>
+ <demandResources>
+ <memory>0</memory>
+ <vCores>0</vCores>
+ </demandResources>
+ <steadyFairResources>
+ <memory>4096</memory>
+ <vCores>0</vCores>
+ </steadyFairResources>
<fairResources>
<memory>0</memory>
<vCores>0</vCores>
@@ -1367,15 +1441,19 @@ Response Body:
<memory>8192</memory>
<vCores>8</vCores>
</clusterResources>
+ <pendingContainers>0</pendingContainers>
+ <allocatedContainers>0</allocatedContainers>
+ <reservedContainers>0</reservedContainers>
<queueName>root.default</queueName>
<schedulingPolicy>fair</schedulingPolicy>
+ <preemptable>true</preemptable>
<numPendingApps>0</numPendingApps>
<numActiveApps>0</numActiveApps>
</queue>
<queue>
<maxApps>50</maxApps>
<minResources>
- <memory>10000</memory>
+ <memory>0</memory>
<vCores>0</vCores>
</minResources>
<maxResources>
@@ -1386,6 +1464,14 @@ Response Body:
<memory>0</memory>
<vCores>0</vCores>
</usedResources>
+ <demandResources>
+ <memory>0</memory>
+ <vCores>0</vCores>
+ </demandResources>
+ <steadyFairResources>
+ <memory>4096</memory>
+ <vCores>0</vCores>
+ </steadyFairResources>
<fairResources>
<memory>10000</memory>
<vCores>0</vCores>
@@ -1394,8 +1480,12 @@ Response Body:
<memory>8192</memory>
<vCores>8</vCores>
</clusterResources>
+ <pendingContainers>0</pendingContainers>
+ <allocatedContainers>0</allocatedContainers>
+ <reservedContainers>0</reservedContainers>
<queueName>root.sample_queue</queueName>
<schedulingPolicy>fair</schedulingPolicy>
+ <preemptable>true</preemptable>
<childQueues>
<queue xsi:type="fairSchedulerLeafQueueInfo">
<maxApps>2147483647</maxApps>
@@ -1411,6 +1501,14 @@ Response Body:
<memory>0</memory>
<vCores>0</vCores>
</usedResources>
+ <demandResources>
+ <memory>0</memory>
+ <vCores>0</vCores>
+ </demandResources>
+ <steadyFairResources>
+ <memory>4096</memory>
+ <vCores>0</vCores>
+ </steadyFairResources>
<fairResources>
<memory>10000</memory>
<vCores>0</vCores>
@@ -1419,8 +1517,12 @@ Response Body:
<memory>8192</memory>
<vCores>8</vCores>
</clusterResources>
+ <pendingContainers>0</pendingContainers>
+ <allocatedContainers>0</allocatedContainers>
+ <reservedContainers>0</reservedContainers>
<queueName>root.sample_queue.sample_sub_queue</queueName>
<schedulingPolicy>fair</schedulingPolicy>
+ <preemptable>true</preemptable>
<numPendingApps>0</numPendingApps>
<numActiveApps>0</numActiveApps>
</queue>
[07/50] [abbrv] hadoop git commit: HDFS-13542.
TestBlockManager#testNeededReplicationWhileAppending fails due to improper
cluster shutdown in TestBlockManager#testBlockManagerMachinesArray on
Windows. Contributed by Anbang Hu.
Posted by xy...@apache.org.
HDFS-13542. TestBlockManager#testNeededReplicationWhileAppending fails due to improper cluster shutdown in TestBlockManager#testBlockManagerMachinesArray on Windows. Contributed by Anbang Hu.
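The recurring pattern in the diff below is to move cluster.waitActive() inside the try block and null-guard the shutdown in the finally block, so that a startup failure on Windows cannot leave the MiniDFSCluster holding file locks that break the next test. The pattern in isolation (test body elided):

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      cluster.waitActive(); // may throw; must be inside the try
      // ... test body ...
    } finally {
      if (cluster != null) { // defensive null check, as in the patch
        cluster.shutdown();
      }
    }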
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d50c4d71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d50c4d71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d50c4d71
Branch: refs/heads/HDDS-4
Commit: d50c4d71dc42576f96ae5c268856fd1a7795f936
Parents: a922b9c
Author: Inigo Goiri <in...@apache.org>
Authored: Fri May 11 09:47:57 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Fri May 11 09:47:57 2018 -0700
----------------------------------------------------------------------
.../blockmanagement/TestBlockManager.java | 155 ++++++++++---------
1 file changed, 85 insertions(+), 70 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d50c4d71/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 5219a44..58ca2e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -452,8 +452,8 @@ public class TestBlockManager {
String src = "/test-file";
Path file = new Path(src);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
- cluster.waitActive();
try {
+ cluster.waitActive();
BlockManager bm = cluster.getNamesystem().getBlockManager();
FileSystem fs = cluster.getFileSystem();
NamenodeProtocols namenode = cluster.getNameNodeRpc();
@@ -492,7 +492,9 @@ public class TestBlockManager {
IOUtils.closeStream(out);
}
} finally {
- cluster.shutdown();
+ if (cluster != null) {
+ cluster.shutdown();
+ }
}
}
@@ -1043,7 +1045,9 @@ public class TestBlockManager {
assertTrue(fs.exists(file1));
fs.delete(file1, true);
assertTrue(!fs.exists(file1));
- cluster.shutdown();
+ if (cluster != null) {
+ cluster.shutdown();
+ }
}
}
@@ -1143,7 +1147,9 @@ public class TestBlockManager {
assertEquals(0, bm.getBlockOpQueueLength());
assertTrue(doneLatch.await(1, TimeUnit.SECONDS));
} finally {
- cluster.shutdown();
+ if (cluster != null) {
+ cluster.shutdown();
+ }
}
}
@@ -1218,7 +1224,9 @@ public class TestBlockManager {
long batched = MetricsAsserts.getLongCounter("BlockOpsBatched", rb);
assertTrue(batched > 0);
} finally {
- cluster.shutdown();
+ if (cluster != null) {
+ cluster.shutdown();
+ }
}
}
@@ -1227,76 +1235,83 @@ public class TestBlockManager {
final Configuration conf = new HdfsConfiguration();
final MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
- cluster.waitActive();
- BlockManager blockManager = cluster.getNamesystem().getBlockManager();
- FileSystem fs = cluster.getFileSystem();
- final Path filePath = new Path("/tmp.txt");
- final long fileLen = 1L;
- DFSTestUtil.createFile(fs, filePath, fileLen, (short) 3, 1L);
- DFSTestUtil.waitForReplication((DistributedFileSystem)fs,
- filePath, (short) 3, 60000);
- ArrayList<DataNode> datanodes = cluster.getDataNodes();
- assertEquals(datanodes.size(), 4);
- FSNamesystem ns = cluster.getNamesystem();
- // get the block
- final String bpid = cluster.getNamesystem().getBlockPoolId();
- File storageDir = cluster.getInstanceStorageDir(0, 0);
- File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
- assertTrue("Data directory does not exist", dataDir.exists());
- BlockInfo blockInfo = blockManager.blocksMap.getBlocks().iterator().next();
- ExtendedBlock blk = new ExtendedBlock(bpid, blockInfo.getBlockId(),
- blockInfo.getNumBytes(), blockInfo.getGenerationStamp());
- DatanodeDescriptor failedStorageDataNode =
- blockManager.getStoredBlock(blockInfo).getDatanode(0);
- DatanodeDescriptor corruptStorageDataNode =
- blockManager.getStoredBlock(blockInfo).getDatanode(1);
-
- ArrayList<StorageReport> reports = new ArrayList<StorageReport>();
- for(int i=0; i<failedStorageDataNode.getStorageInfos().length; i++) {
- DatanodeStorageInfo storageInfo = failedStorageDataNode
- .getStorageInfos()[i];
- DatanodeStorage dns = new DatanodeStorage(
- failedStorageDataNode.getStorageInfos()[i].getStorageID(),
- DatanodeStorage.State.FAILED,
- failedStorageDataNode.getStorageInfos()[i].getStorageType());
- while(storageInfo.getBlockIterator().hasNext()) {
- BlockInfo blockInfo1 = storageInfo.getBlockIterator().next();
- if(blockInfo1.equals(blockInfo)) {
- StorageReport report = new StorageReport(
- dns, true, storageInfo.getCapacity(),
- storageInfo.getDfsUsed(), storageInfo.getRemaining(),
- storageInfo.getBlockPoolUsed(), 0L);
- reports.add(report);
- break;
+ try {
+ cluster.waitActive();
+ BlockManager blockManager = cluster.getNamesystem().getBlockManager();
+ FileSystem fs = cluster.getFileSystem();
+ final Path filePath = new Path("/tmp.txt");
+ final long fileLen = 1L;
+ DFSTestUtil.createFile(fs, filePath, fileLen, (short) 3, 1L);
+ DFSTestUtil.waitForReplication((DistributedFileSystem)fs,
+ filePath, (short) 3, 60000);
+ ArrayList<DataNode> datanodes = cluster.getDataNodes();
+ assertEquals(datanodes.size(), 4);
+ FSNamesystem ns = cluster.getNamesystem();
+ // get the block
+ final String bpid = cluster.getNamesystem().getBlockPoolId();
+ File storageDir = cluster.getInstanceStorageDir(0, 0);
+ File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+ assertTrue("Data directory does not exist", dataDir.exists());
+ BlockInfo blockInfo =
+ blockManager.blocksMap.getBlocks().iterator().next();
+ ExtendedBlock blk = new ExtendedBlock(bpid, blockInfo.getBlockId(),
+ blockInfo.getNumBytes(), blockInfo.getGenerationStamp());
+ DatanodeDescriptor failedStorageDataNode =
+ blockManager.getStoredBlock(blockInfo).getDatanode(0);
+ DatanodeDescriptor corruptStorageDataNode =
+ blockManager.getStoredBlock(blockInfo).getDatanode(1);
+
+ ArrayList<StorageReport> reports = new ArrayList<StorageReport>();
+ for(int i=0; i<failedStorageDataNode.getStorageInfos().length; i++) {
+ DatanodeStorageInfo storageInfo = failedStorageDataNode
+ .getStorageInfos()[i];
+ DatanodeStorage dns = new DatanodeStorage(
+ failedStorageDataNode.getStorageInfos()[i].getStorageID(),
+ DatanodeStorage.State.FAILED,
+ failedStorageDataNode.getStorageInfos()[i].getStorageType());
+ while(storageInfo.getBlockIterator().hasNext()) {
+ BlockInfo blockInfo1 = storageInfo.getBlockIterator().next();
+ if(blockInfo1.equals(blockInfo)) {
+ StorageReport report = new StorageReport(
+ dns, true, storageInfo.getCapacity(),
+ storageInfo.getDfsUsed(), storageInfo.getRemaining(),
+ storageInfo.getBlockPoolUsed(), 0L);
+ reports.add(report);
+ break;
+ }
}
}
- }
- failedStorageDataNode.updateHeartbeat(reports.toArray(StorageReport
- .EMPTY_ARRAY), 0L, 0L, 0, 0, null);
- ns.writeLock();
- DatanodeStorageInfo corruptStorageInfo= null;
- for(int i=0; i<corruptStorageDataNode.getStorageInfos().length; i++) {
- corruptStorageInfo = corruptStorageDataNode.getStorageInfos()[i];
- while(corruptStorageInfo.getBlockIterator().hasNext()) {
- BlockInfo blockInfo1 = corruptStorageInfo.getBlockIterator().next();
- if (blockInfo1.equals(blockInfo)) {
- break;
+ failedStorageDataNode.updateHeartbeat(reports.toArray(StorageReport
+ .EMPTY_ARRAY), 0L, 0L, 0, 0, null);
+ ns.writeLock();
+ DatanodeStorageInfo corruptStorageInfo= null;
+ for(int i=0; i<corruptStorageDataNode.getStorageInfos().length; i++) {
+ corruptStorageInfo = corruptStorageDataNode.getStorageInfos()[i];
+ while(corruptStorageInfo.getBlockIterator().hasNext()) {
+ BlockInfo blockInfo1 = corruptStorageInfo.getBlockIterator().next();
+ if (blockInfo1.equals(blockInfo)) {
+ break;
+ }
}
}
+ blockManager.findAndMarkBlockAsCorrupt(blk, corruptStorageDataNode,
+ corruptStorageInfo.getStorageID(),
+ CorruptReplicasMap.Reason.ANY.toString());
+ ns.writeUnlock();
+ BlockInfo[] blockInfos = new BlockInfo[] {blockInfo};
+ ns.readLock();
+ LocatedBlocks locatedBlocks =
+ blockManager.createLocatedBlocks(blockInfos, 3L, false, 0L, 3L,
+ false, false, null, null);
+ assertTrue("Located Blocks should exclude corrupt" +
+ "replicas and failed storages",
+ locatedBlocks.getLocatedBlocks().size() == 1);
+ ns.readUnlock();
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
}
- blockManager.findAndMarkBlockAsCorrupt(blk, corruptStorageDataNode,
- corruptStorageInfo.getStorageID(),
- CorruptReplicasMap.Reason.ANY.toString());
- ns.writeUnlock();
- BlockInfo[] blockInfos = new BlockInfo[] {blockInfo};
- ns.readLock();
- LocatedBlocks locatedBlocks =
- blockManager.createLocatedBlocks(blockInfos, 3L, false, 0L, 3L,
- false, false, null, null);
- assertTrue("Located Blocks should exclude corrupt" +
- "replicas and failed storages",
- locatedBlocks.getLocatedBlocks().size() == 1);
- ns.readUnlock();
}
@Test
[17/50] [abbrv] hadoop git commit: HDDS-40. Separating packaging of
Ozone/HDDS from the main Hadoop. Contributed by Elek, Marton.
Posted by xy...@apache.org.
HDDS-40. Separating packaging of Ozone/HDDS from the main Hadoop.
Contributed by Elek, Marton.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b4f24ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b4f24ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b4f24ad
Branch: refs/heads/HDDS-4
Commit: 4b4f24ad5f2b457ad215d469bf28cf9a799812bc
Parents: 50408cf
Author: Anu Engineer <ae...@apache.org>
Authored: Fri May 11 13:52:05 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Fri May 11 14:24:36 2018 -0700
----------------------------------------------------------------------
.gitignore | 5 +
dev-support/bin/dist-layout-stitching | 15 --
dev-support/bin/ozone-dist-layout-stitching | 153 +++++++++++++++++++
dev-support/bin/ozone-dist-tar-stitching | 48 ++++++
hadoop-dist/pom.xml | 152 +++++++++++-------
hadoop-dist/src/main/compose/ozone/.env | 2 +-
.../src/main/compose/ozone/docker-compose.yaml | 8 +-
.../hdfs/server/namenode/NameNodeUtils.java | 2 +-
hadoop-ozone/acceptance-test/README.md | 22 ++-
.../dev-support/bin/robot-all.sh | 18 +++
.../acceptance-test/dev-support/bin/robot.sh | 38 +++++
hadoop-ozone/acceptance-test/pom.xml | 29 +---
.../acceptance-test/src/test/compose/.env | 2 +-
.../src/test/compose/docker-compose.yaml | 8 +-
.../test/robotframework/acceptance/ozone.robot | 7 +-
15 files changed, 394 insertions(+), 115 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b4f24ad/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index 440708a..3883ce2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -48,3 +48,8 @@ patchprocess/
.history/
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package-lock.json
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn-error.log
+
+#robotframework outputs
+log.html
+output.xml
+report.html
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b4f24ad/dev-support/bin/dist-layout-stitching
----------------------------------------------------------------------
diff --git a/dev-support/bin/dist-layout-stitching b/dev-support/bin/dist-layout-stitching
index 6557161..584821a 100755
--- a/dev-support/bin/dist-layout-stitching
+++ b/dev-support/bin/dist-layout-stitching
@@ -146,21 +146,6 @@ run cp -p "${ROOT}/hadoop-client-modules/hadoop-client-api/target/hadoop-client-
run cp -p "${ROOT}/hadoop-client-modules/hadoop-client-runtime/target/hadoop-client-runtime-${VERSION}.jar" share/hadoop/client/
run cp -p "${ROOT}/hadoop-client-modules/hadoop-client-minicluster/target/hadoop-client-minicluster-${VERSION}.jar" share/hadoop/client/
-# HDDS
-run copy "${ROOT}/hadoop-hdds/common/target/hadoop-hdds-common-${HDDS_VERSION}" .
-run copy "${ROOT}/hadoop-hdds/framework/target/hadoop-hdds-server-framework-${HDDS_VERSION}" .
-run copy "${ROOT}/hadoop-hdds/server-scm/target/hadoop-hdds-server-scm-${HDDS_VERSION}" .
-run copy "${ROOT}/hadoop-hdds/container-service/target/hadoop-hdds-container-service-${HDDS_VERSION}" .
-run copy "${ROOT}/hadoop-hdds/client/target/hadoop-hdds-client-${HDDS_VERSION}" .
-run copy "${ROOT}/hadoop-hdds/tools/target/hadoop-hdds-tools-${HDDS_VERSION}" .
-
-# Ozone
-run copy "${ROOT}/hadoop-ozone/common/target/hadoop-ozone-common-${HDDS_VERSION}" .
-run copy "${ROOT}/hadoop-ozone/ozone-manager/target/hadoop-ozone-ozone-manager-${HDDS_VERSION}" .
-run copy "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}" .
-run copy "${ROOT}/hadoop-ozone/client/target/hadoop-ozone-client-${HDDS_VERSION}" .
-run copy "${ROOT}/hadoop-ozone/tools/target/hadoop-ozone-tools-${HDDS_VERSION}" .
-
run copy "${ROOT}/hadoop-tools/hadoop-tools-dist/target/hadoop-tools-dist-${VERSION}" .
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b4f24ad/dev-support/bin/ozone-dist-layout-stitching
----------------------------------------------------------------------
diff --git a/dev-support/bin/ozone-dist-layout-stitching b/dev-support/bin/ozone-dist-layout-stitching
new file mode 100755
index 0000000..1b0b224
--- /dev/null
+++ b/dev-support/bin/ozone-dist-layout-stitching
@@ -0,0 +1,153 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# project.version
+VERSION=$1
+
+# project.build.directory
+BASEDIR=$2
+
+#hdds.version
+HDDS_VERSION=$3
+
+function run()
+{
+ declare res
+
+ echo "\$ ${*}"
+ "${@}"
+ res=$?
+ if [[ ${res} != 0 ]]; then
+ echo
+ echo "Failed!"
+ echo
+ exit "${res}"
+ fi
+}
+
+function findfileindir()
+{
+ declare file="$1"
+ declare dir="${2:-./share}"
+ declare count
+
+ count=$(find "${dir}" -iname "${file}" | wc -l)
+
+ #shellcheck disable=SC2086
+ echo ${count}
+}
+
+function copyifnotexists()
+{
+ declare src="$1"
+ declare dest="$2"
+
+ declare srcname
+ declare destdir
+
+ declare child
+ declare childpath
+
+ if [[ -f "${src}" ]]; then
+ srcname=${src##*/}
+ if [[ "${srcname}" != *.jar ||
+ $(findfileindir "${srcname}") -eq "0" ]]; then
+ destdir=$(dirname "${dest}")
+ mkdir -p "${destdir}"
+ cp -p "${src}" "${dest}"
+ fi
+ else
+ for childpath in "${src}"/*; do
+ child="${childpath##*/}"
+ if [[ "${child}" == "doc" ||
+ "${child}" == "webapps" ]]; then
+ mkdir -p "${dest}/${child}"
+ cp -r "${src}/${child}"/* "${dest}/${child}"
+ continue;
+ fi
+ copyifnotexists "${src}/${child}" "${dest}/${child}"
+ done
+ fi
+}
+
+#Copy all contents as-is, except the libs:
+#for libs, check for existence in the share directory and copy only if not already present.
+function copy()
+{
+ declare src="$1"
+ declare dest="$2"
+
+ declare child
+ declare childpath
+
+ if [[ -d "${src}" ]]; then
+ for childpath in "${src}"/*; do
+ child="${childpath##*/}"
+
+ if [[ "${child}" == "share" ]]; then
+ copyifnotexists "${src}/${child}" "${dest}/${child}"
+ else
+ if [[ -d "${src}/${child}" ]]; then
+ mkdir -p "${dest}/${child}"
+ cp -pr "${src}/${child}"/* "${dest}/${child}"
+ else
+ cp -pr "${src}/${child}" "${dest}/${child}"
+ fi
+ fi
+ done
+ fi
+}
+
+# shellcheck disable=SC2164
+ROOT=$(cd "${BASEDIR}"/../..;pwd)
+echo
+echo "Current directory $(pwd)"
+echo
+run rm -rf "ozone"
+run mkdir "ozone"
+run cd "ozone"
+run cp -p "${ROOT}/LICENSE.txt" .
+run cp -p "${ROOT}/NOTICE.txt" .
+run cp -p "${ROOT}/README.txt" .
+
+# Copy hadoop-common first so that it always has all of its dependencies.
+# The remaining projects copy only the libraries that are not already present in the 'share' directory.
+run copy "${ROOT}/hadoop-common-project/hadoop-common/target/hadoop-common-${VERSION}" .
+run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${VERSION}" .
+run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-${VERSION}" .
+
+
+# HDDS
+run copy "${ROOT}/hadoop-hdds/common/target/hadoop-hdds-common-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-hdds/framework/target/hadoop-hdds-server-framework-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-hdds/server-scm/target/hadoop-hdds-server-scm-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-hdds/container-service/target/hadoop-hdds-container-service-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-hdds/client/target/hadoop-hdds-client-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-hdds/tools/target/hadoop-hdds-tools-${HDDS_VERSION}" .
+
+# Ozone
+run copy "${ROOT}/hadoop-ozone/common/target/hadoop-ozone-common-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-ozone/ozone-manager/target/hadoop-ozone-ozone-manager-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-ozone/client/target/hadoop-ozone-client-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-ozone/tools/target/hadoop-ozone-tools-${HDDS_VERSION}" .
+
+mkdir -p ./share/hadoop/mapreduce
+mkdir -p ./share/hadoop/yarn
+echo
+echo "Hadoop Ozone dist layout available at: ${BASEDIR}/ozone-${HDDS_VERSION}"
+echo
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b4f24ad/dev-support/bin/ozone-dist-tar-stitching
----------------------------------------------------------------------
diff --git a/dev-support/bin/ozone-dist-tar-stitching b/dev-support/bin/ozone-dist-tar-stitching
new file mode 100755
index 0000000..decfa23
--- /dev/null
+++ b/dev-support/bin/ozone-dist-tar-stitching
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# project.version
+VERSION=$1
+
+# project.build.directory
+BASEDIR=$2
+
+function run()
+{
+ declare res
+
+ echo "\$ ${*}"
+ "${@}"
+ res=$?
+ if [[ ${res} != 0 ]]; then
+ echo
+ echo "Failed!"
+ echo
+ exit "${res}"
+ fi
+}
+
+#To make the final dist directory easily mountable from docker, we don't use
+#the version name in the directory name.
+#To include the version name in the root directory of the tar file,
+#we create a symbolic link and dereference it during the tar creation.
+run ln -s -f ozone "ozone-${VERSION}"
+run tar -c --dereference -f "ozone-${VERSION}.tar" "ozone-${VERSION}"
+run gzip -f "ozone-${VERSION}.tar"
+echo
+echo "Ozone dist tar available at: ${BASEDIR}/ozone-${VERSION}.tar.gz"
+echo
\ No newline at end of file
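Since the symlink trick above is easy to misread, here is a tiny standalone demonstration (all names hypothetical) that the tarball's root directory carries the version while the on-disk directory stays plain `ozone`:
```
# Self-contained demo of ln -s plus tar --dereference; run in an empty dir.
mkdir -p ozone && echo hello > ozone/README.txt
ln -s -f ozone ozone-0.2.1            # versioned name -> plain directory
tar -c --dereference -f ozone-0.2.1.tar ozone-0.2.1
tar -tf ozone-0.2.1.tar               # entries are rooted at ozone-0.2.1/
```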
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b4f24ad/hadoop-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index 43836eb..999d44c 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -13,8 +13,8 @@
limitations under the License. See accompanying LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
@@ -168,10 +168,13 @@
</goals>
<configuration>
<executable>${shell-executable}</executable>
- <workingDirectory>${project.build.directory}</workingDirectory>
+ <workingDirectory>${project.build.directory}
+ </workingDirectory>
<requiresOnline>false</requiresOnline>
<arguments>
- <argument>${basedir}/../dev-support/bin/dist-layout-stitching</argument>
+ <argument>
+ ${basedir}/../dev-support/bin/dist-layout-stitching
+ </argument>
<argument>${project.version}</argument>
<argument>${project.build.directory}</argument>
<argument>${hdds.version}</argument>
@@ -182,14 +185,16 @@
<id>toolshooks</id>
<phase>prepare-package</phase>
<goals>
- <goal>exec</goal>
+ <goal>exec</goal>
</goals>
<configuration>
<executable>${shell-executable}</executable>
<workingDirectory>${basedir}</workingDirectory>
<requiresOnline>false</requiresOnline>
<arguments>
- <argument>${basedir}/../dev-support/bin/dist-tools-hooks-maker</argument>
+ <argument>
+ ${basedir}/../dev-support/bin/dist-tools-hooks-maker
+ </argument>
<argument>${project.version}</argument>
<argument>${project.build.directory}</argument>
<argument>${basedir}/../hadoop-tools</argument>
@@ -203,14 +208,16 @@
<goal>exec</goal>
</goals>
<configuration>
- <executable>${shell-executable}</executable>
- <workingDirectory>${project.build.directory}</workingDirectory>
- <requiresOnline>false</requiresOnline>
- <arguments>
- <argument>${basedir}/../dev-support/bin/dist-tar-stitching</argument>
- <argument>${project.version}</argument>
- <argument>${project.build.directory}</argument>
- </arguments>
+ <executable>${shell-executable}</executable>
+ <workingDirectory>${project.build.directory}
+ </workingDirectory>
+ <requiresOnline>false</requiresOnline>
+ <arguments>
+ <argument>${basedir}/../dev-support/bin/dist-tar-stitching
+ </argument>
+ <argument>${project.version}</argument>
+ <argument>${project.build.directory}</argument>
+ </arguments>
</configuration>
</execution>
</executions>
@@ -218,14 +225,12 @@
</plugins>
</build>
</profile>
-
<profile>
<id>hdds</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<dependencies>
-
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-ozone-manager</artifactId>
@@ -261,41 +266,86 @@
<plugins>
<plugin>
<artifactId>maven-resources-plugin</artifactId>
- <executions>
- <execution>
- <id>copy-docker-compose</id>
- <goals>
- <goal>copy-resources</goal>
- </goals>
- <phase>prepare-package</phase>
- <configuration>
- <outputDirectory>${project.build.directory}/compose</outputDirectory>
- <resources>
- <resource>
- <directory>src/main/compose</directory>
- <filtering>true</filtering>
- </resource>
- </resources>
- </configuration>
- </execution>
- <execution>
- <id>copy-dockerfile</id>
- <goals>
- <goal>copy-resources</goal>
- </goals>
- <phase>prepare-package</phase>
- <configuration>
- <outputDirectory>${project.build.directory}</outputDirectory>
- <resources>
- <resource>
- <directory>src/main/docker</directory>
- <filtering>true</filtering>
- </resource>
- </resources>
- </configuration>
- </execution>
- </executions>
- </plugin>
+ <executions>
+ <execution>
+ <id>copy-docker-compose</id>
+ <goals>
+ <goal>copy-resources</goal>
+ </goals>
+ <phase>prepare-package</phase>
+ <configuration>
+ <outputDirectory>${project.build.directory}/compose
+ </outputDirectory>
+ <resources>
+ <resource>
+ <directory>src/main/compose</directory>
+ <filtering>true</filtering>
+ </resource>
+ </resources>
+ </configuration>
+ </execution>
+ <execution>
+ <id>copy-dockerfile</id>
+ <goals>
+ <goal>copy-resources</goal>
+ </goals>
+ <phase>prepare-package</phase>
+ <configuration>
+ <outputDirectory>${project.build.directory}</outputDirectory>
+ <resources>
+ <resource>
+ <directory>src/main/docker</directory>
+ <filtering>true</filtering>
+ </resource>
+ </resources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>dist-ozone</id>
+ <phase>prepare-package</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <executable>${shell-executable}</executable>
+ <workingDirectory>${project.build.directory}
+ </workingDirectory>
+ <arguments>
+ <argument>
+ ${basedir}/../dev-support/bin/ozone-dist-layout-stitching
+ </argument>
+ <argument>${project.version}</argument>
+ <argument>${project.build.directory}</argument>
+ <argument>${hdds.version}</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ <execution>
+ <id>tar-ozone</id>
+ <phase>package</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <executable>${shell-executable}</executable>
+ <workingDirectory>${project.build.directory}
+ </workingDirectory>
+ <arguments>
+ <argument>${basedir}/../dev-support/bin/ozone-dist-tar-stitching
+ </argument>
+ <argument>${hdds.version}</argument>
+ <argument>${project.build.directory}</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
</plugins>
</build>
</profile>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b4f24ad/hadoop-dist/src/main/compose/ozone/.env
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/ozone/.env b/hadoop-dist/src/main/compose/ozone/.env
index af20d3e..67eed25 100644
--- a/hadoop-dist/src/main/compose/ozone/.env
+++ b/hadoop-dist/src/main/compose/ozone/.env
@@ -14,4 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-VERSION=${project.version}
\ No newline at end of file
+HDDS_VERSION=${hdds.version}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b4f24ad/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
index 13a7db6..faf420c 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
+++ b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
@@ -20,7 +20,7 @@ services:
image: apache/hadoop-runner
hostname: namenode
volumes:
- - ../..//hadoop-${VERSION}:/opt/hadoop
+ - ../../ozone:/opt/hadoop
ports:
- 9870:9870
environment:
@@ -31,7 +31,7 @@ services:
datanode:
image: apache/hadoop-runner
volumes:
- - ../..//hadoop-${VERSION}:/opt/hadoop
+ - ../../ozone:/opt/hadoop
ports:
- 9864
command: ["/opt/hadoop/bin/ozone","datanode"]
@@ -40,7 +40,7 @@ services:
ksm:
image: apache/hadoop-runner
volumes:
- - ../..//hadoop-${VERSION}:/opt/hadoop
+ - ../../ozone:/opt/hadoop
ports:
- 9874:9874
environment:
@@ -51,7 +51,7 @@ services:
scm:
image: apache/hadoop-runner
volumes:
- - ../..//hadoop-${VERSION}:/opt/hadoop
+ - ../../ozone:/opt/hadoop
ports:
- 9876:9876
env_file:
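Because the resources-plugin execution above filters the compose files into ${project.build.directory}/compose, a local cluster can be started straight from the build output. A sketch, assuming Docker and docker-compose are installed and a -Phdds,dist build has produced hadoop-dist/target/ozone:
```
# Hypothetical local run after a full -Phdds,dist build.
cd hadoop-dist/target/compose/ozone
docker-compose up -d     # starts the namenode, datanode, ksm and scm services
docker-compose ps        # verify all four services are running
docker-compose down      # tear the cluster down again
```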
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b4f24ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeUtils.java
index 838a8e7..ec1d510 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeUtils.java
@@ -112,7 +112,7 @@ public final class NameNodeUtils {
}
if (port > 0) {
- return currentNnAddress;
+ return currentNnAddress;
} else {
// the port is missing or 0. Figure out real bind address later.
return null;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b4f24ad/hadoop-ozone/acceptance-test/README.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/README.md b/hadoop-ozone/acceptance-test/README.md
index 3a0ca49..2714e0a 100644
--- a/hadoop-ozone/acceptance-test/README.md
+++ b/hadoop-ozone/acceptance-test/README.md
@@ -20,19 +20,29 @@ This project contains acceptance tests for ozone/hdds using docker-compose and [
To run the acceptance tests, please activate the `ozone-acceptance-test` profile and do a full build.
-Typically you need a `mvn install -Phdds,ozone-acceptance-test,dist -DskipTests` for a build without unit tests but with acceptance test.
+```
+mvn clean install -Pdist -Phdds
+cd hadoop-ozone/acceptance-test
+mvn integration-test -Phdds,ozone-acceptance-test,dist -DskipTests
+```
Notes:
1. You need a hadoop build in hadoop-dist/target directory.
2. The `ozone-acceptance-test` could be activated with profile even if the unit tests are disabled.
-
+ 3. This method does not require Robot Framework on the PATH, as Jython is used.
## Development
-You can run manually the robot tests with `robot` cli. (See robotframework docs to install it.)
+You can also run the robot tests manually with the `robot` CLI.
+ (See robotframework docs to install it: http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#installation-instructions)
+
+In the dev-support directory there are two wrapper scripts that run Robot Framework with the local robot CLI
+instead of calling it from Maven.
- 1. Go to the `src/test/robotframework`
- 2. Execute `robot -v basedir:${PWD}/../../.. -v VERSION:3.2.0-SNAPSHOT .`
+They are useful during development of the robot files, as any Robot Framework CLI
+arguments can be used.
-You can also use select just one test with -t `"*testnamefragment*"`
\ No newline at end of file
+ 1. `dev-support/bin/robot.sh` is the simple wrapper; the .robot file should be passed as an argument.
+ 2. `dev-support/bin/robot-all.sh` calls robot.sh with the main acceptance test directory,
+ which means all the acceptance tests are executed.
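Concretely, the wrappers can be exercised from the repository root like this (the robot CLI must already be installed, e.g. via pip):
```
# Run a single suite through the simple wrapper; the suite path is the one
# added later in this commit.
hadoop-ozone/acceptance-test/dev-support/bin/robot.sh \
    hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot

# Or run every suite under the acceptance directory.
hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh
```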
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b4f24ad/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh b/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh
new file mode 100755
index 0000000..0e212a2
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+"$DIR/robot.sh" "$DIR/../../src/test/robotframework/acceptance"
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b4f24ad/hadoop-ozone/acceptance-test/dev-support/bin/robot.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/dev-support/bin/robot.sh b/hadoop-ozone/acceptance-test/dev-support/bin/robot.sh
new file mode 100755
index 0000000..b651f76
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/dev-support/bin/robot.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+#basedir is the directory of the whole hadoop project. Used to calculate the
+#exact path to the hadoop-dist project
+BASEDIR=${DIR}/../../../..
+
+if [ ! "$(which robot)" ] ; then
+ echo ""
+ echo "robot is not on your PATH."
+ echo ""
+ echo "Please install it according to the documentation:"
+ echo " http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#installation-instructions"
+ echo " (TLDR; most of the time you need: 'pip install robotframework')"
+ exit 1
+fi
+
+OZONEDISTDIR="$BASEDIR/hadoop-dist/target/ozone"
+if [ ! -d "$OZONEDISTDIR" ]; then
+ echo "Ozone can't be found in the $OZONEDISTDIR."
+ echo "You may need a full build with -Phdds and -Pdist profiles"
+ exit 1
+fi
+robot -v "basedir:$BASEDIR" "$@"
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b4f24ad/hadoop-ozone/acceptance-test/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/pom.xml b/hadoop-ozone/acceptance-test/pom.xml
index fb6794c..ef45c44 100644
--- a/hadoop-ozone/acceptance-test/pom.xml
+++ b/hadoop-ozone/acceptance-test/pom.xml
@@ -28,32 +28,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<description>Apache Hadoop Ozone Acceptance Tests</description>
<name>Apache Hadoop Ozone Acceptance Tests</name>
<packaging>pom</packaging>
- <build>
- <plugins>
- <plugin>
- <artifactId>maven-resources-plugin</artifactId>
- <executions>
- <execution>
- <id>copy-docker-compose</id>
- <goals>
- <goal>copy-resources</goal>
- </goals>
- <phase>process-test-resources</phase>
- <configuration>
- <outputDirectory>${project.build.directory}/compose
- </outputDirectory>
- <resources>
- <resource>
- <directory>src/test/compose</directory>
- <filtering>true</filtering>
- </resource>
- </resources>
- </configuration>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
<profiles>
<profile>
<id>ozone-acceptance-test</id>
@@ -70,8 +44,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
</goals>
<configuration>
<variables>
- <variable>version:${project.version}</variable>
- <variable>basedir:${project.basedir}</variable>
+ <variable>basedir:${project.basedir}/../..</variable>
</variables>
<skip>false</skip>
<skipTests>false</skipTests>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b4f24ad/hadoop-ozone/acceptance-test/src/test/compose/.env
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/.env b/hadoop-ozone/acceptance-test/src/test/compose/.env
index 79f890b..cf22168 100644
--- a/hadoop-ozone/acceptance-test/src/test/compose/.env
+++ b/hadoop-ozone/acceptance-test/src/test/compose/.env
@@ -14,4 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-HADOOPDIR=../../hadoop-dist/target/hadoop-${project.version}
\ No newline at end of file
+OZONEDIR=../../../hadoop-dist/target/ozone
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b4f24ad/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml
index da63f84..44bd4a0 100644
--- a/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml
+++ b/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml
@@ -20,7 +20,7 @@ services:
image: apache/hadoop-runner
hostname: namenode
volumes:
- - ${HADOOPDIR}:/opt/hadoop
+ - ${OZONEDIR}:/opt/hadoop
ports:
- 9870
environment:
@@ -31,7 +31,7 @@ services:
datanode:
image: apache/hadoop-runner
volumes:
- - ${HADOOPDIR}:/opt/hadoop
+ - ${OZONEDIR}:/opt/hadoop
ports:
- 9864
command: ["/opt/hadoop/bin/ozone","datanode"]
@@ -41,7 +41,7 @@ services:
image: apache/hadoop-runner
hostname: ksm
volumes:
- - ${HADOOPDIR}:/opt/hadoop
+ - ${OZONEDIR}:/opt/hadoop
ports:
- 9874
environment:
@@ -52,7 +52,7 @@ services:
scm:
image: apache/hadoop-runner
volumes:
- - ${HADOOPDIR}:/opt/hadoop
+ - ${OZONEDIR}:/opt/hadoop
ports:
- 9876
env_file:
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b4f24ad/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
index 211ec4c..c0e04a8 100644
--- a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
+++ b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
@@ -21,8 +21,7 @@ Suite Teardown Teardown Ozone Cluster
*** Variables ***
${COMMON_REST_HEADER} -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE root"
-${version}
-
+${basedir}
*** Test Cases ***
Daemons are running without error
@@ -130,8 +129,8 @@ Execute on
Run docker compose
[arguments] ${command}
- Set Environment Variable HADOOPDIR ${basedir}/../../hadoop-dist/target/hadoop-${version}
- ${rc} ${output} = Run And Return Rc And Output docker-compose -f ${basedir}/target/compose/docker-compose.yaml ${command}
+ Set Environment Variable OZONEDIR ${basedir}/hadoop-dist/target/ozone
+ ${rc} ${output} = Run And Return Rc And Output docker-compose -f ${basedir}/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml ${command}
Log ${output}
Should Be Equal As Integers ${rc} 0
[return] ${rc} ${output}
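For debugging, the `Run docker compose` keyword above can be reproduced by hand. A rough shell equivalent, run from the repository root; the exported variable overrides the OZONEDIR default from the .env file:
```
# Manual equivalent of the keyword; assumes the ozone dist has been built.
export OZONEDIR="$(pwd)/hadoop-dist/target/ozone"
COMPOSE_FILE=hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml
docker-compose -f "$COMPOSE_FILE" up -d
docker-compose -f "$COMPOSE_FILE" down
```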
---------------------------------------------------------------------
[36/50] [abbrv] hadoop git commit: YARN-8130 Race condition when
container events are published for KILLED applications. (Rohith Sharma K S
via Haibo Chen)
Posted by xy...@apache.org.
YARN-8130 Race condition when container events are published for KILLED applications. (Rohith Sharma K S via Haibo Chen)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d00a0c7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d00a0c7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d00a0c7
Branch: refs/heads/HDDS-4
Commit: 2d00a0c71b5dde31e2cf8fcb96d9d541d41fb879
Parents: 6beb25a
Author: Haibo Chen <ha...@apache.org>
Authored: Mon May 14 11:08:42 2018 -0700
Committer: Haibo Chen <ha...@apache.org>
Committed: Mon May 14 11:08:42 2018 -0700
----------------------------------------------------------------------
.../timelineservice/NMTimelineEvent.java | 12 ++-
.../timelineservice/NMTimelineEventType.java | 3 +
.../timelineservice/NMTimelinePublisher.java | 23 +++--
.../TestNMTimelinePublisher.java | 102 ++++++++++++++++---
4 files changed, 113 insertions(+), 27 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d00a0c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelineEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelineEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelineEvent.java
index f275b37..1ee27d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelineEvent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelineEvent.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.yarn.server.nodemanager.timelineservice;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.event.AbstractEvent;
/**
@@ -25,11 +26,14 @@ import org.apache.hadoop.yarn.event.AbstractEvent;
* timelineservice v2.
*/
public class NMTimelineEvent extends AbstractEvent<NMTimelineEventType> {
- public NMTimelineEvent(NMTimelineEventType type) {
- super(type);
+ private ApplicationId appId;
+
+ public NMTimelineEvent(NMTimelineEventType type, ApplicationId appId) {
+ super(type, System.currentTimeMillis());
+ this.appId = appId;
}
- public NMTimelineEvent(NMTimelineEventType type, long timestamp) {
- super(type, timestamp);
+ public ApplicationId getApplicationId() {
+ return appId;
}
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d00a0c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelineEventType.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelineEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelineEventType.java
index b4ae45a..5d81c94 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelineEventType.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelineEventType.java
@@ -24,4 +24,7 @@ package org.apache.hadoop.yarn.server.nodemanager.timelineservice;
public enum NMTimelineEventType {
// Publish the NM Timeline entity
TIMELINE_ENTITY_PUBLISH,
+
+ // Stop and remove timeline client
+ STOP_TIMELINE_CLIENT
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d00a0c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
index 13d5c67..f451726 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
@@ -96,7 +96,7 @@ public class NMTimelinePublisher extends CompositeService {
@Override
protected void serviceInit(Configuration conf) throws Exception {
- dispatcher = new AsyncDispatcher("NM Timeline dispatcher");
+ dispatcher = createDispatcher();
dispatcher.register(NMTimelineEventType.class,
new ForwardingEventHandler());
addIfService(dispatcher);
@@ -113,6 +113,10 @@ public class NMTimelinePublisher extends CompositeService {
super.serviceInit(conf);
}
+ protected AsyncDispatcher createDispatcher() {
+ return new AsyncDispatcher("NM Timeline dispatcher");
+ }
+
@Override
protected void serviceStart() throws Exception {
super.serviceStart();
@@ -141,6 +145,9 @@ public class NMTimelinePublisher extends CompositeService {
putEntity(((TimelinePublishEvent) event).getTimelineEntityToPublish(),
((TimelinePublishEvent) event).getApplicationId());
break;
+ case STOP_TIMELINE_CLIENT:
+ removeAndStopTimelineClient(event.getApplicationId());
+ break;
default:
LOG.error("Unknown NMTimelineEvent type: " + event.getType());
}
@@ -392,20 +399,13 @@ public class NMTimelinePublisher extends CompositeService {
}
private static class TimelinePublishEvent extends NMTimelineEvent {
- private ApplicationId appId;
private TimelineEntity entityToPublish;
public TimelinePublishEvent(TimelineEntity entity, ApplicationId appId) {
- super(NMTimelineEventType.TIMELINE_ENTITY_PUBLISH, System
- .currentTimeMillis());
- this.appId = appId;
+ super(NMTimelineEventType.TIMELINE_ENTITY_PUBLISH, appId);
this.entityToPublish = entity;
}
- public ApplicationId getApplicationId() {
- return appId;
- }
-
public TimelineEntity getTimelineEntityToPublish() {
return entityToPublish;
}
@@ -434,6 +434,11 @@ public class NMTimelinePublisher extends CompositeService {
}
public void stopTimelineClient(ApplicationId appId) {
+ dispatcher.getEventHandler().handle(
+ new NMTimelineEvent(NMTimelineEventType.STOP_TIMELINE_CLIENT, appId));
+ }
+
+ private void removeAndStopTimelineClient(ApplicationId appId) {
TimelineV2Client client = appToClientMap.remove(appId);
if (client != null) {
client.stop();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d00a0c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/TestNMTimelinePublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/TestNMTimelinePublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/TestNMTimelinePublisher.java
index 43196c7..2585262 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/TestNMTimelinePublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/TestNMTimelinePublisher.java
@@ -31,34 +31,47 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.timelineservice.ContainerEntity;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
import org.apache.hadoop.yarn.client.api.impl.TimelineV2ClientImpl;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.event.DrainDispatcher;
import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.metrics.ContainerMetricsConstants;
import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationContainerFinishedEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;
import org.junit.Assert;
import org.junit.Test;
+import org.junit.After;
+import org.junit.Before;
public class TestNMTimelinePublisher {
private static final String MEMORY_ID = "MEMORY";
private static final String CPU_ID = "CPU";
- @Test
- public void testContainerResourceUsage() {
- Context context = mock(Context.class);
- @SuppressWarnings("unchecked")
- final DummyTimelineClient timelineClient = new DummyTimelineClient(null);
- when(context.getNodeId()).thenReturn(NodeId.newInstance("localhost", 0));
+ private NMTimelinePublisher publisher;
+ private DummyTimelineClient timelineClient;
+ private Configuration conf;
+ private DrainDispatcher dispatcher;
- Configuration conf = new Configuration();
+
+ @Before public void setup() throws Exception {
+ conf = new Configuration();
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
+ conf.setLong(YarnConfiguration.ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS,
+ 3000L);
+ timelineClient = new DummyTimelineClient(null);
+ Context context = createMockContext();
+ dispatcher = new DrainDispatcher();
- NMTimelinePublisher publisher = new NMTimelinePublisher(context) {
+ publisher = new NMTimelinePublisher(context) {
public void createTimelineClient(ApplicationId appId) {
if (!getAppToClientMap().containsKey(appId)) {
timelineClient.init(getConfig());
@@ -66,15 +79,73 @@ public class TestNMTimelinePublisher {
getAppToClientMap().put(appId, timelineClient);
}
}
+
+ @Override protected AsyncDispatcher createDispatcher() {
+ return dispatcher;
+ }
};
publisher.init(conf);
publisher.start();
+ }
+
+ private Context createMockContext() {
+ Context context = mock(Context.class);
+ when(context.getNodeId()).thenReturn(NodeId.newInstance("localhost", 0));
+ return context;
+ }
+
+ @After public void tearDown() throws Exception {
+ if (publisher != null) {
+ publisher.stop();
+ }
+ if (timelineClient != null) {
+ timelineClient.stop();
+ }
+ }
+
+ @Test public void testPublishContainerFinish() throws Exception {
+ ApplicationId appId = ApplicationId.newInstance(0, 2);
+ ApplicationAttemptId appAttemptId =
+ ApplicationAttemptId.newInstance(appId, 1);
+ ContainerId cId = ContainerId.newContainerId(appAttemptId, 1);
+
+ String diag = "test-diagnostics";
+ int exitStatus = 0;
+ ContainerStatus cStatus = mock(ContainerStatus.class);
+ when(cStatus.getContainerId()).thenReturn(cId);
+ when(cStatus.getDiagnostics()).thenReturn(diag);
+ when(cStatus.getExitStatus()).thenReturn(exitStatus);
+ long timeStamp = System.currentTimeMillis();
+
+ ApplicationContainerFinishedEvent finishedEvent =
+ new ApplicationContainerFinishedEvent(cStatus, timeStamp);
+
+ publisher.createTimelineClient(appId);
+ publisher.publishApplicationEvent(finishedEvent);
+ publisher.stopTimelineClient(appId);
+ dispatcher.await();
+
+ ContainerEntity cEntity = new ContainerEntity();
+ cEntity.setId(cId.toString());
+ TimelineEntity[] lastPublishedEntities =
+ timelineClient.getLastPublishedEntities();
+
+ Assert.assertNotNull(lastPublishedEntities);
+ Assert.assertEquals(1, lastPublishedEntities.length);
+ TimelineEntity entity = lastPublishedEntities[0];
+ Assert.assertTrue(cEntity.equals(entity));
+ Assert.assertEquals(diag,
+ entity.getInfo().get(ContainerMetricsConstants.DIAGNOSTICS_INFO));
+ Assert.assertEquals(exitStatus,
+ entity.getInfo().get(ContainerMetricsConstants.EXIT_STATUS_INFO));
+ }
+
+ @Test public void testContainerResourceUsage() {
ApplicationId appId = ApplicationId.newInstance(0, 1);
publisher.createTimelineClient(appId);
Container aContainer = mock(Container.class);
- when(aContainer.getContainerId()).thenReturn(ContainerId.newContainerId(
- ApplicationAttemptId.newInstance(appId, 1),
- 0L));
+ when(aContainer.getContainerId()).thenReturn(ContainerId
+ .newContainerId(ApplicationAttemptId.newInstance(appId, 1), 0L));
publisher.reportContainerResourceUsage(aContainer, 1024L, 8F);
verifyPublishedResourceUsageMetrics(timelineClient, 1024L, 8);
timelineClient.reset();
@@ -91,7 +162,6 @@ public class TestNMTimelinePublisher {
(float) ResourceCalculatorProcessTree.UNAVAILABLE);
verifyPublishedResourceUsageMetrics(timelineClient, 1024L,
ResourceCalculatorProcessTree.UNAVAILABLE);
- publisher.stop();
}
private void verifyPublishedResourceUsageMetrics(
@@ -151,8 +221,12 @@ public class TestNMTimelinePublisher {
private TimelineEntity[] lastPublishedEntities;
- @Override
- public void putEntitiesAsync(TimelineEntity... entities)
+ @Override public void putEntitiesAsync(TimelineEntity... entities)
+ throws IOException, YarnException {
+ this.lastPublishedEntities = entities;
+ }
+
+ @Override public void putEntities(TimelineEntity... entities)
throws IOException, YarnException {
this.lastPublishedEntities = entities;
}
---------------------------------------------------------------------
[33/50] [abbrv] hadoop git commit: HDFS-13539. DFSStripedInputStream
NPE when reportCheckSumFailure.
Posted by xy...@apache.org.
HDFS-13539. DFSStripedInputStream NPE when reportCheckSumFailure.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/960940e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/960940e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/960940e0
Branch: refs/heads/HDDS-4
Commit: 960940e0e08f7839775f2d8a352b444d104d36b4
Parents: fc5d49c
Author: Xiao Chen <xi...@apache.org>
Authored: Mon May 14 09:28:09 2018 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Mon May 14 09:28:39 2018 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/hdfs/DFSInputStream.java | 13 +++++++++-
.../hadoop/hdfs/DFSStripedInputStream.java | 8 ++++---
.../hadoop/hdfs/TestDFSStripedInputStream.java | 25 ++++++++++++++++++++
3 files changed, 42 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/960940e0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index d3d6669..b38e629 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -790,13 +790,24 @@ public class DFSInputStream extends FSInputStream
// Check if need to report block replicas corruption either read
// was successful or ChecksumException occurred.
reportCheckSumFailure(corruptedBlocks,
- currentLocatedBlock.getLocations().length, false);
+ getCurrentBlockLocationsLength(), false);
}
}
}
return -1;
}
+ protected int getCurrentBlockLocationsLength() {
+ int len = 0;
+ if (currentLocatedBlock == null) {
+ DFSClient.LOG.info("Found null currentLocatedBlock. pos={}, "
+ + "blockEnd={}, fileLength={}", pos, blockEnd, getFileLength());
+ } else {
+ len = currentLocatedBlock.getLocations().length;
+ }
+ return len;
+ }
+
/**
* Read the entire buffer.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/960940e0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 339a02c..f3b16e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs;
+import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.hdfs.protocol.BlockType;
@@ -160,7 +161,8 @@ public class DFSStripedInputStream extends DFSInputStream {
* When seeking into a new block group, create blockReader for each internal
* block in the group.
*/
- private synchronized void blockSeekTo(long target) throws IOException {
+ @VisibleForTesting
+ synchronized void blockSeekTo(long target) throws IOException {
if (target >= getFileLength()) {
throw new IOException("Attempted to read past end of file");
}
@@ -400,8 +402,8 @@ public class DFSStripedInputStream extends DFSInputStream {
} finally {
// Check if need to report block replicas corruption either read
// was successful or ChecksumException occurred.
- reportCheckSumFailure(corruptedBlocks,
- currentLocatedBlock.getLocations().length, true);
+ reportCheckSumFailure(corruptedBlocks, getCurrentBlockLocationsLength(),
+ true);
}
}
return -1;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/960940e0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
index de276a9..cdebee0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
+import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -51,7 +52,12 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.spy;
public class TestDFSStripedInputStream {
@@ -504,4 +510,23 @@ public class TestDFSStripedInputStream {
in.close();
}
}
+
+ @Test
+ public void testReadFailToGetCurrentBlock() throws Exception {
+ DFSTestUtil.writeFile(cluster.getFileSystem(), filePath, "test");
+ try (DFSStripedInputStream in = (DFSStripedInputStream) fs.getClient()
+ .open(filePath.toString())) {
+ final DFSStripedInputStream spy = spy(in);
+ final String msg = "Injected exception for testReadNPE";
+ doThrow(new IOException(msg)).when(spy).blockSeekTo(anyLong());
+ assertNull(in.getCurrentBlock());
+ try {
+ spy.read();
+ fail("read should have failed");
+ } catch (IOException expected) {
+ LOG.info("Exception caught", expected);
+ GenericTestUtils.assertExceptionContains(msg, expected);
+ }
+ }
+ }
}
---------------------------------------------------------------------
[28/50] [abbrv] hadoop git commit: Revert "Add 2.9.1 release notes
and changes documents"
Posted by xy...@apache.org.
Revert "Add 2.9.1 release notes and changes documents"
This reverts commit e4dc346d651de4c9af05a9616f8fe6369895d8af.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/66c9905b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/66c9905b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/66c9905b
Branch: refs/heads/HDDS-4
Commit: 66c9905b5734472ce64863790c208e1f3775c870
Parents: e4dc346
Author: sammichen <sa...@intel.com>
Authored: Mon May 14 14:38:40 2018 +0800
Committer: sammichen <sa...@intel.com>
Committed: Mon May 14 14:38:40 2018 +0800
----------------------------------------------------------------------
.../markdown/release/2.9.1/CHANGES.2.9.1.md | 277 ----------------
.../release/2.9.1/RELEASENOTES.2.9.1.md | 88 ------
.../jdiff/Apache_Hadoop_HDFS_2.9.1.xml | 312 -------------------
hadoop-project-dist/pom.xml | 2 +-
4 files changed, 1 insertion(+), 678 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/66c9905b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md
deleted file mode 100644
index c5e53f6..0000000
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md
+++ /dev/null
@@ -1,277 +0,0 @@
-
-<!---
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
--->
-# "Apache Hadoop" Changelog
-
-## Release 2.9.1 - 2018-04-16
-
-### INCOMPATIBLE CHANGES:
-
-| JIRA | Summary | Priority | Component | Reporter | Contributor |
-|:---- |:---- | :--- |:---- |:---- |:---- |
-| [HDFS-12883](https://issues.apache.org/jira/browse/HDFS-12883) | RBF: Document Router and State Store metrics | Major | documentation | Yiqun Lin | Yiqun Lin |
-| [HDFS-12895](https://issues.apache.org/jira/browse/HDFS-12895) | RBF: Add ACL support for mount table | Major | . | Yiqun Lin | Yiqun Lin |
-| [YARN-7190](https://issues.apache.org/jira/browse/YARN-7190) | Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath | Major | timelineclient, timelinereader, timelineserver | Vrushali C | Varun Saxena |
-| [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | RBF: Use the ZooKeeper as the default State Store | Minor | documentation | Yiqun Lin | Yiqun Lin |
-
-
-### IMPORTANT ISSUES:
-
-| JIRA | Summary | Priority | Component | Reporter | Contributor |
-|:---- |:---- | :--- |:---- |:---- |:---- |
-| [HDFS-13083](https://issues.apache.org/jira/browse/HDFS-13083) | RBF: Fix doc error setting up client | Major | federation | tartarus | tartarus |
-
-
-### NEW FEATURES:
-
-| JIRA | Summary | Priority | Component | Reporter | Contributor |
-|:---- |:---- | :--- |:---- |:---- |:---- |
-| [HADOOP-12756](https://issues.apache.org/jira/browse/HADOOP-12756) | Incorporate Aliyun OSS file system implementation | Major | fs, fs/oss | shimingfei | mingfei.shi |
-
-
-### IMPROVEMENTS:
-
-| JIRA | Summary | Priority | Component | Reporter | Contributor |
-|:---- |:---- | :--- |:---- |:---- |:---- |
-| [HADOOP-14872](https://issues.apache.org/jira/browse/HADOOP-14872) | CryptoInputStream should implement unbuffer | Major | fs, security | John Zhuge | John Zhuge |
-| [HADOOP-14964](https://issues.apache.org/jira/browse/HADOOP-14964) | AliyunOSS: backport Aliyun OSS module to branch-2 | Major | fs/oss | Genmao Yu | SammiChen |
-| [YARN-6851](https://issues.apache.org/jira/browse/YARN-6851) | Capacity Scheduler: document configs for controlling # containers allowed to be allocated per node heartbeat | Minor | . | Wei Yan | Wei Yan |
-| [YARN-7495](https://issues.apache.org/jira/browse/YARN-7495) | Improve robustness of the AggregatedLogDeletionService | Major | log-aggregation | Jonathan Eagles | Jonathan Eagles |
-| [YARN-7611](https://issues.apache.org/jira/browse/YARN-7611) | Node manager web UI should display container type in containers page | Major | nodemanager, webapp | Weiwei Yang | Weiwei Yang |
-| [HADOOP-15056](https://issues.apache.org/jira/browse/HADOOP-15056) | Fix TestUnbuffer#testUnbufferException failure | Minor | test | Jack Bearden | Jack Bearden |
-| [HADOOP-15012](https://issues.apache.org/jira/browse/HADOOP-15012) | Add readahead, dropbehind, and unbuffer to StreamCapabilities | Major | fs | John Zhuge | John Zhuge |
-| [HADOOP-15104](https://issues.apache.org/jira/browse/HADOOP-15104) | AliyunOSS: change the default value of max error retry | Major | fs/oss | wujinhu | wujinhu |
-| [YARN-7642](https://issues.apache.org/jira/browse/YARN-7642) | Add test case to verify context update after container promotion or demotion with or without auto update | Minor | nodemanager | Weiwei Yang | Weiwei Yang |
-| [HADOOP-15111](https://issues.apache.org/jira/browse/HADOOP-15111) | AliyunOSS: backport HADOOP-14993 to branch-2 | Major | fs/oss | Genmao Yu | Genmao Yu |
-| [HDFS-9023](https://issues.apache.org/jira/browse/HDFS-9023) | When NN is not able to identify DN for replication, reason behind it can be logged | Critical | hdfs-client, namenode | Surendra Singh Lilhore | Xiao Chen |
-| [YARN-7678](https://issues.apache.org/jira/browse/YARN-7678) | Ability to enable logging of container memory stats | Major | nodemanager | Jim Brennan | Jim Brennan |
-| [HDFS-12945](https://issues.apache.org/jira/browse/HDFS-12945) | Switch to ClientProtocol instead of NamenodeProtocols in NamenodeWebHdfsMethods | Minor | . | Wei Yan | Wei Yan |
-| [YARN-7590](https://issues.apache.org/jira/browse/YARN-7590) | Improve container-executor validation check | Major | security, yarn | Eric Yang | Eric Yang |
-| [HADOOP-15189](https://issues.apache.org/jira/browse/HADOOP-15189) | backport HADOOP-15039 to branch-2 and branch-3 | Blocker | . | Genmao Yu | Genmao Yu |
-| [HADOOP-15212](https://issues.apache.org/jira/browse/HADOOP-15212) | Add independent secret manager method for logging expired tokens | Major | security | Daryn Sharp | Daryn Sharp |
-| [YARN-7728](https://issues.apache.org/jira/browse/YARN-7728) | Expose container preemptions related information in Capacity Scheduler queue metrics | Major | . | Eric Payne | Eric Payne |
-| [MAPREDUCE-7048](https://issues.apache.org/jira/browse/MAPREDUCE-7048) | Uber AM can crash due to unknown task in statusUpdate | Major | mr-am | Peter Bacsko | Peter Bacsko |
-| [HADOOP-13972](https://issues.apache.org/jira/browse/HADOOP-13972) | ADLS to support per-store configuration | Major | fs/adl | John Zhuge | Sharad Sonker |
-| [YARN-7813](https://issues.apache.org/jira/browse/YARN-7813) | Capacity Scheduler Intra-queue Preemption should be configurable for each queue | Major | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne |
-| [HDFS-11187](https://issues.apache.org/jira/browse/HDFS-11187) | Optimize disk access for last partial chunk checksum of Finalized replica | Major | datanode | Wei-Chiu Chuang | Gabor Bota |
-| [HADOOP-15279](https://issues.apache.org/jira/browse/HADOOP-15279) | increase maven heap size recommendations | Minor | build, documentation, test | Allen Wittenauer | Allen Wittenauer |
-| [HDFS-12884](https://issues.apache.org/jira/browse/HDFS-12884) | BlockUnderConstructionFeature.truncateBlock should be of type BlockInfo | Major | namenode | Konstantin Shvachko | chencan |
-| [HADOOP-15334](https://issues.apache.org/jira/browse/HADOOP-15334) | Upgrade Maven surefire plugin | Major | build | Arpit Agarwal | Arpit Agarwal |
-| [YARN-7623](https://issues.apache.org/jira/browse/YARN-7623) | Fix the CapacityScheduler Queue configuration documentation | Major | . | Arun Suresh | Jonathan Hung |
-| [HDFS-13314](https://issues.apache.org/jira/browse/HDFS-13314) | NameNode should optionally exit if it detects FsImage corruption | Major | namenode | Arpit Agarwal | Arpit Agarwal |
-
-
-### BUG FIXES:
-
-| JIRA | Summary | Priority | Component | Reporter | Contributor |
-|:---- |:---- | :--- |:---- |:---- |:---- |
-| [HADOOP-13723](https://issues.apache.org/jira/browse/HADOOP-13723) | AliyunOSSInputStream#read() should update read bytes stat correctly | Major | tools | Mingliang Liu | Mingliang Liu |
-| [HADOOP-14045](https://issues.apache.org/jira/browse/HADOOP-14045) | Aliyun OSS documentation missing from website | Major | documentation, fs/oss | Andrew Wang | Yiqun Lin |
-| [HADOOP-14458](https://issues.apache.org/jira/browse/HADOOP-14458) | Add missing imports to TestAliyunOSSFileSystemContract.java | Trivial | fs/oss, test | Mingliang Liu | Mingliang Liu |
-| [HADOOP-14466](https://issues.apache.org/jira/browse/HADOOP-14466) | Remove useless document from TestAliyunOSSFileSystemContract.java | Minor | documentation | Akira Ajisaka | Chen Liang |
-| [HDFS-12318](https://issues.apache.org/jira/browse/HDFS-12318) | Fix IOException condition for openInfo in DFSInputStream | Major | . | legend | legend |
-| [HDFS-12614](https://issues.apache.org/jira/browse/HDFS-12614) | FSPermissionChecker#getINodeAttrs() throws NPE when INodeAttributesProvider configured | Major | . | Manoj Govindassamy | Manoj Govindassamy |
-| [HDFS-12788](https://issues.apache.org/jira/browse/HDFS-12788) | Reset the upload button when file upload fails | Critical | ui, webhdfs | Brahma Reddy Battula | Brahma Reddy Battula |
-| [YARN-7388](https://issues.apache.org/jira/browse/YARN-7388) | TestAMRestart should be scheduler agnostic | Major | . | Haibo Chen | Haibo Chen |
-| [HDFS-12705](https://issues.apache.org/jira/browse/HDFS-12705) | WebHdfsFileSystem exceptions should retain the caused by exception | Major | hdfs | Daryn Sharp | Hanisha Koneru |
-| [YARN-7361](https://issues.apache.org/jira/browse/YARN-7361) | Improve the docker container runtime documentation | Major | . | Shane Kumpf | Shane Kumpf |
-| [YARN-7469](https://issues.apache.org/jira/browse/YARN-7469) | Capacity Scheduler Intra-queue preemption: User can starve if newest app is exactly at user limit | Major | capacity scheduler, yarn | Eric Payne | Eric Payne |
-| [YARN-7489](https://issues.apache.org/jira/browse/YARN-7489) | ConcurrentModificationException in RMAppImpl#getRMAppMetrics | Major | capacityscheduler | Tao Yang | Tao Yang |
-| [YARN-7525](https://issues.apache.org/jira/browse/YARN-7525) | Incorrect query parameters in cluster nodes REST API document | Minor | documentation | Tao Yang | Tao Yang |
-| [HADOOP-15045](https://issues.apache.org/jira/browse/HADOOP-15045) | ISA-L build options are documented in branch-2 | Major | build, documentation | Akira Ajisaka | Akira Ajisaka |
-| [YARN-7390](https://issues.apache.org/jira/browse/YARN-7390) | All reservation related test cases failed when TestYarnClient runs against Fair Scheduler. | Major | fairscheduler, reservation system | Yufei Gu | Yufei Gu |
-| [HDFS-12754](https://issues.apache.org/jira/browse/HDFS-12754) | Lease renewal can hit a deadlock | Major | . | Kuhu Shukla | Kuhu Shukla |
-| [HDFS-12832](https://issues.apache.org/jira/browse/HDFS-12832) | INode.getFullPathName may throw ArrayIndexOutOfBoundsException lead to NameNode exit | Critical | namenode | DENG FEI | Konstantin Shvachko |
-| [HDFS-11754](https://issues.apache.org/jira/browse/HDFS-11754) | Make FsServerDefaults cache configurable. | Minor | . | Rushabh S Shah | Mikhail Erofeev |
-| [YARN-7509](https://issues.apache.org/jira/browse/YARN-7509) | AsyncScheduleThread and ResourceCommitterService are still running after RM is transitioned to standby | Critical | . | Tao Yang | Tao Yang |
-| [YARN-7558](https://issues.apache.org/jira/browse/YARN-7558) | "yarn logs" command fails to get logs for running containers if UI authentication is enabled. | Critical | . | Namit Maheshwari | Xuan Gong |
-| [HDFS-12638](https://issues.apache.org/jira/browse/HDFS-12638) | Delete copy-on-truncate block along with the original block, when deleting a file being truncated | Blocker | hdfs | Jiandan Yang | Konstantin Shvachko |
-| [MAPREDUCE-5124](https://issues.apache.org/jira/browse/MAPREDUCE-5124) | AM lacks flow control for task events | Major | mr-am | Jason Lowe | Peter Bacsko |
-| [YARN-7455](https://issues.apache.org/jira/browse/YARN-7455) | quote\_and\_append\_arg can overflow buffer | Major | nodemanager | Jason Lowe | Jim Brennan |
-| [HADOOP-14985](https://issues.apache.org/jira/browse/HADOOP-14985) | Remove subversion related code from VersionInfoMojo.java | Minor | build | Akira Ajisaka | Ajay Kumar |
-| [HDFS-12889](https://issues.apache.org/jira/browse/HDFS-12889) | Router UI is missing robots.txt file | Major | . | Bharat Viswanadham | Bharat Viswanadham |
-| [HDFS-11576](https://issues.apache.org/jira/browse/HDFS-11576) | Block recovery will fail indefinitely if recovery time \> heartbeat interval | Critical | datanode, hdfs, namenode | Lukas Majercak | Lukas Majercak |
-| [YARN-7607](https://issues.apache.org/jira/browse/YARN-7607) | Remove the trailing duplicated timestamp in container diagnostics message | Minor | nodemanager | Weiwei Yang | Weiwei Yang |
-| [HADOOP-15080](https://issues.apache.org/jira/browse/HADOOP-15080) | Aliyun OSS: update oss sdk from 2.8.1 to 2.8.3 to remove its dependency on Cat-x "json-lib" | Blocker | fs/oss | Chris Douglas | SammiChen |
-| [YARN-7591](https://issues.apache.org/jira/browse/YARN-7591) | NPE in async-scheduling mode of CapacityScheduler | Critical | capacityscheduler | Tao Yang | Tao Yang |
-| [YARN-7608](https://issues.apache.org/jira/browse/YARN-7608) | Incorrect sTarget column causing DataTable warning on RM application and scheduler web page | Major | resourcemanager, webapp | Weiwei Yang | Gergely Novák |
-| [HDFS-12833](https://issues.apache.org/jira/browse/HDFS-12833) | Distcp : Update the usage of delete option for dependency with update and overwrite option | Minor | distcp, hdfs | Harshakiran Reddy | usharani |
-| [YARN-7647](https://issues.apache.org/jira/browse/YARN-7647) | NM print inappropriate error log when node-labels is enabled | Minor | . | Yang Wang | Yang Wang |
-| [HDFS-12907](https://issues.apache.org/jira/browse/HDFS-12907) | Allow read-only access to reserved raw for non-superusers | Major | namenode | Daryn Sharp | Rushabh S Shah |
-| [HDFS-12881](https://issues.apache.org/jira/browse/HDFS-12881) | Output streams closed with IOUtils suppressing write errors | Major | . | Jason Lowe | Ajay Kumar |
-| [YARN-7595](https://issues.apache.org/jira/browse/YARN-7595) | Container launching code suppresses close exceptions after writes | Major | nodemanager | Jason Lowe | Jim Brennan |
-| [HADOOP-15085](https://issues.apache.org/jira/browse/HADOOP-15085) | Output streams closed with IOUtils suppressing write errors | Major | . | Jason Lowe | Jim Brennan |
-| [YARN-7661](https://issues.apache.org/jira/browse/YARN-7661) | NodeManager metrics return wrong value after update node resource | Major | . | Yang Wang | Yang Wang |
-| [HDFS-12347](https://issues.apache.org/jira/browse/HDFS-12347) | TestBalancerRPCDelay#testBalancerRPCDelay fails very frequently | Critical | test | Xiao Chen | Bharat Viswanadham |
-| [YARN-7542](https://issues.apache.org/jira/browse/YARN-7542) | Fix issue that causes some Running Opportunistic Containers to be recovered as PAUSED | Major | . | Arun Suresh | Sampada Dehankar |
-| [HADOOP-15143](https://issues.apache.org/jira/browse/HADOOP-15143) | NPE due to Invalid KerberosTicket in UGI | Major | . | Jitendra Nath Pandey | Mukul Kumar Singh |
-| [YARN-7692](https://issues.apache.org/jira/browse/YARN-7692) | Skip validating priority acls while recovering applications | Blocker | resourcemanager | Charan Hebri | Sunil G |
-| [MAPREDUCE-7028](https://issues.apache.org/jira/browse/MAPREDUCE-7028) | Concurrent task progress updates causing NPE in Application Master | Blocker | mr-am | Gergo Repas | Gergo Repas |
-| [YARN-7619](https://issues.apache.org/jira/browse/YARN-7619) | Max AM Resource value in Capacity Scheduler UI has to be refreshed for every user | Major | capacity scheduler, yarn | Eric Payne | Eric Payne |
-| [YARN-7699](https://issues.apache.org/jira/browse/YARN-7699) | queueUsagePercentage is coming as INF for getApp REST api call | Major | webapp | Sunil G | Sunil G |
-| [YARN-7508](https://issues.apache.org/jira/browse/YARN-7508) | NPE in FiCaSchedulerApp when debug log enabled in async-scheduling mode | Major | capacityscheduler | Tao Yang | Tao Yang |
-| [YARN-7663](https://issues.apache.org/jira/browse/YARN-7663) | RMAppImpl:Invalid event: START at KILLED | Minor | resourcemanager | lujie | lujie |
-| [YARN-6948](https://issues.apache.org/jira/browse/YARN-6948) | Invalid event: ATTEMPT\_ADDED at FINAL\_SAVING | Minor | yarn | lujie | lujie |
-| [YARN-7735](https://issues.apache.org/jira/browse/YARN-7735) | Fix typo in YARN documentation | Minor | documentation | Takanobu Asanuma | Takanobu Asanuma |
-| [YARN-7727](https://issues.apache.org/jira/browse/YARN-7727) | Incorrect log levels in few logs with QueuePriorityContainerCandidateSelector | Minor | yarn | Prabhu Joseph | Prabhu Joseph |
-| [HDFS-11915](https://issues.apache.org/jira/browse/HDFS-11915) | Sync rbw dir on the first hsync() to avoid file lost on power failure | Critical | . | Kanaka Kumar Avvaru | Vinayakumar B |
-| [HDFS-9049](https://issues.apache.org/jira/browse/HDFS-9049) | Make Datanode Netty reverse proxy port to be configurable | Major | datanode | Vinayakumar B | Vinayakumar B |
-| [HADOOP-15150](https://issues.apache.org/jira/browse/HADOOP-15150) | in FsShell, UGI params should be overidden through env vars(-D arg) | Major | . | Brahma Reddy Battula | Brahma Reddy Battula |
-| [HADOOP-15181](https://issues.apache.org/jira/browse/HADOOP-15181) | Typo in SecureMode.md | Trivial | documentation | Masahiro Tanaka | Masahiro Tanaka |
-| [YARN-7737](https://issues.apache.org/jira/browse/YARN-7737) | prelaunch.err file not found exception on container failure | Major | . | Jonathan Hung | Keqiu Hu |
-| [HDFS-13063](https://issues.apache.org/jira/browse/HDFS-13063) | Fix the incorrect spelling in HDFSHighAvailabilityWithQJM.md | Trivial | documentation | Jianfei Jiang | Jianfei Jiang |
-| [YARN-7102](https://issues.apache.org/jira/browse/YARN-7102) | NM heartbeat stuck when responseId overflows MAX\_INT | Critical | . | Botong Huang | Botong Huang |
-| [HADOOP-15151](https://issues.apache.org/jira/browse/HADOOP-15151) | MapFile.fix creates a wrong index file in case of block-compressed data file. | Major | common | Grigori Rybkine | Grigori Rybkine |
-| [MAPREDUCE-7020](https://issues.apache.org/jira/browse/MAPREDUCE-7020) | Task timeout in uber mode can crash AM | Major | mr-am | Akira Ajisaka | Peter Bacsko |
-| [YARN-7698](https://issues.apache.org/jira/browse/YARN-7698) | A misleading variable's name in ApplicationAttemptEventDispatcher | Minor | resourcemanager | Jinjiang Ling | Jinjiang Ling |
-| [HDFS-13100](https://issues.apache.org/jira/browse/HDFS-13100) | Handle IllegalArgumentException when GETSERVERDEFAULTS is not implemented in webhdfs. | Critical | hdfs, webhdfs | Yongjun Zhang | Yongjun Zhang |
-| [YARN-6868](https://issues.apache.org/jira/browse/YARN-6868) | Add test scope to certain entries in hadoop-yarn-server-resourcemanager pom.xml | Major | yarn | Ray Chiang | Ray Chiang |
-| [YARN-7849](https://issues.apache.org/jira/browse/YARN-7849) | TestMiniYarnClusterNodeUtilization#testUpdateNodeUtilization fails due to heartbeat sync error | Major | test | Jason Lowe | Botong Huang |
-| [YARN-7801](https://issues.apache.org/jira/browse/YARN-7801) | AmFilterInitializer should addFilter after fill all parameters | Critical | . | Sumana Sathish | Wangda Tan |
-| [YARN-7890](https://issues.apache.org/jira/browse/YARN-7890) | NPE during container relaunch | Major | . | Billie Rinaldi | Jason Lowe |
-| [HDFS-12935](https://issues.apache.org/jira/browse/HDFS-12935) | Get ambiguous result for DFSAdmin command in HA mode when only one namenode is up | Major | tools | Jianfei Jiang | Jianfei Jiang |
-| [HDFS-13120](https://issues.apache.org/jira/browse/HDFS-13120) | Snapshot diff could be corrupted after concat | Major | namenode, snapshots | Xiaoyu Yao | Xiaoyu Yao |
-| [HDFS-10453](https://issues.apache.org/jira/browse/HDFS-10453) | ReplicationMonitor thread could stuck for long time due to the race between replication and delete of same file in a large cluster. | Major | namenode | He Xiaoqiao | He Xiaoqiao |
-| [HDFS-8693](https://issues.apache.org/jira/browse/HDFS-8693) | refreshNamenodes does not support adding a new standby to a running DN | Critical | datanode, ha | Jian Fang | Ajith S |
-| [MAPREDUCE-7052](https://issues.apache.org/jira/browse/MAPREDUCE-7052) | TestFixedLengthInputFormat#testFormatCompressedIn is flaky | Major | client, test | Peter Bacsko | Peter Bacsko |
-| [HDFS-13112](https://issues.apache.org/jira/browse/HDFS-13112) | Token expiration edits may cause log corruption or deadlock | Critical | namenode | Daryn Sharp | Daryn Sharp |
-| [MAPREDUCE-7053](https://issues.apache.org/jira/browse/MAPREDUCE-7053) | Timed out tasks can fail to produce thread dump | Major | . | Jason Lowe | Jason Lowe |
-| [HADOOP-15206](https://issues.apache.org/jira/browse/HADOOP-15206) | BZip2 drops and duplicates records when input split size is small | Major | . | Aki Tanaka | Aki Tanaka |
-| [YARN-7947](https://issues.apache.org/jira/browse/YARN-7947) | Capacity Scheduler intra-queue preemption can NPE for non-schedulable apps | Major | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne |
-| [YARN-7945](https://issues.apache.org/jira/browse/YARN-7945) | Java Doc error in UnmanagedAMPoolManager for branch-2 | Major | . | Rohith Sharma K S | Botong Huang |
-| [HADOOP-14903](https://issues.apache.org/jira/browse/HADOOP-14903) | Add json-smart explicitly to pom.xml | Major | common | Ray Chiang | Ray Chiang |
-| [HDFS-12781](https://issues.apache.org/jira/browse/HDFS-12781) | After Datanode down, In Namenode UI Datanode tab is throwing warning message. | Major | datanode | Harshakiran Reddy | Brahma Reddy Battula |
-| [HDFS-12070](https://issues.apache.org/jira/browse/HDFS-12070) | Failed block recovery leaves files open indefinitely and at risk for data loss | Major | . | Daryn Sharp | Kihwal Lee |
-| [HADOOP-15251](https://issues.apache.org/jira/browse/HADOOP-15251) | Backport HADOOP-13514 (surefire upgrade) to branch-2 | Major | test | Chris Douglas | Chris Douglas |
-| [HADOOP-15275](https://issues.apache.org/jira/browse/HADOOP-15275) | Incorrect javadoc for return type of RetryPolicy#shouldRetry | Minor | documentation | Nanda kumar | Nanda kumar |
-| [YARN-7511](https://issues.apache.org/jira/browse/YARN-7511) | NPE in ContainerLocalizer when localization failed for running container | Major | nodemanager | Tao Yang | Tao Yang |
-| [MAPREDUCE-7023](https://issues.apache.org/jira/browse/MAPREDUCE-7023) | TestHadoopArchiveLogs.testCheckFilesAndSeedApps fails on rerun | Minor | test | Gergely Novák | Gergely Novák |
-| [HADOOP-15283](https://issues.apache.org/jira/browse/HADOOP-15283) | Upgrade from findbugs 3.0.1 to spotbugs 3.1.2 in branch-2 to fix docker image build | Major | . | Xiao Chen | Akira Ajisaka |
-| [YARN-7736](https://issues.apache.org/jira/browse/YARN-7736) | Fix itemization in YARN federation document | Minor | documentation | Akira Ajisaka | Sen Zhao |
-| [HDFS-13164](https://issues.apache.org/jira/browse/HDFS-13164) | File not closed if streamer fail with DSQuotaExceededException | Major | hdfs-client | Xiao Chen | Xiao Chen |
-| [HDFS-13109](https://issues.apache.org/jira/browse/HDFS-13109) | Support fully qualified hdfs path in EZ commands | Major | hdfs | Hanisha Koneru | Hanisha Koneru |
-| [MAPREDUCE-6930](https://issues.apache.org/jira/browse/MAPREDUCE-6930) | mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are both present twice in mapred-default.xml | Major | mrv2 | Daniel Templeton | Sen Zhao |
-| [HDFS-12156](https://issues.apache.org/jira/browse/HDFS-12156) | TestFSImage fails without -Pnative | Major | test | Akira Ajisaka | Akira Ajisaka |
-| [HADOOP-15308](https://issues.apache.org/jira/browse/HADOOP-15308) | TestConfiguration fails on Windows because of paths | Major | . | Íñigo Goiri | Xiao Liang |
-| [YARN-7636](https://issues.apache.org/jira/browse/YARN-7636) | Re-reservation count may overflow when cluster resource exhausted for a long time | Major | capacityscheduler | Tao Yang | Tao Yang |
-| [HDFS-12886](https://issues.apache.org/jira/browse/HDFS-12886) | Ignore minReplication for block recovery | Major | hdfs, namenode | Lukas Majercak | Lukas Majercak |
-| [HDFS-13296](https://issues.apache.org/jira/browse/HDFS-13296) | GenericTestUtils generates paths with drive letter in Windows and fail webhdfs related test cases | Major | . | Xiao Liang | Xiao Liang |
-| [HDFS-13268](https://issues.apache.org/jira/browse/HDFS-13268) | TestWebHdfsFileContextMainOperations fails on Windows | Major | . | Íñigo Goiri | Xiao Liang |
-| [YARN-8054](https://issues.apache.org/jira/browse/YARN-8054) | Improve robustness of the LocalDirsHandlerService MonitoringTimerTask thread | Major | . | Jonathan Eagles | Jonathan Eagles |
-| [YARN-7873](https://issues.apache.org/jira/browse/YARN-7873) | Revert YARN-6078 | Blocker | . | Billie Rinaldi | Billie Rinaldi |
-| [HDFS-13195](https://issues.apache.org/jira/browse/HDFS-13195) | DataNode conf page cannot display the current value after reconfig | Minor | datanode | maobaolong | maobaolong |
-| [HADOOP-15320](https://issues.apache.org/jira/browse/HADOOP-15320) | Remove customized getFileBlockLocations for hadoop-azure and hadoop-azure-datalake | Major | fs/adl, fs/azure | shanyu zhao | shanyu zhao |
-| [HADOOP-12862](https://issues.apache.org/jira/browse/HADOOP-12862) | LDAP Group Mapping over SSL can not specify trust store | Major | . | Wei-Chiu Chuang | Wei-Chiu Chuang |
-| [HDFS-13427](https://issues.apache.org/jira/browse/HDFS-13427) | Fix the section titles of transparent encryption document | Minor | documentation | Akira Ajisaka | Akira Ajisaka |
-
-
-### TESTS:
-
-| JIRA | Summary | Priority | Component | Reporter | Contributor |
-|:---- |:---- | :--- |:---- |:---- |:---- |
-| [HADOOP-14696](https://issues.apache.org/jira/browse/HADOOP-14696) | parallel tests don't work for Windows | Minor | test | Allen Wittenauer | Allen Wittenauer |
-
-
-### SUB-TASKS:
-
-| JIRA | Summary | Priority | Component | Reporter | Contributor |
-|:---- |:---- | :--- |:---- |:---- |:---- |
-| [HADOOP-13481](https://issues.apache.org/jira/browse/HADOOP-13481) | User end documents for Aliyun OSS FileSystem | Minor | fs, fs/oss | Genmao Yu | Genmao Yu |
-| [HADOOP-13591](https://issues.apache.org/jira/browse/HADOOP-13591) | Unit test failure in TestOSSContractGetFileStatus and TestOSSContractRootDir | Major | fs, fs/oss | Genmao Yu | Genmao Yu |
-| [HADOOP-13624](https://issues.apache.org/jira/browse/HADOOP-13624) | Rename TestAliyunOSSContractDispCp | Major | fs, fs/oss | Kai Zheng | Genmao Yu |
-| [HADOOP-14065](https://issues.apache.org/jira/browse/HADOOP-14065) | AliyunOSS: oss directory filestatus should use meta time | Major | fs/oss | Fei Hui | Fei Hui |
-| [HADOOP-13768](https://issues.apache.org/jira/browse/HADOOP-13768) | AliyunOSS: handle the failure in the batch delete operation `deleteDirs`. | Major | fs | Genmao Yu | Genmao Yu |
-| [HADOOP-14069](https://issues.apache.org/jira/browse/HADOOP-14069) | AliyunOSS: listStatus returns wrong file info | Major | fs/oss | Fei Hui | Fei Hui |
-| [HADOOP-13769](https://issues.apache.org/jira/browse/HADOOP-13769) | AliyunOSS: update oss sdk version | Major | fs, fs/oss | Genmao Yu | Genmao Yu |
-| [HADOOP-14072](https://issues.apache.org/jira/browse/HADOOP-14072) | AliyunOSS: Failed to read from stream when seek beyond the download size | Major | fs/oss | Genmao Yu | Genmao Yu |
-| [HADOOP-14192](https://issues.apache.org/jira/browse/HADOOP-14192) | Aliyun OSS FileSystem contract test should implement getTestBaseDir() | Major | fs/oss | Mingliang Liu | Mingliang Liu |
-| [HADOOP-14194](https://issues.apache.org/jira/browse/HADOOP-14194) | Aliyun OSS should not use empty endpoint as default | Major | fs/oss | Mingliang Liu | Genmao Yu |
-| [HADOOP-14787](https://issues.apache.org/jira/browse/HADOOP-14787) | AliyunOSS: Implement the `createNonRecursive` operator | Major | fs, fs/oss | Genmao Yu | Genmao Yu |
-| [HADOOP-14649](https://issues.apache.org/jira/browse/HADOOP-14649) | Update aliyun-sdk-oss version to 2.8.1 | Major | fs/oss | Ray Chiang | Genmao Yu |
-| [HADOOP-14799](https://issues.apache.org/jira/browse/HADOOP-14799) | Update nimbus-jose-jwt to 4.41.1 | Major | . | Ray Chiang | Ray Chiang |
-| [HADOOP-14997](https://issues.apache.org/jira/browse/HADOOP-14997) | Add hadoop-aliyun as dependency of hadoop-cloud-storage | Minor | fs/oss | Genmao Yu | Genmao Yu |
-| [HDFS-12801](https://issues.apache.org/jira/browse/HDFS-12801) | RBF: Set MountTableResolver as default file resolver | Minor | . | Íñigo Goiri | Íñigo Goiri |
-| [YARN-7430](https://issues.apache.org/jira/browse/YARN-7430) | Enable user re-mapping for Docker containers by default | Blocker | security, yarn | Eric Yang | Eric Yang |
-| [YARN-6128](https://issues.apache.org/jira/browse/YARN-6128) | Add support for AMRMProxy HA | Major | amrmproxy, nodemanager | Subru Krishnan | Botong Huang |
-| [HADOOP-15024](https://issues.apache.org/jira/browse/HADOOP-15024) | AliyunOSS: support user agent configuration and include that & Hadoop version information to oss server | Major | fs, fs/oss | SammiChen | SammiChen |
-| [HDFS-12858](https://issues.apache.org/jira/browse/HDFS-12858) | RBF: Add router admin commands usage in HDFS commands reference doc | Minor | documentation | Yiqun Lin | Yiqun Lin |
-| [HDFS-12835](https://issues.apache.org/jira/browse/HDFS-12835) | RBF: Fix Javadoc parameter errors | Minor | . | Wei Yan | Wei Yan |
-| [YARN-7587](https://issues.apache.org/jira/browse/YARN-7587) | Skip dispatching opportunistic containers to nodes whose queue is already full | Major | . | Weiwei Yang | Weiwei Yang |
-| [HDFS-12396](https://issues.apache.org/jira/browse/HDFS-12396) | Webhdfs file system should get delegation token from kms provider. | Major | encryption, kms, webhdfs | Rushabh S Shah | Rushabh S Shah |
-| [YARN-6704](https://issues.apache.org/jira/browse/YARN-6704) | Add support for work preserving NM restart when FederationInterceptor is enabled in AMRMProxyService | Major | . | Botong Huang | Botong Huang |
-| [HDFS-12875](https://issues.apache.org/jira/browse/HDFS-12875) | RBF: Complete logic for -readonly option of dfsrouteradmin add command | Major | . | Yiqun Lin | Íñigo Goiri |
-| [YARN-7630](https://issues.apache.org/jira/browse/YARN-7630) | Fix AMRMToken rollover handling in AMRMProxy | Minor | . | Botong Huang | Botong Huang |
-| [HDFS-12937](https://issues.apache.org/jira/browse/HDFS-12937) | RBF: Add more unit tests for router admin commands | Major | test | Yiqun Lin | Yiqun Lin |
-| [HDFS-12988](https://issues.apache.org/jira/browse/HDFS-12988) | RBF: Mount table entries not properly updated in the local cache | Major | . | Íñigo Goiri | Íñigo Goiri |
-| [HADOOP-15156](https://issues.apache.org/jira/browse/HADOOP-15156) | backport HADOOP-15086 rename fix to branch-2 | Major | fs/azure | Thomas Marquardt | Thomas Marquardt |
-| [YARN-7716](https://issues.apache.org/jira/browse/YARN-7716) | metricsTimeStart and metricsTimeEnd should be all lower case in the doc | Major | timelinereader | Haibo Chen | Haibo Chen |
-| [HDFS-12802](https://issues.apache.org/jira/browse/HDFS-12802) | RBF: Control MountTableResolver cache size | Major | . | Íñigo Goiri | Íñigo Goiri |
-| [HADOOP-15027](https://issues.apache.org/jira/browse/HADOOP-15027) | AliyunOSS: Support multi-thread pre-read to improve sequential read from Hadoop to Aliyun OSS performance | Major | fs/oss | wujinhu | wujinhu |
-| [HDFS-13028](https://issues.apache.org/jira/browse/HDFS-13028) | RBF: Fix spurious TestRouterRpc#testProxyGetStats | Minor | . | Íñigo Goiri | Íñigo Goiri |
-| [YARN-5094](https://issues.apache.org/jira/browse/YARN-5094) | some YARN container events have timestamp of -1 | Critical | . | Sangjin Lee | Haibo Chen |
-| [YARN-7782](https://issues.apache.org/jira/browse/YARN-7782) | Enable user re-mapping for Docker containers in yarn-default.xml | Blocker | security, yarn | Eric Yang | Eric Yang |
-| [HDFS-12772](https://issues.apache.org/jira/browse/HDFS-12772) | RBF: Federation Router State State Store internal API | Major | . | Íñigo Goiri | Íñigo Goiri |
-| [HDFS-13042](https://issues.apache.org/jira/browse/HDFS-13042) | RBF: Heartbeat Router State | Major | . | Íñigo Goiri | Íñigo Goiri |
-| [HDFS-13049](https://issues.apache.org/jira/browse/HDFS-13049) | RBF: Inconsistent Router OPTS config in branch-2 and branch-3 | Minor | . | Wei Yan | Wei Yan |
-| [HDFS-12574](https://issues.apache.org/jira/browse/HDFS-12574) | Add CryptoInputStream to WebHdfsFileSystem read call. | Major | encryption, kms, webhdfs | Rushabh S Shah | Rushabh S Shah |
-| [HDFS-13044](https://issues.apache.org/jira/browse/HDFS-13044) | RBF: Add a safe mode for the Router | Major | . | Íñigo Goiri | Íñigo Goiri |
-| [HDFS-13043](https://issues.apache.org/jira/browse/HDFS-13043) | RBF: Expose the state of the Routers in the federation | Major | . | Íñigo Goiri | Íñigo Goiri |
-| [HDFS-13068](https://issues.apache.org/jira/browse/HDFS-13068) | RBF: Add router admin option to manage safe mode | Major | . | Íñigo Goiri | Yiqun Lin |
-| [HDFS-13119](https://issues.apache.org/jira/browse/HDFS-13119) | RBF: Manage unavailable clusters | Major | . | Íñigo Goiri | Yiqun Lin |
-| [HDFS-13187](https://issues.apache.org/jira/browse/HDFS-13187) | RBF: Fix Routers information shown in the web UI | Minor | . | Wei Yan | Wei Yan |
-| [HDFS-13184](https://issues.apache.org/jira/browse/HDFS-13184) | RBF: Improve the unit test TestRouterRPCClientRetries | Minor | test | Yiqun Lin | Yiqun Lin |
-| [HDFS-13199](https://issues.apache.org/jira/browse/HDFS-13199) | RBF: Fix the hdfs router page missing label icon issue | Major | federation, hdfs | maobaolong | maobaolong |
-| [HDFS-13214](https://issues.apache.org/jira/browse/HDFS-13214) | RBF: Complete document of Router configuration | Major | . | Tao Jie | Yiqun Lin |
-| [HDFS-13230](https://issues.apache.org/jira/browse/HDFS-13230) | RBF: ConnectionManager's cleanup task will compare each pool's own active conns with its total conns | Minor | . | Wei Yan | Chao Sun |
-| [HDFS-13233](https://issues.apache.org/jira/browse/HDFS-13233) | RBF: MountTableResolver doesn't return the correct mount point of the given path | Major | hdfs | wangzhiyuan | wangzhiyuan |
-| [HDFS-13212](https://issues.apache.org/jira/browse/HDFS-13212) | RBF: Fix router location cache issue | Major | federation, hdfs | Weiwei Wu | Weiwei Wu |
-| [HDFS-13232](https://issues.apache.org/jira/browse/HDFS-13232) | RBF: ConnectionPool should return first usable connection | Minor | . | Wei Yan | Ekanth S |
-| [HDFS-13240](https://issues.apache.org/jira/browse/HDFS-13240) | RBF: Update some inaccurate document descriptions | Minor | . | Yiqun Lin | Yiqun Lin |
-| [HDFS-11399](https://issues.apache.org/jira/browse/HDFS-11399) | Many tests fails in Windows due to injecting disk failures | Major | . | Yiqun Lin | Yiqun Lin |
-| [HDFS-13241](https://issues.apache.org/jira/browse/HDFS-13241) | RBF: TestRouterSafemode failed if the port 8888 is in use | Major | hdfs, test | maobaolong | maobaolong |
-| [HDFS-13253](https://issues.apache.org/jira/browse/HDFS-13253) | RBF: Quota management incorrect parent-child relationship judgement | Major | . | Yiqun Lin | Yiqun Lin |
-| [HDFS-13226](https://issues.apache.org/jira/browse/HDFS-13226) | RBF: Throw the exception if mount table entry validated failed | Major | hdfs | maobaolong | maobaolong |
-| [HDFS-12773](https://issues.apache.org/jira/browse/HDFS-12773) | RBF: Improve State Store FS implementation | Major | . | Íñigo Goiri | Íñigo Goiri |
-| [HDFS-13198](https://issues.apache.org/jira/browse/HDFS-13198) | RBF: RouterHeartbeatService throws out CachedStateStore related exceptions when starting router | Minor | . | Wei Yan | Wei Yan |
-| [HDFS-13224](https://issues.apache.org/jira/browse/HDFS-13224) | RBF: Resolvers to support mount points across multiple subclusters | Major | . | Íñigo Goiri | Íñigo Goiri |
-| [HADOOP-15262](https://issues.apache.org/jira/browse/HADOOP-15262) | AliyunOSS: move files under a directory in parallel when rename a directory | Major | fs/oss | wujinhu | wujinhu |
-| [HDFS-13215](https://issues.apache.org/jira/browse/HDFS-13215) | RBF: Move Router to its own module | Major | . | Íñigo Goiri | Wei Yan |
-| [HDFS-13250](https://issues.apache.org/jira/browse/HDFS-13250) | RBF: Router to manage requests across multiple subclusters | Major | . | Íñigo Goiri | Íñigo Goiri |
-| [HDFS-13318](https://issues.apache.org/jira/browse/HDFS-13318) | RBF: Fix FindBugs in hadoop-hdfs-rbf | Minor | . | Íñigo Goiri | Ekanth S |
-| [HDFS-12792](https://issues.apache.org/jira/browse/HDFS-12792) | RBF: Test Router-based federation using HDFSContract | Major | . | Íñigo Goiri | Íñigo Goiri |
-| [HDFS-12512](https://issues.apache.org/jira/browse/HDFS-12512) | RBF: Add WebHDFS | Major | fs | Íñigo Goiri | Wei Yan |
-| [HDFS-13291](https://issues.apache.org/jira/browse/HDFS-13291) | RBF: Implement available space based OrderResolver | Major | . | Yiqun Lin | Yiqun Lin |
-| [HDFS-13204](https://issues.apache.org/jira/browse/HDFS-13204) | RBF: Optimize name service safe mode icon | Minor | . | liuhongtong | liuhongtong |
-| [HDFS-13352](https://issues.apache.org/jira/browse/HDFS-13352) | RBF: Add xsl stylesheet for hdfs-rbf-default.xml | Major | documentation | Takanobu Asanuma | Takanobu Asanuma |
-| [YARN-8010](https://issues.apache.org/jira/browse/YARN-8010) | Add config in FederationRMFailoverProxy to not bypass facade cache when failing over | Minor | . | Botong Huang | Botong Huang |
-| [HDFS-13347](https://issues.apache.org/jira/browse/HDFS-13347) | RBF: Cache datanode reports | Minor | . | Íñigo Goiri | Íñigo Goiri |
-| [HDFS-13289](https://issues.apache.org/jira/browse/HDFS-13289) | RBF: TestConnectionManager#testCleanup() test case need correction | Minor | . | Dibyendu Karmakar | Dibyendu Karmakar |
-| [HDFS-13364](https://issues.apache.org/jira/browse/HDFS-13364) | RBF: Support NamenodeProtocol in the Router | Major | . | Íñigo Goiri | Íñigo Goiri |
-| [HADOOP-14651](https://issues.apache.org/jira/browse/HADOOP-14651) | Update okhttp version to 2.7.5 | Major | fs/adl | Ray Chiang | Ray Chiang |
-| [HADOOP-14999](https://issues.apache.org/jira/browse/HADOOP-14999) | AliyunOSS: provide one asynchronous multi-part based uploading mechanism | Major | fs/oss | Genmao Yu | Genmao Yu |
-
-
-### OTHER:
-
-| JIRA | Summary | Priority | Component | Reporter | Contributor |
-|:---- |:---- | :--- |:---- |:---- |:---- |
-| [HADOOP-15149](https://issues.apache.org/jira/browse/HADOOP-15149) | CryptoOutputStream should implement StreamCapabilities | Major | fs | Mike Drob | Xiao Chen |
-| [YARN-7691](https://issues.apache.org/jira/browse/YARN-7691) | Add Unit Tests for ContainersLauncher | Major | . | Sampada Dehankar | Sampada Dehankar |
-| [HADOOP-15177](https://issues.apache.org/jira/browse/HADOOP-15177) | Update the release year to 2018 | Blocker | build | Akira Ajisaka | Bharat Viswanadham |
-
-
http://git-wip-us.apache.org/repos/asf/hadoop/blob/66c9905b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md
deleted file mode 100644
index bed70b1..0000000
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md
+++ /dev/null
@@ -1,88 +0,0 @@
-
-<!---
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
--->
-# "Apache Hadoop" 2.9.1 Release Notes
-
-These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
-
-
----
-
-* [HADOOP-12756](https://issues.apache.org/jira/browse/HADOOP-12756) | *Major* | **Incorporate Aliyun OSS file system implementation**
-
-Aliyun OSS is widely used among cloud users in China, and this work implements a new Hadoop-compatible filesystem, AliyunOSSFileSystem, with the oss:// scheme, similar to the existing s3a and azure support.
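
To make the new scheme concrete, here is a minimal sketch of reading a file through the generic FileSystem API. It assumes hadoop-aliyun is on the classpath and that endpoint and credential settings (fs.oss.endpoint, fs.oss.accessKeyId, fs.oss.accessKeySecret) are already configured; the bucket and path are hypothetical.

    // Minimal sketch (assumptions noted above): read one line from an
    // oss:// path via the generic Hadoop FileSystem API.
    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class OssReadExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Endpoint and credentials are assumed to be set elsewhere, e.g. in
        // core-site.xml (fs.oss.endpoint, fs.oss.accessKeyId, fs.oss.accessKeySecret).
        FileSystem fs = FileSystem.get(URI.create("oss://my-bucket/"), conf);
        try (FSDataInputStream in = fs.open(new Path("oss://my-bucket/data/sample.txt"));
             BufferedReader reader = new BufferedReader(new InputStreamReader(in))) {
          System.out.println(reader.readLine());
        }
      }
    }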
-
-
----
-
-* [HADOOP-14964](https://issues.apache.org/jira/browse/HADOOP-14964) | *Major* | **AliyunOSS: backport Aliyun OSS module to branch-2**
-
-Aliyun OSS is widely used among cloud users in China, and this work implements a new Hadoop-compatible filesystem, AliyunOSSFileSystem, with the oss:// scheme, similar to the existing s3a and azure support.
-
-
----
-
-* [HDFS-12883](https://issues.apache.org/jira/browse/HDFS-12883) | *Major* | **RBF: Document Router and State Store metrics**
-
-This JIRA changes the Router metrics context from 'router' to 'dfs'.
-
-
----
-
-* [HDFS-12895](https://issues.apache.org/jira/browse/HDFS-12895) | *Major* | **RBF: Add ACL support for mount table**
-
-Mount table entries now support ACLs. Entries created before permissions existed are treated as owner:superuser, group:supergroup, permission:755 by default, so ordinary users can no longer modify entries they created earlier; log in as the superuser to modify such entries.
-
-
----
-
-* [YARN-7190](https://issues.apache.org/jira/browse/YARN-7190) | *Major* | **Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath**
-
-Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath.
-
-
----
-
-* [HADOOP-15156](https://issues.apache.org/jira/browse/HADOOP-15156) | *Major* | **backport HADOOP-15086 rename fix to branch-2**
-
-[WASB] Fix the Azure implementation of FileSystem.rename to ensure that at most one operation succeeds when multiple concurrent rename operations target the same destination file.
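
To see what the guarantee means in practice, the sketch below races two renames against one destination; with the fix applied, at most one of the calls can return true. This is an illustration under assumptions, not the patch itself: it presumes fs.defaultFS points at a WASB store and that the hypothetical source paths exist.

    // Sketch: two concurrent renames to the same destination. With the
    // HADOOP-15086/HADOOP-15156 fix, at most one call can return true.
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RenameRace {
      public static void main(String[] args) throws Exception {
        // Assumes fs.defaultFS points at a WASB store and both sources exist.
        FileSystem fs = FileSystem.get(new Configuration());
        Path src1 = new Path("/tmp/a");
        Path src2 = new Path("/tmp/b");
        Path dst = new Path("/tmp/dst");
        ExecutorService pool = Executors.newFixedThreadPool(2);
        Future<Boolean> first = pool.submit(() -> fs.rename(src1, dst));
        Future<Boolean> second = pool.submit(() -> fs.rename(src2, dst));
        System.out.println("rename results: " + first.get() + ", " + second.get());
        pool.shutdown();
      }
    }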
-
-
----
-
-* [HADOOP-15027](https://issues.apache.org/jira/browse/HADOOP-15027) | *Major* | **AliyunOSS: Support multi-thread pre-read to improve sequential read from Hadoop to Aliyun OSS performance**
-
-Supports multi-threaded pre-read in AliyunOSSInputStream to improve sequential read performance from Hadoop to Aliyun OSS.
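
A hedged sketch of how such pre-read behaviour is typically tuned from client code follows; both property names are assumptions based on the hadoop-aliyun module of this era and should be checked against the release's documentation.

    // Hedged sketch: tuning the pre-read from client code. Both keys are
    // assumptions about the hadoop-aliyun module; verify before relying on them.
    import org.apache.hadoop.conf.Configuration;

    public class OssPreReadTuning {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setInt("fs.oss.multipart.download.threads", 10);        // assumed key: read thread pool size
        conf.setLong("fs.oss.multipart.download.size", 512 * 1024L); // assumed key: bytes per pre-read part
        // A FileSystem created from this conf would use the tuned reader.
      }
    }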
-
-
----
-
-* [HDFS-13083](https://issues.apache.org/jira/browse/HDFS-13083) | *Major* | **RBF: Fix doc error setting up client**
-
-Fixes a documentation error in the client setup instructions for HDFS Router-based Federation.
-
-
----
-
-* [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | *Minor* | **RBF: Use the ZooKeeper as the default State Store**
-
-Changes the default State Store from the local file implementation to ZooKeeper. This additionally requires a ZooKeeper address to be configured.
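
A minimal sketch of the corresponding client-side configuration follows, assuming the usual RBF keys (dfs.federation.router.store.driver.class and hadoop.zk.address); verify both against hdfs-rbf-default.xml for your release.

    // Minimal sketch: pointing the Router State Store at ZooKeeper.
    import org.apache.hadoop.conf.Configuration;

    public class RouterZkStateStore {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Assumed driver class and quorum key; check hdfs-rbf-default.xml.
        conf.set("dfs.federation.router.store.driver.class",
            "org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl");
        conf.set("hadoop.zk.address", "zk1:2181,zk2:2181,zk3:2181"); // hypothetical quorum
      }
    }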
-
-
-
http://git-wip-us.apache.org/repos/asf/hadoop/blob/66c9905b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml
deleted file mode 100644
index a5d87c7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml
+++ /dev/null
@@ -1,312 +0,0 @@
-<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
-<!-- Generated by the JDiff Javadoc doclet -->
-<!-- (http://www.jdiff.org) -->
-<!-- on Mon Apr 16 12:03:07 UTC 2018 -->
-
-<api
- xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
- xsi:noNamespaceSchemaLocation='api.xsd'
- name="Apache Hadoop HDFS 2.9.1"
- jdversion="1.0.9">
-
-<!-- Command line arguments = -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -verbose -classpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/classes:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.9.1.jar:/usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.9.1.jar:/maven/org/slf4j/slf4j-api/1.7.25/slf4j-api-1.7.25.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/com/nimbusds/nimbus-jose-jwt/4.41.1/nimbus-jose-jwt-4.41.1.jar:/maven/com/github/stephenc/jcip/jcip-annotations/1.0-1/jcip-annotations-1.0-1.jar:/maven/net/minidev/json-smart/1.3.1/json-smart-1.3.1.jar:/maven/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/maven/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/maven/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/maven/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/jline/jline/0.9.94/jline-0.9.94.jar:/maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-2.9.1.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/commons-net/commons-net/3.1/commons-net-3.1.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/org/mortbay/jetty/jetty-sslengine/6.1.26/jetty-sslengine-6.1.26.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/maven/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/maven/javax/activation/activation/1.1/activation-1.1.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/maven/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/maven/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/maven/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/maven/org/apache/commons/commons-lang3/3.4/commons-lang3-3.4.jar:/maven/org/apache/avro/avro/1.7.7/avro-1.7.7.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.5/snappy-java-1.0.5.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/maven/org/codehaus/woodstox/stax2-api/3.1.4/stax2-api-3.1.4.jar:/maven/com/fasterxml/woodstox/woodstox-core/5.0.3/woodstox-core-5.0.3.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-2.9.1.jar:/maven/com/squareup/okhttp/okhttp/2.7.5/okhttp-2.7.5.jar:/maven/com/squareup/okio/okio/1.6.0/okio-1.6.0.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/maven/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/maven/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/maven/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/maven/asm/asm/3.2/asm-3.2.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/commons-daemon/commons-daemon/1.0.13/commons-daemon-1.0.13.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/maven/org/slf4j/slf4j-log4j12/1.7.25/slf4j-log4j12-1.7.25.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/maven/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/maven/io/netty/netty-all/4.0.23.Final/netty-all-4.0.23.Final.jar:/maven/xerces/xercesImpl/2.9.1/xercesImpl-2.9.1.jar:/maven/xml-apis/xml-apis/1.3.04/xml-apis-1.3.04.jar:/maven/org/apache/htrace/htrace-core4/4.1.0-incubating/htrace-core4-4.1.0-incubating.jar:/maven/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar:/maven/com/fasterxml/jackson/core/jackson-databind/2.7.8/jackson-databind-2.7.8.jar:/maven/com/fasterxml/jackson/core/jackson-annotations/2.7.8/jackson-annotations-2.7.8.jar:/maven/com/fasterxml/jackson/core/jackson-core/2.7.8/jackson-core-2.7.8.jar -sourcepath /build/source/hadoop-hdfs-project/hadoop-hdfs/src/main/java -apidir /build/source/hadoop-hdfs-project/hadoop-hdfs/target/site/jdiff/xml -apiname Apache Hadoop HDFS 2.9.1 -->
-<package name="org.apache.hadoop.hdfs">
- <doc>
- <![CDATA[<p>A distributed implementation of {@link
-org.apache.hadoop.fs.FileSystem}. This is loosely modelled after
-Google's <a href="http://research.google.com/archive/gfs.html">GFS</a>.</p>
-
-<p>The most important difference is that unlike GFS, Hadoop DFS files
-have strictly one writer at any one time. Bytes are always appended
-to the end of the writer's stream. There is no notion of "record appends"
-or "mutations" that are then checked or reordered. Writers simply emit
-a byte stream. That byte stream is guaranteed to be stored in the
-order written.</p>]]>
- </doc>
-</package>
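
A minimal sketch of this single-writer, append-only model through the FileSystem API follows, assuming a filesystem that supports append (such as HDFS) and a hypothetical path.

    // Sketch: one writer creates the file, a later writer may only append;
    // bytes are stored strictly in the order they were written.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SingleWriterExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/tmp/stream.log"); // hypothetical path
        try (FSDataOutputStream out = fs.create(p)) {
          out.writeBytes("first record\n");
        }
        try (FSDataOutputStream out = fs.append(p)) { // no in-place mutation, only append
          out.writeBytes("second record\n");
        }
      }
    }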
-<package name="org.apache.hadoop.hdfs.net">
-</package>
-<package name="org.apache.hadoop.hdfs.protocol">
-</package>
-<package name="org.apache.hadoop.hdfs.protocol.datatransfer">
-</package>
-<package name="org.apache.hadoop.hdfs.protocol.datatransfer.sasl">
-</package>
-<package name="org.apache.hadoop.hdfs.protocolPB">
-</package>
-<package name="org.apache.hadoop.hdfs.qjournal.client">
-</package>
-<package name="org.apache.hadoop.hdfs.qjournal.protocol">
-</package>
-<package name="org.apache.hadoop.hdfs.qjournal.protocolPB">
-</package>
-<package name="org.apache.hadoop.hdfs.qjournal.server">
- <!-- start interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
- <interface name="JournalNodeMXBean" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getJournalsStatus" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get status information (e.g., whether formatted) of JournalNode's journals.
-
- @return A string presenting status for each journal]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This is the JMX management interface for JournalNode information]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
-</package>
-<package name="org.apache.hadoop.hdfs.security.token.block">
-</package>
-<package name="org.apache.hadoop.hdfs.security.token.delegation">
-</package>
-<package name="org.apache.hadoop.hdfs.server.balancer">
-</package>
-<package name="org.apache.hadoop.hdfs.server.blockmanagement">
-</package>
-<package name="org.apache.hadoop.hdfs.server.common">
-</package>
-<package name="org.apache.hadoop.hdfs.server.datanode">
-</package>
-<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset">
-</package>
-<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset.impl">
-</package>
-<package name="org.apache.hadoop.hdfs.server.datanode.metrics">
-</package>
-<package name="org.apache.hadoop.hdfs.server.datanode.web">
-</package>
-<package name="org.apache.hadoop.hdfs.server.datanode.web.webhdfs">
-</package>
-<package name="org.apache.hadoop.hdfs.server.mover">
-</package>
-<package name="org.apache.hadoop.hdfs.server.namenode">
- <!-- start interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
- <interface name="AuditLogger" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="initialize"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <doc>
- <![CDATA[Called during initialization of the logger.
-
- @param conf The configuration object.]]>
- </doc>
- </method>
- <method name="logAuditEvent"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="succeeded" type="boolean"/>
- <param name="userName" type="java.lang.String"/>
- <param name="addr" type="java.net.InetAddress"/>
- <param name="cmd" type="java.lang.String"/>
- <param name="src" type="java.lang.String"/>
- <param name="dst" type="java.lang.String"/>
- <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
- <doc>
- <![CDATA[Called to log an audit event.
- <p>
- This method must return as quickly as possible, since it's called
- in a critical section of the NameNode's operation.
-
- @param succeeded Whether authorization succeeded.
- @param userName Name of the user executing the request.
- @param addr Remote address of the request.
- @param cmd The requested command.
- @param src Path of affected source file.
- @param dst Path of affected destination file (if any).
- @param stat File information for operations that change the file's
- metadata (permissions, owner, times, etc).]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Interface defining an audit logger.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
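
Based on the method signatures above, a custom audit logger is a small class; the sketch below counts denied operations and, as the contract requires, returns quickly. Registering it via dfs.namenode.audit.loggers is an assumption to verify for your release.

    // Sketch of a custom AuditLogger built from the signatures above: it
    // counts denied operations and returns quickly, as the contract requires.
    import java.net.InetAddress;
    import java.util.concurrent.atomic.AtomicLong;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.hdfs.server.namenode.AuditLogger;

    public class CountingAuditLogger implements AuditLogger {
      private final AtomicLong denied = new AtomicLong();

      @Override
      public void initialize(Configuration conf) {
        // One-time setup; keep it cheap.
      }

      @Override
      public void logAuditEvent(boolean succeeded, String userName, InetAddress addr,
          String cmd, String src, String dst, FileStatus stat) {
        // Called in a critical section of the NameNode: no blocking work here.
        if (!succeeded) {
          denied.incrementAndGet();
        }
      }
    }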
- <!-- start class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
- <class name="HdfsAuditLogger" extends="java.lang.Object"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.server.namenode.AuditLogger"/>
- <constructor name="HdfsAuditLogger"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="logAuditEvent"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="succeeded" type="boolean"/>
- <param name="userName" type="java.lang.String"/>
- <param name="addr" type="java.net.InetAddress"/>
- <param name="cmd" type="java.lang.String"/>
- <param name="src" type="java.lang.String"/>
- <param name="dst" type="java.lang.String"/>
- <param name="status" type="org.apache.hadoop.fs.FileStatus"/>
- </method>
- <method name="logAuditEvent"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="succeeded" type="boolean"/>
- <param name="userName" type="java.lang.String"/>
- <param name="addr" type="java.net.InetAddress"/>
- <param name="cmd" type="java.lang.String"/>
- <param name="src" type="java.lang.String"/>
- <param name="dst" type="java.lang.String"/>
- <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
- <param name="callerContext" type="org.apache.hadoop.ipc.CallerContext"/>
- <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
- <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
- <doc>
- <![CDATA[Same as
- {@link #logAuditEvent(boolean, String, InetAddress, String, String, String,
- FileStatus)} with additional parameters related to logging delegation token
- tracking IDs.
-
- @param succeeded Whether authorization succeeded.
- @param userName Name of the user executing the request.
- @param addr Remote address of the request.
- @param cmd The requested command.
- @param src Path of affected source file.
- @param dst Path of affected destination file (if any).
- @param stat File information for operations that change the file's metadata
- (permissions, owner, times, etc).
- @param callerContext Context information of the caller
- @param ugi UserGroupInformation of the current user, or null if not logging
- token tracking information
- @param dtSecretManager The token secret manager, or null if not logging
- token tracking information]]>
- </doc>
- </method>
- <method name="logAuditEvent"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="succeeded" type="boolean"/>
- <param name="userName" type="java.lang.String"/>
- <param name="addr" type="java.net.InetAddress"/>
- <param name="cmd" type="java.lang.String"/>
- <param name="src" type="java.lang.String"/>
- <param name="dst" type="java.lang.String"/>
- <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
- <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
- <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
- <doc>
- <![CDATA[Same as
- {@link #logAuditEvent(boolean, String, InetAddress, String, String,
- String, FileStatus, CallerContext, UserGroupInformation,
- DelegationTokenSecretManager)} without {@link CallerContext} information.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Extension of {@link AuditLogger}.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
- <class name="INodeAttributeProvider" extends="java.lang.Object"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="INodeAttributeProvider"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="start"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Initialize the provider. This method is called at NameNode startup
- time.]]>
- </doc>
- </method>
- <method name="stop"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Shutdown the provider. This method is called at NameNode shutdown time.]]>
- </doc>
- </method>
- <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="fullPath" type="java.lang.String"/>
- <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
- </method>
- <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="pathElements" type="java.lang.String[]"/>
- <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
- </method>
- <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="components" type="byte[][]"/>
- <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
- </method>
- <method name="getExternalAccessControlEnforcer" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="defaultEnforcer" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"/>
- <doc>
- <![CDATA[Can be over-ridden by implementations to provide a custom Access Control
- Enforcer that can provide an alternate implementation of the
- default permission checking logic.
- @param defaultEnforcer The Default AccessControlEnforcer
- @return The AccessControlEnforcer to use]]>
- </doc>
- </method>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
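
Given the abstract methods above, a provider subclass only needs start(), stop(), and the array-based getAttributes(); the pass-through sketch below is a minimal illustration, not a real authorization plugin.

    // Pass-through provider: returns the stored attributes unchanged. A real
    // provider could substitute attributes from an external authorization system.
    import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider;
    import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;

    public class PassthroughAttributeProvider extends INodeAttributeProvider {
      @Override
      public void start() {
        // Called at NameNode startup; open external connections here.
      }

      @Override
      public void stop() {
        // Called at NameNode shutdown; release resources here.
      }

      @Override
      public INodeAttributes getAttributes(String[] pathElements, INodeAttributes inode) {
        return inode;
      }
    }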
-</package>
-<package name="org.apache.hadoop.hdfs.server.namenode.ha">
-</package>
-<package name="org.apache.hadoop.hdfs.server.namenode.metrics">
-</package>
-<package name="org.apache.hadoop.hdfs.server.namenode.snapshot">
-</package>
-<package name="org.apache.hadoop.hdfs.server.namenode.top">
-</package>
-<package name="org.apache.hadoop.hdfs.server.namenode.top.metrics">
-</package>
-<package name="org.apache.hadoop.hdfs.server.namenode.top.window">
-</package>
-<package name="org.apache.hadoop.hdfs.server.namenode.web.resources">
-</package>
-<package name="org.apache.hadoop.hdfs.server.protocol">
-</package>
-<package name="org.apache.hadoop.hdfs.tools">
-</package>
-<package name="org.apache.hadoop.hdfs.tools.offlineEditsViewer">
-</package>
-<package name="org.apache.hadoop.hdfs.tools.offlineImageViewer">
-</package>
-<package name="org.apache.hadoop.hdfs.tools.snapshot">
-</package>
-<package name="org.apache.hadoop.hdfs.util">
-</package>
-<package name="org.apache.hadoop.hdfs.web">
-</package>
-<package name="org.apache.hadoop.hdfs.web.resources">
-</package>
-
-</api>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/66c9905b/hadoop-project-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index 5f83da3..cfaa698 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -145,7 +145,7 @@
<activeByDefault>false</activeByDefault>
</activation>
<properties>
- <jdiff.stable.api>2.9.1</jdiff.stable.api>
+ <jdiff.stable.api>3.0.2</jdiff.stable.api>
<jdiff.stability>-unstable</jdiff.stability>
<!-- Commented out for HADOOP-11776 -->
<!-- Uncomment param name="${jdiff.compatibility}" in javadoc doclet if compatibility is not empty -->
[37/50] [abbrv] hadoop git commit: YARN-8266. [UI2] Clicking on
application from cluster view should redirect to application attempt page.
Contributed by Yesha Vora.
Posted by xy...@apache.org.
YARN-8266. [UI2] Clicking on application from cluster view should redirect to application attempt page. Contributed by Yesha Vora.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/796b2b0e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/796b2b0e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/796b2b0e
Branch: refs/heads/HDDS-4
Commit: 796b2b0ee36e8e9225fb76ae35edc58ad907b737
Parents: 2d00a0c
Author: Sunil G <su...@apache.org>
Authored: Tue May 15 12:01:28 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Tue May 15 12:01:28 2018 +0530
----------------------------------------------------------------------
.../hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/796b2b0e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
index 896d448..fcf6a1e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
@@ -18,7 +18,7 @@
export default {
getApplicationLink: function(applicationId) {
- return "#/yarn-app/" + applicationId;
+ return "#/yarn-app/" + applicationId + '/attempts';
},
getQueueLink: function(queueName) {
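Net effect (illustrative application id): getApplicationLink("application_1526000000000_0001") now returns "#/yarn-app/application_1526000000000_0001/attempts" rather than "#/yarn-app/application_1526000000000_0001", so clicking an application in the cluster view lands directly on its attempts page.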
[04/50] [abbrv] hadoop git commit: YARN-8249. Fixed few REST APIs in
RMWebServices to have static-user check. Contributed by Sunil Govindan.
Posted by xy...@apache.org.
YARN-8249. Fixed few REST APIs in RMWebServices to have static-user check. Contributed by Sunil Govindan.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d76fbbc9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d76fbbc9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d76fbbc9
Branch: refs/heads/HDDS-4
Commit: d76fbbc9b82e720d7d5188f9ae2f56a8d78f3a98
Parents: 84b305f
Author: Vinod Kumar Vavilapalli (I am also known as @tshooter.) <vi...@apache.org>
Authored: Thu May 10 19:03:23 2018 -0700
Committer: Vinod Kumar Vavilapalli (I am also known as @tshooter.) <vi...@apache.org>
Committed: Thu May 10 19:05:35 2018 -0700
----------------------------------------------------------------------
.../webapp/AHSWebServices.java | 18 +-
.../hadoop/yarn/server/webapp/WebServices.java | 2 +-
.../resourcemanager/webapp/RMWebServices.java | 302 ++++++++-----------
.../webapp/TestRMWebServices.java | 2 +-
...tRMWebServicesHttpStaticUserPermissions.java | 12 +-
5 files changed, 142 insertions(+), 194 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d76fbbc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index 755127b..9aa71a7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -111,7 +111,7 @@ public class AHSWebServices extends WebServices {
public TimelineAbout about(
@Context HttpServletRequest req,
@Context HttpServletResponse res) {
- init(res);
+ initForReadableEndpoints(res);
return TimelineUtils.createTimelineAbout("Generic History Service API");
}
@@ -141,7 +141,7 @@ public class AHSWebServices extends WebServices {
@QueryParam("finishedTimeBegin") String finishBegin,
@QueryParam("finishedTimeEnd") String finishEnd,
@QueryParam("applicationTypes") Set<String> applicationTypes) {
- init(res);
+ initForReadableEndpoints(res);
validateStates(stateQuery, statesQuery);
return super.getApps(req, res, stateQuery, statesQuery, finalStatusQuery,
userQuery, queueQuery, count, startedBegin, startedEnd, finishBegin,
@@ -155,7 +155,7 @@ public class AHSWebServices extends WebServices {
@Override
public AppInfo getApp(@Context HttpServletRequest req,
@Context HttpServletResponse res, @PathParam("appid") String appId) {
- init(res);
+ initForReadableEndpoints(res);
return super.getApp(req, res, appId);
}
@@ -166,7 +166,7 @@ public class AHSWebServices extends WebServices {
@Override
public AppAttemptsInfo getAppAttempts(@Context HttpServletRequest req,
@Context HttpServletResponse res, @PathParam("appid") String appId) {
- init(res);
+ initForReadableEndpoints(res);
return super.getAppAttempts(req, res, appId);
}
@@ -178,7 +178,7 @@ public class AHSWebServices extends WebServices {
public AppAttemptInfo getAppAttempt(@Context HttpServletRequest req,
@Context HttpServletResponse res, @PathParam("appid") String appId,
@PathParam("appattemptid") String appAttemptId) {
- init(res);
+ initForReadableEndpoints(res);
return super.getAppAttempt(req, res, appId, appAttemptId);
}
@@ -190,7 +190,7 @@ public class AHSWebServices extends WebServices {
public ContainersInfo getContainers(@Context HttpServletRequest req,
@Context HttpServletResponse res, @PathParam("appid") String appId,
@PathParam("appattemptid") String appAttemptId) {
- init(res);
+ initForReadableEndpoints(res);
return super.getContainers(req, res, appId, appAttemptId);
}
@@ -203,7 +203,7 @@ public class AHSWebServices extends WebServices {
@Context HttpServletResponse res, @PathParam("appid") String appId,
@PathParam("appattemptid") String appAttemptId,
@PathParam("containerid") String containerId) {
- init(res);
+ initForReadableEndpoints(res);
return super.getContainer(req, res, appId, appAttemptId, containerId);
}
@@ -257,7 +257,7 @@ public class AHSWebServices extends WebServices {
@QueryParam(YarnWebServiceParams.REDIRECTED_FROM_NODE)
@DefaultValue("false") boolean redirected_from_node) {
ContainerId containerId = null;
- init(res);
+ initForReadableEndpoints(res);
try {
containerId = ContainerId.fromString(containerIdStr);
} catch (IllegalArgumentException e) {
@@ -392,7 +392,7 @@ public class AHSWebServices extends WebServices {
@QueryParam(YarnWebServiceParams.NM_ID) String nmId,
@QueryParam(YarnWebServiceParams.REDIRECTED_FROM_NODE)
@DefaultValue("false") boolean redirected_from_node) {
- init(res);
+ initForReadableEndpoints(res);
ContainerId containerId;
try {
containerId = ContainerId.fromString(containerIdStr);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d76fbbc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
index 1399099..df4656f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
@@ -387,7 +387,7 @@ public class WebServices {
return new ContainerInfo(container);
}
- protected void init(HttpServletResponse response) {
+ protected void initForReadableEndpoints(HttpServletResponse response) {
// clear content type
response.setContentType(null);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d76fbbc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index 0564b67..69c9562 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -272,9 +272,49 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
return true;
}
- private void init() {
+ /**
+ * initForReadableEndpoints does the init for all readable REST end points.
+ */
+ private void initForReadableEndpoints() {
+ // clear content type
+ response.setContentType(null);
+ }
+
+ /**
+ * initForWritableEndpoints does the init and acls verification for all
+ * writable REST end points.
+ *
+ * @param callerUGI
+ * remote caller who initiated the request
+ * @param doAdminACLsCheck
+ * boolean flag to indicate whether ACLs check is needed
+ * @throws AuthorizationException
+ * in case of no access to perform this op.
+ */
+ private void initForWritableEndpoints(UserGroupInformation callerUGI,
+ boolean doAdminACLsCheck) throws AuthorizationException {
// clear content type
response.setContentType(null);
+
+ if (callerUGI == null) {
+ String msg = "Unable to obtain user name, user not authenticated";
+ throw new AuthorizationException(msg);
+ }
+
+ if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
+ String msg = "The default static user cannot carry out this operation.";
+ throw new ForbiddenException(msg);
+ }
+
+ if (doAdminACLsCheck) {
+ ApplicationACLsManager aclsManager = rm.getApplicationACLsManager();
+ if (aclsManager.areACLsEnabled()) {
+ if (!aclsManager.isAdmin(callerUGI)) {
+ String msg = "Only admins can carry out this operation.";
+ throw new ForbiddenException(msg);
+ }
+ }
+ }
}
@GET
@@ -291,7 +331,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
@Override
public ClusterInfo getClusterInfo() {
- init();
+ initForReadableEndpoints();
return new ClusterInfo(this.rm);
}
@@ -301,7 +341,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
@Override
public ClusterMetricsInfo getClusterMetricsInfo() {
- init();
+ initForReadableEndpoints();
return new ClusterMetricsInfo(this.rm);
}
@@ -311,7 +351,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
@Override
public SchedulerTypeInfo getSchedulerInfo() {
- init();
+ initForReadableEndpoints();
+
ResourceScheduler rs = rm.getResourceScheduler();
SchedulerInfo sinfo;
if (rs instanceof CapacityScheduler) {
@@ -336,15 +377,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Override
public String dumpSchedulerLogs(@FormParam(RMWSConsts.TIME) String time,
@Context HttpServletRequest hsr) throws IOException {
- init();
UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
- ApplicationACLsManager aclsManager = rm.getApplicationACLsManager();
- if (aclsManager.areACLsEnabled()) {
- if (callerUGI == null || !aclsManager.isAdmin(callerUGI)) {
- String msg = "Only admins can carry out this operation.";
- throw new ForbiddenException(msg);
- }
- }
+ initForWritableEndpoints(callerUGI, true);
+
ResourceScheduler rs = rm.getResourceScheduler();
int period = Integer.parseInt(time);
if (period <= 0) {
@@ -370,7 +405,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
@Override
public NodesInfo getNodes(@QueryParam(RMWSConsts.STATES) String states) {
- init();
+ initForReadableEndpoints();
+
ResourceScheduler sched = this.rm.getResourceScheduler();
if (sched == null) {
throw new NotFoundException("Null ResourceScheduler instance");
@@ -409,7 +445,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
@Override
public NodeInfo getNode(@PathParam(RMWSConsts.NODEID) String nodeId) {
- init();
+ initForReadableEndpoints();
+
if (nodeId == null || nodeId.isEmpty()) {
throw new NotFoundException("nodeId, " + nodeId + ", is empty or null");
}
@@ -453,6 +490,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@QueryParam(RMWSConsts.APPLICATION_TYPES) Set<String> applicationTypes,
@QueryParam(RMWSConsts.APPLICATION_TAGS) Set<String> applicationTags,
@QueryParam(RMWSConsts.DESELECTS) Set<String> unselectedFields) {
+
+ initForReadableEndpoints();
+
boolean checkCount = false;
boolean checkStart = false;
boolean checkEnd = false;
@@ -467,7 +507,6 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
long fBegin = 0;
long fEnd = Long.MAX_VALUE;
- init();
if (count != null && !count.isEmpty()) {
checkCount = true;
countNum = Long.parseLong(count);
@@ -633,8 +672,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Override
public ActivitiesInfo getActivities(@Context HttpServletRequest hsr,
@QueryParam(RMWSConsts.NODEID) String nodeId) {
- YarnScheduler scheduler = rm.getRMContext().getScheduler();
+ initForReadableEndpoints();
+ YarnScheduler scheduler = rm.getRMContext().getScheduler();
if (scheduler instanceof AbstractYarnScheduler) {
String errMessage = "";
@@ -706,8 +746,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
public AppActivitiesInfo getAppActivities(@Context HttpServletRequest hsr,
@QueryParam(RMWSConsts.APP_ID) String appId,
@QueryParam(RMWSConsts.MAX_TIME) String time) {
- YarnScheduler scheduler = rm.getRMContext().getScheduler();
+ initForReadableEndpoints();
+ YarnScheduler scheduler = rm.getRMContext().getScheduler();
if (scheduler instanceof AbstractYarnScheduler) {
AbstractYarnScheduler abstractYarnScheduler =
(AbstractYarnScheduler) scheduler;
@@ -760,7 +801,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Context HttpServletRequest hsr,
@QueryParam(RMWSConsts.STATES) Set<String> stateQueries,
@QueryParam(RMWSConsts.APPLICATION_TYPES) Set<String> typeQueries) {
- init();
+ initForReadableEndpoints();
// parse the params and build the scoreboard
// converting state/type name to lowercase
@@ -847,7 +888,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
public AppInfo getApp(@Context HttpServletRequest hsr,
@PathParam(RMWSConsts.APPID) String appId,
@QueryParam(RMWSConsts.DESELECTS) Set<String> unselectedFields) {
- init();
+ initForReadableEndpoints();
+
ApplicationId id = WebAppUtils.parseApplicationId(recordFactory, appId);
RMApp app = rm.getRMContext().getRMApps().get(id);
if (app == null) {
@@ -868,8 +910,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Override
public AppAttemptsInfo getAppAttempts(@Context HttpServletRequest hsr,
@PathParam(RMWSConsts.APPID) String appId) {
+ initForReadableEndpoints();
- init();
ApplicationId id = WebAppUtils.parseApplicationId(recordFactory, appId);
RMApp app = rm.getRMContext().getRMApps().get(id);
if (app == null) {
@@ -895,7 +937,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Context HttpServletRequest req, @Context HttpServletResponse res,
@PathParam(RMWSConsts.APPID) String appId,
@PathParam(RMWSConsts.APPATTEMPTID) String appAttemptId) {
- init(res);
+ initForReadableEndpoints(res);
return super.getAppAttempt(req, res, appId, appAttemptId);
}
@@ -908,7 +950,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Context HttpServletResponse res,
@PathParam(RMWSConsts.APPID) String appId,
@PathParam(RMWSConsts.APPATTEMPTID) String appAttemptId) {
- init(res);
+ initForReadableEndpoints(res);
return super.getContainers(req, res, appId, appAttemptId);
}
@@ -922,7 +964,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@PathParam(RMWSConsts.APPID) String appId,
@PathParam(RMWSConsts.APPATTEMPTID) String appAttemptId,
@PathParam("containerid") String containerId) {
- init(res);
+ initForReadableEndpoints(res);
return super.getContainer(req, res, appId, appAttemptId, containerId);
}
@@ -933,7 +975,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Override
public AppState getAppState(@Context HttpServletRequest hsr,
@PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException {
- init();
+ initForReadableEndpoints();
+
UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
String userName = "";
if (callerUGI != null) {
@@ -969,18 +1012,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Context HttpServletRequest hsr,
@PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException,
YarnException, InterruptedException, IOException {
-
- init();
UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
- if (callerUGI == null) {
- String msg = "Unable to obtain user name, user not authenticated";
- throw new AuthorizationException(msg);
- }
-
- if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
- String msg = "The default static user cannot carry out this operation.";
- return Response.status(Status.FORBIDDEN).entity(msg).build();
- }
+ initForWritableEndpoints(callerUGI, false);
String userName = callerUGI.getUserName();
RMApp app = null;
@@ -1019,7 +1052,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Override
public NodeToLabelsInfo getNodeToLabels(@Context HttpServletRequest hsr)
throws IOException {
- init();
+ initForReadableEndpoints();
NodeToLabelsInfo ntl = new NodeToLabelsInfo();
HashMap<String, NodeLabelsInfo> ntlMap = ntl.getNodeToLabels();
@@ -1041,7 +1074,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Override
public LabelsToNodesInfo getLabelsToNodes(
@QueryParam(RMWSConsts.LABELS) Set<String> labels) throws IOException {
- init();
+ initForReadableEndpoints();
LabelsToNodesInfo lts = new LabelsToNodesInfo();
Map<NodeLabelInfo, NodeIDsInfo> ltsMap = lts.getLabelsToNodes();
@@ -1073,6 +1106,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
public Response replaceLabelsOnNodes(
final NodeToLabelsEntryList newNodeToLabels,
@Context HttpServletRequest hsr) throws IOException {
+ UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
+ initForWritableEndpoints(callerUGI, false);
+
Map<NodeId, Set<String>> nodeIdToLabels =
new HashMap<NodeId, Set<String>>();
@@ -1094,6 +1130,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@QueryParam("labels") Set<String> newNodeLabelsName,
@Context HttpServletRequest hsr, @PathParam("nodeId") String nodeId)
throws Exception {
+ UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
+ initForWritableEndpoints(callerUGI, false);
+
NodeId nid = ConverterUtils.toNodeIdWithDefaultPort(nodeId);
Map<NodeId, Set<String>> newLabelsForNode =
new HashMap<NodeId, Set<String>>();
@@ -1106,7 +1145,6 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
private Response replaceLabelsOnNode(
Map<NodeId, Set<String>> newLabelsForNode, HttpServletRequest hsr,
String operation) throws IOException {
- init();
NodeLabelsUtils.verifyCentralizedNodeLabelConfEnabled("replaceLabelsOnNode",
isCentralizedNodeLabelConfiguration);
@@ -1140,7 +1178,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Override
public NodeLabelsInfo getClusterNodeLabels(@Context HttpServletRequest hsr)
throws IOException {
- init();
+ initForReadableEndpoints();
List<NodeLabel> nodeLabels =
rm.getRMContext().getNodeLabelManager().getClusterNodeLabels();
@@ -1156,14 +1194,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Override
public Response addToClusterNodeLabels(final NodeLabelsInfo newNodeLabels,
@Context HttpServletRequest hsr) throws Exception {
- init();
-
UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
- if (callerUGI == null) {
- String msg = "Unable to obtain user name, user not authenticated for"
- + " post to .../add-node-labels";
- throw new AuthorizationException(msg);
- }
+ initForWritableEndpoints(callerUGI, false);
+
if (!rm.getRMContext().getNodeLabelManager().checkAccess(callerUGI)) {
String msg = "User " + callerUGI.getShortUserName() + " not authorized"
+ " for post to .../add-node-labels ";
@@ -1189,14 +1222,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
public Response removeFromCluserNodeLabels(
@QueryParam(RMWSConsts.LABELS) Set<String> oldNodeLabels,
@Context HttpServletRequest hsr) throws Exception {
- init();
-
UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
- if (callerUGI == null) {
- String msg = "Unable to obtain user name, user not authenticated for"
- + " post to .../remove-node-labels";
- throw new AuthorizationException(msg);
- }
+ initForWritableEndpoints(callerUGI, false);
+
if (!rm.getRMContext().getNodeLabelManager().checkAccess(callerUGI)) {
String msg = "User " + callerUGI.getShortUserName() + " not authorized"
+ " for post to .../remove-node-labels ";
@@ -1220,7 +1248,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Override
public NodeLabelsInfo getLabelsOnNode(@Context HttpServletRequest hsr,
@PathParam(RMWSConsts.NODEID) String nodeId) throws IOException {
- init();
+ initForReadableEndpoints();
NodeId nid = ConverterUtils.toNodeIdWithDefaultPort(nodeId);
List<NodeLabel> labels = new ArrayList<NodeLabel>(
@@ -1290,7 +1318,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Override
public AppPriority getAppPriority(@Context HttpServletRequest hsr,
@PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException {
- init();
+ initForReadableEndpoints();
+
UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
String userName = "UNKNOWN-USER";
if (callerUGI != null) {
@@ -1322,21 +1351,11 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Context HttpServletRequest hsr,
@PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException,
YarnException, InterruptedException, IOException {
- init();
- if (targetPriority == null) {
- throw new YarnException("Target Priority cannot be null");
- }
-
UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
- if (callerUGI == null) {
- throw new AuthorizationException(
- "Unable to obtain user name, user not authenticated");
- }
+ initForWritableEndpoints(callerUGI, false);
- if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
- return Response.status(Status.FORBIDDEN)
- .entity("The default static user cannot carry out this operation.")
- .build();
+ if (targetPriority == null) {
+ throw new YarnException("Target Priority cannot be null");
}
String userName = callerUGI.getUserName();
@@ -1407,7 +1426,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Override
public AppQueue getAppQueue(@Context HttpServletRequest hsr,
@PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException {
- init();
+ initForReadableEndpoints();
+
UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
String userName = "UNKNOWN-USER";
if (callerUGI != null) {
@@ -1440,17 +1460,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException,
YarnException, InterruptedException, IOException {
- init();
UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
- if (callerUGI == null) {
- String msg = "Unable to obtain user name, user not authenticated";
- throw new AuthorizationException(msg);
- }
-
- if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
- String msg = "The default static user cannot carry out this operation.";
- return Response.status(Status.FORBIDDEN).entity(msg).build();
- }
+ initForWritableEndpoints(callerUGI, false);
String userName = callerUGI.getUserName();
RMApp app = null;
@@ -1561,16 +1572,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Override
public Response createNewApplication(@Context HttpServletRequest hsr)
throws AuthorizationException, IOException, InterruptedException {
- init();
UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
- if (callerUGI == null) {
- throw new AuthorizationException(
- "Unable to obtain user name, " + "user not authenticated");
- }
- if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
- String msg = "The default static user cannot carry out this operation.";
- return Response.status(Status.FORBIDDEN).entity(msg).build();
- }
+ initForWritableEndpoints(callerUGI, false);
NewApplication appId = createNewApplication();
return Response.status(Status.OK).entity(appId).build();
@@ -1590,17 +1593,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Context HttpServletRequest hsr)
throws AuthorizationException, IOException, InterruptedException {
- init();
UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
- if (callerUGI == null) {
- throw new AuthorizationException(
- "Unable to obtain user name, " + "user not authenticated");
- }
-
- if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
- String msg = "The default static user cannot carry out this operation.";
- return Response.status(Status.FORBIDDEN).entity(msg).build();
- }
+ initForWritableEndpoints(callerUGI, false);
ApplicationSubmissionContext appContext =
RMWebAppUtil.createAppSubmissionContext(newApp, conf);
@@ -1654,14 +1648,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
return appId;
}
- private UserGroupInformation createKerberosUserGroupInformation(
- HttpServletRequest hsr) throws AuthorizationException, YarnException {
-
- UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
- if (callerUGI == null) {
- String msg = "Unable to obtain user name, user not authenticated";
- throw new AuthorizationException(msg);
- }
+ private void createKerberosUserGroupInformation(HttpServletRequest hsr,
+ UserGroupInformation callerUGI)
+ throws AuthorizationException, YarnException {
String authType = hsr.getAuthType();
if (!KerberosAuthenticationHandler.TYPE.equalsIgnoreCase(authType)) {
@@ -1672,14 +1661,10 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
}
if (hsr.getAttribute(
DelegationTokenAuthenticationHandler.DELEGATION_TOKEN_UGI_ATTRIBUTE) != null) {
- String msg =
- "Delegation token operations cannot be carried out using delegation"
- + " token authentication.";
+ String msg = "Delegation token operations cannot be carried out using "
+ + "delegation token authentication.";
throw new YarnException(msg);
}
-
- callerUGI.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
- return callerUGI;
}
@POST
@@ -1692,10 +1677,12 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Context HttpServletRequest hsr) throws AuthorizationException,
IOException, InterruptedException, Exception {
- init();
- UserGroupInformation callerUGI;
+ UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
+ initForWritableEndpoints(callerUGI, false);
+
try {
- callerUGI = createKerberosUserGroupInformation(hsr);
+ createKerberosUserGroupInformation(hsr, callerUGI);
+ callerUGI.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
} catch (YarnException ye) {
return Response.status(Status.FORBIDDEN).entity(ye.getMessage()).build();
}
@@ -1712,10 +1699,12 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
throws AuthorizationException, IOException, InterruptedException,
Exception {
- init();
- UserGroupInformation callerUGI;
+ UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
+ initForWritableEndpoints(callerUGI, false);
+
try {
- callerUGI = createKerberosUserGroupInformation(hsr);
+ createKerberosUserGroupInformation(hsr, callerUGI);
+ callerUGI.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
} catch (YarnException ye) {
return Response.status(Status.FORBIDDEN).entity(ye.getMessage()).build();
}
@@ -1827,10 +1816,12 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
throws AuthorizationException, IOException, InterruptedException,
Exception {
- init();
- UserGroupInformation callerUGI;
+ UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
+ initForWritableEndpoints(callerUGI, false);
+
try {
- callerUGI = createKerberosUserGroupInformation(hsr);
+ createKerberosUserGroupInformation(hsr, callerUGI);
+ callerUGI.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
} catch (YarnException ye) {
return Response.status(Status.FORBIDDEN).entity(ye.getMessage()).build();
}
@@ -1904,16 +1895,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Override
public Response createNewReservation(@Context HttpServletRequest hsr)
throws AuthorizationException, IOException, InterruptedException {
- init();
UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
- if (callerUGI == null) {
- throw new AuthorizationException(
- "Unable to obtain user name, " + "user not authenticated");
- }
- if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
- String msg = "The default static user cannot carry out this operation.";
- return Response.status(Status.FORBIDDEN).entity(msg).build();
- }
+ initForWritableEndpoints(callerUGI, false);
NewReservation reservationId = createNewReservation();
return Response.status(Status.OK).entity(reservationId).build();
@@ -1953,16 +1936,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Context HttpServletRequest hsr)
throws AuthorizationException, IOException, InterruptedException {
- init();
UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
- if (callerUGI == null) {
- throw new AuthorizationException(
- "Unable to obtain user name, " + "user not authenticated");
- }
- if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
- String msg = "The default static user cannot carry out this operation.";
- return Response.status(Status.FORBIDDEN).entity(msg).build();
- }
+ initForWritableEndpoints(callerUGI, false);
final ReservationSubmissionRequest reservation =
createReservationSubmissionRequest(resContext);
@@ -2051,16 +2026,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Context HttpServletRequest hsr)
throws AuthorizationException, IOException, InterruptedException {
- init();
UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
- if (callerUGI == null) {
- throw new AuthorizationException(
- "Unable to obtain user name, " + "user not authenticated");
- }
- if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
- String msg = "The default static user cannot carry out this operation.";
- return Response.status(Status.FORBIDDEN).entity(msg).build();
- }
+ initForWritableEndpoints(callerUGI, false);
final ReservationUpdateRequest reservation =
createReservationUpdateRequest(resContext);
@@ -2150,16 +2117,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Context HttpServletRequest hsr)
throws AuthorizationException, IOException, InterruptedException {
- init();
UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
- if (callerUGI == null) {
- throw new AuthorizationException(
- "Unable to obtain user name, " + "user not authenticated");
- }
- if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
- String msg = "The default static user cannot carry out this operation.";
- return Response.status(Status.FORBIDDEN).entity(msg).build();
- }
+ initForWritableEndpoints(callerUGI, false);
final ReservationDeleteRequest reservation =
createReservationDeleteRequest(resContext);
@@ -2207,7 +2166,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@QueryParam(RMWSConsts.END_TIME) @DefaultValue(DEFAULT_END_TIME) long endTime,
@QueryParam(RMWSConsts.INCLUDE_RESOURCE) @DefaultValue(DEFAULT_INCLUDE_RESOURCE) boolean includeResourceAllocations,
@Context HttpServletRequest hsr) throws Exception {
- init();
+ initForReadableEndpoints();
final ReservationListRequest request = ReservationListRequest.newInstance(
queue, reservationId, startTime, endTime, includeResourceAllocations);
@@ -2253,7 +2212,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
public AppTimeoutInfo getAppTimeout(@Context HttpServletRequest hsr,
@PathParam(RMWSConsts.APPID) String appId,
@PathParam(RMWSConsts.TYPE) String type) throws AuthorizationException {
- init();
+ initForReadableEndpoints();
RMApp app = validateAppTimeoutRequest(hsr, appId);
ApplicationTimeoutType appTimeoutType = parseTimeoutType(type);
@@ -2297,7 +2256,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Override
public AppTimeoutsInfo getAppTimeouts(@Context HttpServletRequest hsr,
@PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException {
- init();
+ initForReadableEndpoints();
RMApp app = validateAppTimeoutRequest(hsr, appId);
@@ -2355,19 +2314,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@Context HttpServletRequest hsr,
@PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException,
YarnException, InterruptedException, IOException {
- init();
UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
- if (callerUGI == null) {
- throw new AuthorizationException(
- "Unable to obtain user name, user not authenticated");
- }
-
- if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
- return Response.status(Status.FORBIDDEN)
- .entity("The default static user cannot carry out this operation.")
- .build();
- }
+ initForWritableEndpoints(callerUGI, false);
String userName = callerUGI.getUserName();
RMApp app = null;
@@ -2480,16 +2429,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
public synchronized Response updateSchedulerConfiguration(SchedConfUpdateInfo
mutationInfo, @Context HttpServletRequest hsr)
throws AuthorizationException, InterruptedException {
- init();
UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
- ApplicationACLsManager aclsManager = rm.getApplicationACLsManager();
- if (aclsManager.areACLsEnabled()) {
- if (callerUGI == null || !aclsManager.isAdmin(callerUGI)) {
- String msg = "Only admins can carry out this operation.";
- throw new ForbiddenException(msg);
- }
- }
+ initForWritableEndpoints(callerUGI, true);
ResourceScheduler scheduler = rm.getResourceScheduler();
if (scheduler instanceof MutableConfScheduler && ((MutableConfScheduler)
@@ -2541,7 +2483,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
@QueryParam(RMWSConsts.QUEUE_ACL_TYPE)
@DefaultValue("SUBMIT_APPLICATIONS") String queueAclType,
@Context HttpServletRequest hsr) throws AuthorizationException {
- init();
+ initForReadableEndpoints();
// For the user who invokes this REST call, he/she should have admin access
// to the queue. Otherwise we will reject the call.
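The refactor above replaces the per-endpoint copies of the null-UGI, static-user, and admin-ACL checks with two shared helpers. A minimal sketch of the resulting caller pattern for a writable endpoint follows; the path, method name, and body are illustrative, not part of the patch, and only the two helper calls mirror the actual code:

  @POST
  @Path("/example-op")
  public Response exampleWritableOp(@Context HttpServletRequest hsr)
      throws AuthorizationException {
    // Resolve the caller first, then run the shared checks:
    // null UGI -> AuthorizationException; static user under Kerberos
    // -> ForbiddenException; admin-ACL check only when the flag is true.
    UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
    initForWritableEndpoints(callerUGI, false);

    // Past this point callerUGI is guaranteed non-null and permitted.
    return Response.status(Status.OK).entity(callerUGI.getUserName()).build();
  }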
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d76fbbc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
index 9c4acc2..0702d65 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
@@ -684,7 +684,7 @@ public class TestRMWebServices extends JerseyTestBase {
ResourceManager mockRM = mock(ResourceManager.class);
Configuration conf = new YarnConfiguration();
- HttpServletRequest mockHsr = mock(HttpServletRequest.class);
+ HttpServletRequest mockHsr = mockHttpServletRequestByUserName("non-admin");
ApplicationACLsManager aclsManager = new ApplicationACLsManager(conf);
when(mockRM.getApplicationACLsManager()).thenReturn(aclsManager);
RMWebServices webSvc =
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d76fbbc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesHttpStaticUserPermissions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesHttpStaticUserPermissions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesHttpStaticUserPermissions.java
index 60c6f5e..cef32f4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesHttpStaticUserPermissions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesHttpStaticUserPermissions.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo;
+import org.codehaus.jettison.json.JSONObject;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -179,15 +180,20 @@ public class TestRMWebServicesHttpStaticUserPermissions {
assertEquals(Status.FORBIDDEN.getStatusCode(), conn.getResponseCode());
InputStream errorStream = conn.getErrorStream();
String error = "";
- BufferedReader reader =
- new BufferedReader(new InputStreamReader(errorStream, "UTF8"));
+ BufferedReader reader = new BufferedReader(
+ new InputStreamReader(errorStream, "UTF8"));
for (String line; (line = reader.readLine()) != null;) {
error += line;
}
reader.close();
errorStream.close();
+ JSONObject errResponse = new JSONObject(error);
+ JSONObject remoteException = errResponse
+ .getJSONObject("RemoteException");
assertEquals(
- "The default static user cannot carry out this operation.", error);
+ "java.lang.Exception: The default static user cannot carry out "
+ + "this operation.",
+ remoteException.getString("message"));
}
conn.disconnect();
}
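Net effect: the 403 response body is now a JSON-serialized RemoteException rather than the bare message string, so the test parses it and asserts on the nested message field. The payload is roughly {"RemoteException":{"message":"java.lang.Exception: The default static user cannot carry out this operation.",...}} (other fields elided here).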
[38/50] [abbrv] hadoop git commit: YARN-8166. [UI2] Service page
header links are broken. Contributed by Yesha Vora.
Posted by xy...@apache.org.
YARN-8166. [UI2] Service page header links are broken. Contributed by Yesha Vora.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ffb9210d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ffb9210d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ffb9210d
Branch: refs/heads/HDDS-4
Commit: ffb9210dedb79a56075448dc296251896bed49e6
Parents: 796b2b0
Author: Sunil G <su...@apache.org>
Authored: Tue May 15 12:13:04 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Tue May 15 12:13:04 2018 +0530
----------------------------------------------------------------------
.../src/main/webapp/app/controllers/yarn-component-instance.js | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffb9210d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance.js
index 8d28c22..61ebae1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance.js
@@ -48,7 +48,7 @@ export default Ember.Controller.extend({
text: 'Components'
}, {
text: `${componentName}`,
- href: `#/yarn-component-instances/${componentName}/components?service=${serviceName}&&appid=${appId}`
+ href: `#/yarn-component-instances/${componentName}/info?service=${serviceName}&&appid=${appId}`
}, {
text: `${instanceName}`
});
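Net effect (illustrative names): for component "comp1" of service "svc1" with application id "application_1526000000000_0001", the breadcrumb now points at #/yarn-component-instances/comp1/info?service=svc1&&appid=application_1526000000000_0001, the route the component pages serve, instead of the broken .../components variant.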
[19/50] [abbrv] hadoop git commit: YARN-8265. Improve DNS handling on
docker IP changes. Contributed by Billie Rinaldi
Posted by xy...@apache.org.
YARN-8265. Improve DNS handling on docker IP changes.
Contributed by Billie Rinaldi.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ff94563
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ff94563
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ff94563
Branch: refs/heads/HDDS-4
Commit: 0ff94563b9b62d0426d475dc0f84152b68f1ff0d
Parents: 6c8e51c
Author: Eric Yang <ey...@HW13750.local>
Authored: Fri May 11 22:37:43 2018 -0700
Committer: Eric Yang <ey...@HW13750.local>
Committed: Fri May 11 22:37:43 2018 -0700
----------------------------------------------------------------------
.../component/instance/ComponentInstance.java | 45 ++++++++---
.../hadoop/yarn/service/MockServiceAM.java | 17 ++++-
.../hadoop/yarn/service/TestServiceAM.java | 42 ++++++++++
.../linux/runtime/docker/TestDockerClient.java | 80 ++++++++++++++++++++
4 files changed, 173 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ff94563/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
index 4aca0ea..a323649 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.service.ServiceScheduler;
+import org.apache.hadoop.yarn.service.api.records.Artifact;
import org.apache.hadoop.yarn.service.api.records.ContainerState;
import org.apache.hadoop.yarn.service.component.Component;
import org.apache.hadoop.yarn.service.component.ComponentEvent;
@@ -151,10 +152,19 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
@Override public void transition(ComponentInstance compInstance,
ComponentInstanceEvent event) {
// Query container status for ip and host
+ boolean cancelOnSuccess = true;
+ if (compInstance.getCompSpec().getArtifact() != null && compInstance
+ .getCompSpec().getArtifact().getType() == Artifact.TypeEnum.DOCKER) {
+ // A docker container might get a different IP if the container is
+ // relaunched by the NM, so we need to keep checking the status.
+ // This is a temporary fix until the NM provides a callback for
+ // container relaunch (see YARN-8265).
+ cancelOnSuccess = false;
+ }
compInstance.containerStatusFuture =
compInstance.scheduler.executorService.scheduleAtFixedRate(
new ContainerStatusRetriever(compInstance.scheduler,
- event.getContainerId(), compInstance), 0, 1,
+ event.getContainerId(), compInstance, cancelOnSuccess), 0, 1,
TimeUnit.SECONDS);
long containerStartTime = System.currentTimeMillis();
try {
@@ -373,14 +383,26 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
this.status = status;
org.apache.hadoop.yarn.service.api.records.Container container =
getCompSpec().getContainer(status.getContainerId().toString());
+ boolean doRegistryUpdate = true;
if (container != null) {
- container.setIp(StringUtils.join(",", status.getIPs()));
+ String existingIP = container.getIp();
+ String newIP = StringUtils.join(",", status.getIPs());
+ container.setIp(newIP);
container.setHostname(status.getHost());
- if (timelineServiceEnabled) {
+ if (existingIP != null && newIP.equals(existingIP)) {
+ doRegistryUpdate = false;
+ }
+ if (timelineServiceEnabled && doRegistryUpdate) {
serviceTimelinePublisher.componentInstanceIPHostUpdated(container);
}
}
- updateServiceRecord(yarnRegistryOperations, status);
+ if (doRegistryUpdate) {
+ cleanupRegistry(status.getContainerId());
+ LOG.info(
+ getCompInstanceId() + " new IP = " + status.getIPs() + ", host = "
+ + status.getHost() + ", updating registry");
+ updateServiceRecord(yarnRegistryOperations, status);
+ }
}
public String getCompName() {
@@ -522,12 +544,15 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
private NodeId nodeId;
private NMClient nmClient;
private ComponentInstance instance;
+ private boolean cancelOnSuccess;
ContainerStatusRetriever(ServiceScheduler scheduler,
- ContainerId containerId, ComponentInstance instance) {
+ ContainerId containerId, ComponentInstance instance, boolean
+ cancelOnSuccess) {
this.containerId = containerId;
this.nodeId = instance.getNodeId();
this.nmClient = scheduler.getNmClient().getClient();
this.instance = instance;
+ this.cancelOnSuccess = cancelOnSuccess;
}
@Override public void run() {
ContainerStatus status = null;
@@ -548,10 +573,12 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
return;
}
instance.updateContainerStatus(status);
- LOG.info(
- instance.compInstanceId + " IP = " + status.getIPs() + ", host = "
- + status.getHost() + ", cancel container status retriever");
- instance.containerStatusFuture.cancel(false);
+ if (cancelOnSuccess) {
+ LOG.info(
+ instance.compInstanceId + " IP = " + status.getIPs() + ", host = "
+ + status.getHost() + ", cancel container status retriever");
+ instance.containerStatusFuture.cancel(false);
+ }
}
}
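For readers outside the YARN code base, a self-contained sketch of the polling pattern the patch adjusts: a fixed-rate status retriever that cancels itself after the first successful fetch unless cancelOnSuccess is false (the Docker case, where the IP can change on container relaunch). Everything below is illustrative; only the scheduleAtFixedRate/cancel structure mirrors the actual code:

  import java.util.concurrent.Executors;
  import java.util.concurrent.ScheduledExecutorService;
  import java.util.concurrent.ScheduledFuture;
  import java.util.concurrent.TimeUnit;
  import java.util.concurrent.atomic.AtomicReference;

  public class StatusPollerSketch {
    // Placeholder for a real NM status query (nmClient.getContainerStatus).
    private static String fetchStatus() {
      return "RUNNING, ip=172.17.0.2";
    }

    public static void main(String[] args) throws InterruptedException {
      ScheduledExecutorService executor =
          Executors.newSingleThreadScheduledExecutor();
      final boolean cancelOnSuccess = false; // false for DOCKER artifacts
      AtomicReference<ScheduledFuture<?>> futureRef = new AtomicReference<>();

      Runnable retriever = () -> {
        String status = fetchStatus();
        if (status == null) {
          return; // transient failure: try again on the next tick
        }
        System.out.println("status = " + status);
        ScheduledFuture<?> self = futureRef.get();
        if (cancelOnSuccess && self != null) {
          self.cancel(false); // stop polling once a status is obtained
        }
      };
      futureRef.set(
          executor.scheduleAtFixedRate(retriever, 0, 1, TimeUnit.SECONDS));

      Thread.sleep(3000); // let a few polls run, then shut down
      executor.shutdownNow();
    }
  }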
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ff94563/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
index 04b0347..4a75aef 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
@@ -317,6 +317,14 @@ public class MockServiceAM extends ServiceMaster {
}
}
+ public Container updateContainerStatus(Service service, int id,
+ String compName, String host) {
+ ContainerId containerId = createContainerId(id);
+ Container container = createContainer(containerId, compName);
+ addContainerStatus(container, ContainerState.RUNNING, host);
+ return container;
+ }
+
public ContainerId createContainerId(int id) {
ApplicationId applicationId = ApplicationId.fromString(service.getId());
return ContainerId.newContainerId(
@@ -389,10 +397,15 @@ public class MockServiceAM extends ServiceMaster {
}
private void addContainerStatus(Container container, ContainerState state) {
+ addContainerStatus(container, state, container.getNodeId().getHost());
+ }
+
+ private void addContainerStatus(Container container, ContainerState state,
+ String host) {
ContainerStatus status = ContainerStatus.newInstance(container.getId(),
state, "", 0);
- status.setHost(container.getNodeId().getHost());
- status.setIPs(Lists.newArrayList(container.getNodeId().getHost()));
+ status.setHost(host);
+ status.setIPs(Lists.newArrayList(host));
containerStatuses.put(container.getId(), status);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ff94563/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
index 260976a..e9478f0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.security.DockerCredentialTokenIdentifier;
+import org.apache.hadoop.yarn.service.api.records.Artifact;
import org.apache.hadoop.yarn.service.api.records.Component;
import org.apache.hadoop.yarn.service.api.records.ResourceInformation;
import org.apache.hadoop.yarn.service.api.records.Service;
@@ -349,4 +350,45 @@ public class TestServiceAM extends ServiceTestUtils{
am.stop();
}
+
+ @Test
+ public void testIPChange() throws TimeoutException,
+ InterruptedException {
+ ApplicationId applicationId = ApplicationId.newInstance(123456, 1);
+ String comp1Name = "comp1";
+ String comp1InstName = "comp1-0";
+ Service exampleApp = new Service();
+ exampleApp.setId(applicationId.toString());
+ exampleApp.setVersion("v1");
+ exampleApp.setName("testIPChange");
+ Component comp1 = createComponent(comp1Name, 1, "sleep 60");
+ comp1.setArtifact(new Artifact().type(Artifact.TypeEnum.DOCKER));
+ exampleApp.addComponent(comp1);
+
+ MockServiceAM am = new MockServiceAM(exampleApp);
+ am.init(conf);
+ am.start();
+
+ ComponentInstance comp1inst0 = am.getCompInstance(comp1Name, comp1InstName);
+ // allocate a container
+ am.feedContainerToComp(exampleApp, 1, comp1Name);
+ GenericTestUtils.waitFor(() -> comp1inst0.getContainerStatus() != null,
+ 2000, 200000);
+ // first host status will match the container nodeId
+ Assert.assertEquals("localhost",
+ comp1inst0.getContainerStatus().getHost());
+
+ LOG.info("Change the IP and host");
+ // change the container status
+ am.updateContainerStatus(exampleApp, 1, comp1Name, "new.host");
+ GenericTestUtils.waitFor(() -> comp1inst0.getContainerStatus().getHost()
+ .equals("new.host"), 2000, 200000);
+
+ LOG.info("Change the IP and host again");
+ // change the container status
+ am.updateContainerStatus(exampleApp, 1, comp1Name, "newer.host");
+ GenericTestUtils.waitFor(() -> comp1inst0.getContainerStatus().getHost()
+ .equals("newer.host"), 2000, 200000);
+ am.stop();
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ff94563/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerClient.java
new file mode 100644
index 0000000..efd7db5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerClient.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
+/** Unit tests for DockerClient. */
+public class TestDockerClient {
+ private static final File TEST_ROOT_DIR = GenericTestUtils.getTestDir(
+ TestDockerClient.class.getName());
+
+ @Before
+ public void setup() {
+ TEST_ROOT_DIR.mkdirs();
+ }
+
+ @After
+ public void cleanup() {
+ FileUtil.fullyDelete(TEST_ROOT_DIR);
+ }
+
+ @Test
+ public void testWriteCommandToTempFile() throws Exception {
+ String absRoot = TEST_ROOT_DIR.getAbsolutePath();
+ ApplicationId appId = ApplicationId.newInstance(1, 1);
+ ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
+ ContainerId cid = ContainerId.newContainerId(attemptId, 1);
+ DockerCommand dockerCmd = new DockerInspectCommand(cid.toString());
+ Configuration conf = new Configuration();
+ conf.set("hadoop.tmp.dir", absRoot);
+ conf.set(YarnConfiguration.NM_LOCAL_DIRS, absRoot);
+ conf.set(YarnConfiguration.NM_LOG_DIRS, absRoot);
+ LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
+ Context mockContext = mock(Context.class);
+ doReturn(conf).when(mockContext).getConf();
+ doReturn(dirsHandler).when(mockContext).getLocalDirsHandler();
+
+ DockerClient dockerClient = new DockerClient(conf);
+ dirsHandler.init(conf);
+ dirsHandler.start();
+ String tmpPath = dockerClient.writeCommandToTempFile(dockerCmd, cid,
+ mockContext);
+ dirsHandler.stop();
+ File tmpFile = new File(tmpPath);
+ assertTrue(tmpFile + " was not created", tmpFile.exists());
+ }
+}
[09/50] [abbrv] hadoop git commit: HDDS-39. Ozone: Compile
Ozone/HDFS/Cblock protobuf files with proto3 compiler using maven protoc
plugin. Contributed by Mukul Kumar Singh.
Posted by xy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
index b621a08..57d4287 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import org.apache.hadoop.ozone.container.common.helpers.KeyData;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
index 2da6874..a5d268d 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.ozone.genesis;
-import com.google.protobuf.ByteString;
+import org.apache.ratis.shaded.com.google.protobuf.ByteString;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang.RandomStringUtils;
@@ -53,15 +53,22 @@ import java.util.concurrent.atomic.AtomicInteger;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ROOT_PREFIX;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.CreateContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ReadChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.WriteChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.PutKeyRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.GetKeyRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData;
-
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .ContainerCommandRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .CreateContainerRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .ReadChunkRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .WriteChunkRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .PutKeyRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .GetKeyRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .ContainerData;
+
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 793ffb4..862a693 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -104,6 +104,11 @@
<mssql.version>6.2.1.jre7</mssql.version>
<okhttp.version>2.7.5</okhttp.version>
+ <!-- Maven protoc compiler -->
+ <protobuf-maven-plugin.version>0.5.1</protobuf-maven-plugin.version>
+ <protobuf-compile.version>3.1.0</protobuf-compile.version>
+ <os-maven-plugin.version>1.5.0.Final</os-maven-plugin.version>
+
<!-- define the Java language version used by the compiler -->
<javac.version>1.8</javac.version>
@@ -413,7 +418,7 @@
<version>${hadoop.version}</version>
</dependency>
- <dependency>
+ <dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-applications-distributedshell</artifactId>
<version>${hadoop.version}</version>
@@ -1737,8 +1742,8 @@
</ignores>
</configuration>
</plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<executions>
<execution>
[49/50] [abbrv] hadoop git commit: HDDS-6. Enable SCM kerberos auth.
Contributed by Ajay Kumar.
Posted by xy...@apache.org.
HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/996a627b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/996a627b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/996a627b
Branch: refs/heads/HDDS-4
Commit: 996a627b289947af3894bf83e7b63ec702a665cd
Parents: 1e95b5d
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Wed May 9 15:56:03 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Tue May 15 16:55:52 2018 -0700
----------------------------------------------------------------------
hadoop-hdds/common/src/main/resources/ozone-default.xml | 12 ------------
1 file changed, 12 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/996a627b/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index deb286d..6998a85 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -120,18 +120,6 @@
</description>
</property>
<property>
- <name>dfs.ratis.client.request.timeout.duration</name>
- <value>3s</value>
- <tag>OZONE, RATIS, MANAGEMENT</tag>
- <description>The timeout duration for ratis client request.</description>
- </property>
- <property>
- <name>dfs.ratis.server.request.timeout.duration</name>
- <value>3s</value>
- <tag>OZONE, RATIS, MANAGEMENT</tag>
- <description>The timeout duration for ratis server request.</description>
- </property>
- <property>
<name>ozone.container.report.interval</name>
<value>60000ms</value>
<tag>OZONE, CONTAINER, MANAGEMENT</tag>
[18/50] [abbrv] hadoop git commit: YARN-7654. Support ENTRY_POINT for
docker container. Contributed by Eric Yang.
Posted by xy...@apache.org.
YARN-7654. Support ENTRY_POINT for docker container. Contributed by Eric Yang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6c8e51ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6c8e51ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6c8e51ca
Branch: refs/heads/HDDS-4
Commit: 6c8e51ca7eaaeef0626658b3c45d446a537e4dc0
Parents: 4b4f24a
Author: Jason Lowe <jl...@apache.org>
Authored: Fri May 11 18:56:05 2018 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Fri May 11 18:56:05 2018 -0500
----------------------------------------------------------------------
.../hadoop/yarn/api/ApplicationConstants.java | 9 +-
.../provider/AbstractProviderService.java | 91 ++++++++++-----
.../provider/docker/DockerProviderService.java | 42 +++++++
.../launcher/ContainerLaunch.java | 14 +++
.../runtime/DockerLinuxContainerRuntime.java | 20 ++--
.../linux/runtime/docker/DockerClient.java | 115 ++++++++++--------
.../linux/runtime/docker/DockerRunCommand.java | 44 +++++++
.../impl/container-executor.c | 116 ++++++++++++++++++-
.../container-executor/impl/utils/docker-util.c | 46 ++++++++
.../container-executor/impl/utils/docker-util.h | 14 +++
.../test/utils/test_docker_util.cc | 42 +++++++
.../src/site/markdown/DockerContainers.md | 1 +
12 files changed, 467 insertions(+), 87 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c8e51ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
index 38ad596..b63fe61 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
@@ -244,7 +244,14 @@ public interface ApplicationConstants {
* Comma separated list of directories that the container should use for
* logging.
*/
- LOG_DIRS("LOG_DIRS");
+ LOG_DIRS("LOG_DIRS"),
+
+ /**
+ * $YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE
+ * Final. When set to "true", the Docker run command honors the image's
+ * ENTRY_POINT instead of overriding it with the container launch script.
+ */
+ YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE(
+ "YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE");
private final String variable;
private Environment(String variable) {
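The new constant is consumed elsewhere in this patch by parsing the container's
launch environment. A minimal sketch (not part of the patch; the environment
map here is a stand-in for the real container launch environment) of the check,
following the Boolean.parseBoolean pattern ContainerLaunch uses below:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;

    public class EntryPointFlagCheck {
      public static void main(String[] args) {
        Map<String, String> environment = new HashMap<>();
        environment.put(
            Environment.YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE.name(),
            "true");
        // Absent or non-"true" values parse to false, preserving the default
        // behavior of overriding the image's ENTRY_POINT.
        boolean useEntryPoint = Boolean.parseBoolean(environment.get(
            Environment.YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE.name()));
        System.out.println("use ENTRY_POINT: " + useEntryPoint);
      }
    }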
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c8e51ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
index 5a17817..6d213c8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
@@ -58,23 +58,26 @@ public abstract class AbstractProviderService implements ProviderService,
Service service)
throws IOException;
- public void buildContainerLaunchContext(AbstractLauncher launcher,
+ public Map<String, String> buildContainerTokens(ComponentInstance instance,
+ Container container,
+ ContainerLaunchService.ComponentLaunchContext compLaunchContext) {
+ // Generate tokens (key-value pair) for config substitution.
+ // Get pre-defined tokens
+ Map<String, String> globalTokens =
+ instance.getComponent().getScheduler().globalTokens;
+ Map<String, String> tokensForSubstitution = ProviderUtils
+ .initCompTokensForSubstitute(instance, container,
+ compLaunchContext);
+ tokensForSubstitution.putAll(globalTokens);
+ return tokensForSubstitution;
+ }
+
+ public void buildContainerEnvironment(AbstractLauncher launcher,
Service service, ComponentInstance instance,
SliderFileSystem fileSystem, Configuration yarnConf, Container container,
- ContainerLaunchService.ComponentLaunchContext compLaunchContext)
- throws IOException, SliderException {
- processArtifact(launcher, instance, fileSystem, service);
-
- ServiceContext context =
- instance.getComponent().getScheduler().getContext();
- // Generate tokens (key-value pair) for config substitution.
- // Get pre-defined tokens
- Map<String, String> globalTokens =
- instance.getComponent().getScheduler().globalTokens;
- Map<String, String> tokensForSubstitution = ProviderUtils
- .initCompTokensForSubstitute(instance, container,
- compLaunchContext);
- tokensForSubstitution.putAll(globalTokens);
+ ContainerLaunchService.ComponentLaunchContext compLaunchContext,
+ Map<String, String> tokensForSubstitution)
+ throws IOException, SliderException {
// Set the environment variables in launcher
launcher.putEnv(ServiceUtils.buildEnvMap(
compLaunchContext.getConfiguration(), tokensForSubstitution));
@@ -90,17 +93,14 @@ public abstract class AbstractProviderService implements ProviderService,
for (Entry<String, String> entry : launcher.getEnv().entrySet()) {
tokensForSubstitution.put($(entry.getKey()), entry.getValue());
}
- //TODO add component host tokens?
-// ProviderUtils.addComponentHostTokens(tokensForSubstitution, amState);
-
- // create config file on hdfs and add local resource
- ProviderUtils.createConfigFileAndAddLocalResource(launcher, fileSystem,
- compLaunchContext, tokensForSubstitution, instance, context);
-
- // handles static files (like normal file / archive file) for localization.
- ProviderUtils.handleStaticFilesForLocalization(launcher, fileSystem,
- compLaunchContext);
+ }
+ public void buildContainerLaunchCommand(AbstractLauncher launcher,
+ Service service, ComponentInstance instance,
+ SliderFileSystem fileSystem, Configuration yarnConf, Container container,
+ ContainerLaunchService.ComponentLaunchContext compLaunchContext,
+ Map<String, String> tokensForSubstitution)
+ throws IOException, SliderException {
// substitute launch command
String launchCommand = compLaunchContext.getLaunchCommand();
// docker container may have empty commands
@@ -112,10 +112,15 @@ public abstract class AbstractProviderService implements ProviderService,
operation.addOutAndErrFiles(OUT_FILE, ERR_FILE);
launcher.addCommand(operation.build());
}
+ }
+ public void buildContainerRetry(AbstractLauncher launcher,
+ Configuration yarnConf,
+ ContainerLaunchService.ComponentLaunchContext compLaunchContext) {
// By default retry forever every 30 seconds
launcher.setRetryContext(
- YarnServiceConf.getInt(CONTAINER_RETRY_MAX, DEFAULT_CONTAINER_RETRY_MAX,
+ YarnServiceConf.getInt(CONTAINER_RETRY_MAX,
+ DEFAULT_CONTAINER_RETRY_MAX,
compLaunchContext.getConfiguration(), yarnConf),
YarnServiceConf.getInt(CONTAINER_RETRY_INTERVAL,
DEFAULT_CONTAINER_RETRY_INTERVAL,
@@ -124,4 +129,38 @@ public abstract class AbstractProviderService implements ProviderService,
DEFAULT_CONTAINER_FAILURES_VALIDITY_INTERVAL,
compLaunchContext.getConfiguration(), yarnConf));
}
+
+ public void buildContainerLaunchContext(AbstractLauncher launcher,
+ Service service, ComponentInstance instance,
+ SliderFileSystem fileSystem, Configuration yarnConf, Container container,
+ ContainerLaunchService.ComponentLaunchContext compLaunchContext)
+ throws IOException, SliderException {
+ processArtifact(launcher, instance, fileSystem, service);
+
+ ServiceContext context =
+ instance.getComponent().getScheduler().getContext();
+ // Generate tokens (key-value pair) for config substitution.
+ Map<String, String> tokensForSubstitution =
+ buildContainerTokens(instance, container, compLaunchContext);
+
+ // Setup launch context environment
+ buildContainerEnvironment(launcher, service, instance,
+ fileSystem, yarnConf, container, compLaunchContext,
+ tokensForSubstitution);
+
+ // create config file on hdfs and add local resource
+ ProviderUtils.createConfigFileAndAddLocalResource(launcher, fileSystem,
+ compLaunchContext, tokensForSubstitution, instance, context);
+
+ // handles static files (like normal file / archive file) for localization.
+ ProviderUtils.handleStaticFilesForLocalization(launcher, fileSystem,
+ compLaunchContext);
+
+ // replace launch command with token specific information
+ buildContainerLaunchCommand(launcher, service, instance, fileSystem,
+ yarnConf, container, compLaunchContext, tokensForSubstitution);
+
+ // Setup container retry settings
+ buildContainerRetry(launcher, yarnConf, compLaunchContext);
+ }
}
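The net effect of this refactoring is that buildContainerLaunchContext becomes
a template method: token generation, environment setup, launch command
construction, and retry configuration are now separate overridable steps, which
lets the Docker provider in the next file replace only
buildContainerLaunchCommand while inheriting the rest unchanged.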
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c8e51ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java
index c3e2619..821682d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java
@@ -17,13 +17,23 @@
*/
package org.apache.hadoop.yarn.service.provider.docker;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
import org.apache.hadoop.yarn.service.provider.AbstractProviderService;
+import org.apache.hadoop.yarn.service.provider.ProviderUtils;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.service.api.records.Component;
import org.apache.hadoop.yarn.service.api.records.Service;
import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher;
+import org.apache.hadoop.yarn.service.containerlaunch.CommandLineBuilder;
+import org.apache.hadoop.yarn.service.containerlaunch.ContainerLaunchService;
+import org.apache.hadoop.yarn.service.exceptions.SliderException;
+import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import java.io.IOException;
+import java.util.Map;
public class DockerProviderService extends AbstractProviderService
implements DockerKeys {
@@ -39,4 +49,36 @@ public class DockerProviderService extends AbstractProviderService
launcher.setRunPrivilegedContainer(
compInstance.getCompSpec().getRunPrivilegedContainer());
}
+
+ @Override
+ public void buildContainerLaunchCommand(AbstractLauncher launcher,
+ Service service, ComponentInstance instance,
+ SliderFileSystem fileSystem, Configuration yarnConf, Container container,
+ ContainerLaunchService.ComponentLaunchContext compLaunchContext,
+ Map<String, String> tokensForSubstitution)
+ throws IOException, SliderException {
+ Component component = instance.getComponent().getComponentSpec();
+ boolean useEntryPoint = Boolean.parseBoolean(component
+ .getConfiguration().getEnv(Environment
+ .YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE.name()));
+ if (useEntryPoint) {
+ String launchCommand = component.getLaunchCommand();
+ if (!StringUtils.isEmpty(launchCommand)) {
+ launcher.addCommand(launchCommand);
+ }
+ } else {
+ // substitute launch command
+ String launchCommand = compLaunchContext.getLaunchCommand();
+ // docker container may have empty commands
+ if (!StringUtils.isEmpty(launchCommand)) {
+ launchCommand = ProviderUtils
+ .substituteStrWithTokens(launchCommand, tokensForSubstitution);
+ CommandLineBuilder operation = new CommandLineBuilder();
+ operation.add(launchCommand);
+ operation.addOutAndErrFiles(OUT_FILE, ERR_FILE);
+ launcher.addCommand(operation.build());
+ }
+ }
+ }
+
}
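For a service spec, the entry-point path is selected per component through its
environment configuration. An illustrative sketch (not part of the patch; the
fluent name() setter and the no-argument getEnv() accessor are assumptions
based on the yarn-service api records used above):

    import org.apache.hadoop.yarn.service.api.records.Component;

    // Hypothetical component that keeps its image's ENTRY_POINT: setting the
    // override-disable variable to "true" routes launch through the
    // useEntryPoint branch above.
    Component comp = new Component().name("web");
    comp.getConfiguration().getEnv().put(
        "YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE", "true");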
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c8e51ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index fa77899..d43c069 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -1677,6 +1677,20 @@ public class ContainerLaunch implements Callable<Integer> {
containerLogDirs, Map<Path, List<String>> resources,
Path nmPrivateClasspathJarDir,
Set<String> nmVars) throws IOException {
+ // Per the discussion in YARN-7654, for an ENTRY_POINT enabled
+ // docker container we forward only the user defined environment
+ // variables, not the node manager's. This is why the sanitizeEnv
+ // logic below is skipped.
+ boolean overrideDisable = Boolean.parseBoolean(
+ environment.get(
+ Environment.
+ YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE.
+ name()));
+ if (overrideDisable) {
+ environment.remove("WORK_DIR");
+ return;
+ }
+
/**
* Non-modifiable environment variables
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c8e51ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 0bacd03..a14b085 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -235,7 +235,6 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
@InterfaceAudience.Private
public static final String ENV_DOCKER_CONTAINER_DELAYED_REMOVAL =
"YARN_CONTAINER_RUNTIME_DOCKER_DELAYED_REMOVAL";
-
private Configuration conf;
private Context nmContext;
private DockerClient dockerClient;
@@ -741,6 +740,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
String imageName = environment.get(ENV_DOCKER_CONTAINER_IMAGE);
String network = environment.get(ENV_DOCKER_CONTAINER_NETWORK);
String hostname = environment.get(ENV_DOCKER_CONTAINER_HOSTNAME);
+ boolean useEntryPoint = Boolean.parseBoolean(environment
+ .get(ENV_DOCKER_CONTAINER_RUN_OVERRIDE_DISABLE));
if(network == null || network.isEmpty()) {
network = defaultNetwork;
@@ -802,8 +803,6 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
@SuppressWarnings("unchecked")
DockerRunCommand runCommand = new DockerRunCommand(containerIdStr,
dockerRunAsUser, imageName)
- .detachOnRun()
- .setContainerWorkDir(containerWorkDir.toString())
.setNetworkType(network);
// Only add hostname if network is not host or if Registry DNS is enabled.
if (!network.equalsIgnoreCase("host") ||
@@ -875,19 +874,22 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
addCGroupParentIfRequired(resourcesOpts, containerIdStr, runCommand);
- String disableOverride = environment.get(
- ENV_DOCKER_CONTAINER_RUN_OVERRIDE_DISABLE);
-
- if (disableOverride != null && disableOverride.equals("true")) {
- LOG.info("command override disabled");
+ if (useEntryPoint) {
+ runCommand.setOverrideDisabled(true);
+ runCommand.addEnv(environment);
+ runCommand.setOverrideCommandWithArgs(container.getLaunchContext()
+ .getCommands());
+ runCommand.disableDetach();
+ runCommand.setLogDir(container.getLogDir());
} else {
List<String> overrideCommands = new ArrayList<>();
Path launchDst =
new Path(containerWorkDir, ContainerLaunch.CONTAINER_SCRIPT);
-
overrideCommands.add("bash");
overrideCommands.add(launchDst.toUri().getPath());
+ runCommand.setContainerWorkDir(containerWorkDir.toString());
runCommand.setOverrideCommandWithArgs(overrideCommands);
+ runCommand.detachOnRun();
}
if(enableUserReMapping) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c8e51ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
index dd49e15..fca707c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
@@ -49,6 +49,7 @@ public final class DockerClient {
LoggerFactory.getLogger(DockerClient.class);
private static final String TMP_FILE_PREFIX = "docker.";
private static final String TMP_FILE_SUFFIX = ".cmd";
+ private static final String TMP_ENV_FILE_SUFFIX = ".env";
private final String tmpDirPath;
public DockerClient(Configuration conf) throws ContainerExecutionException {
@@ -69,40 +70,56 @@ public final class DockerClient {
public String writeCommandToTempFile(DockerCommand cmd, String filePrefix)
throws ContainerExecutionException {
- File dockerCommandFile = null;
try {
- dockerCommandFile = File.createTempFile(TMP_FILE_PREFIX + filePrefix,
- TMP_FILE_SUFFIX, new
- File(tmpDirPath));
-
- Writer writer = new OutputStreamWriter(
- new FileOutputStream(dockerCommandFile), "UTF-8");
- PrintWriter printWriter = new PrintWriter(writer);
- printWriter.println("[docker-command-execution]");
- for (Map.Entry<String, List<String>> entry :
- cmd.getDockerCommandWithArguments().entrySet()) {
- if (entry.getKey().contains("=")) {
- throw new ContainerExecutionException(
- "'=' found in entry for docker command file, key = " + entry
- .getKey() + "; value = " + entry.getValue());
- }
- if (entry.getValue().contains("\n")) {
- throw new ContainerExecutionException(
- "'\\n' found in entry for docker command file, key = " + entry
- .getKey() + "; value = " + entry.getValue());
+ File dockerCommandFile = File.createTempFile(TMP_FILE_PREFIX + filePrefix,
+ TMP_FILE_SUFFIX, new
+ File(tmpDirPath));
+ try (
+ Writer writer = new OutputStreamWriter(
+ new FileOutputStream(dockerCommandFile), "UTF-8");
+ PrintWriter printWriter = new PrintWriter(writer);
+ ) {
+ printWriter.println("[docker-command-execution]");
+ for (Map.Entry<String, List<String>> entry :
+ cmd.getDockerCommandWithArguments().entrySet()) {
+ if (entry.getKey().contains("=")) {
+ throw new ContainerExecutionException(
+ "'=' found in entry for docker command file, key = " + entry
+ .getKey() + "; value = " + entry.getValue());
+ }
+ if (entry.getValue().contains("\n")) {
+ throw new ContainerExecutionException(
+ "'\\n' found in entry for docker command file, key = " + entry
+ .getKey() + "; value = " + entry.getValue());
+ }
+ printWriter.println(" " + entry.getKey() + "=" + StringUtils
+ .join(",", entry.getValue()));
}
- printWriter.println(" " + entry.getKey() + "=" + StringUtils
- .join(",", entry.getValue()));
+ return dockerCommandFile.getAbsolutePath();
}
- printWriter.close();
-
- return dockerCommandFile.getAbsolutePath();
} catch (IOException e) {
LOG.warn("Unable to write docker command to temporary file!");
throw new ContainerExecutionException(e);
}
}
+ private String writeEnvFile(DockerRunCommand cmd, String filePrefix,
+ File cmdDir) throws IOException {
+ File dockerEnvFile = File.createTempFile(TMP_FILE_PREFIX + filePrefix,
+ TMP_ENV_FILE_SUFFIX, cmdDir);
+ try (
+ Writer envWriter = new OutputStreamWriter(
+ new FileOutputStream(dockerEnvFile), "UTF-8");
+ PrintWriter envPrintWriter = new PrintWriter(envWriter);
+ ) {
+ for (Map.Entry<String, String> entry : cmd.getEnv()
+ .entrySet()) {
+ envPrintWriter.println(entry.getKey() + "=" + entry.getValue());
+ }
+ return dockerEnvFile.getAbsolutePath();
+ }
+ }
+
public String writeCommandToTempFile(DockerCommand cmd,
ContainerId containerId, Context nmContext)
throws ContainerExecutionException {
@@ -126,32 +143,38 @@ public final class DockerClient {
throw new IOException("Cannot create container private directory "
+ cmdDir);
}
-
dockerCommandFile = File.createTempFile(TMP_FILE_PREFIX + filePrefix,
TMP_FILE_SUFFIX, cmdDir);
-
- Writer writer = new OutputStreamWriter(
- new FileOutputStream(dockerCommandFile.toString()), "UTF-8");
- PrintWriter printWriter = new PrintWriter(writer);
- printWriter.println("[docker-command-execution]");
- for (Map.Entry<String, List<String>> entry :
- cmd.getDockerCommandWithArguments().entrySet()) {
- if (entry.getKey().contains("=")) {
- throw new ContainerExecutionException(
- "'=' found in entry for docker command file, key = " + entry
- .getKey() + "; value = " + entry.getValue());
+ try (
+ Writer writer = new OutputStreamWriter(
+ new FileOutputStream(dockerCommandFile.toString()), "UTF-8");
+ PrintWriter printWriter = new PrintWriter(writer);
+ ) {
+ printWriter.println("[docker-command-execution]");
+ for (Map.Entry<String, List<String>> entry :
+ cmd.getDockerCommandWithArguments().entrySet()) {
+ if (entry.getKey().contains("=")) {
+ throw new ContainerExecutionException(
+ "'=' found in entry for docker command file, key = " + entry
+ .getKey() + "; value = " + entry.getValue());
+ }
+ if (entry.getValue().contains("\n")) {
+ throw new ContainerExecutionException(
+ "'\\n' found in entry for docker command file, key = " + entry
+ .getKey() + "; value = " + entry.getValue());
+ }
+ printWriter.println(" " + entry.getKey() + "=" + StringUtils
+ .join(",", entry.getValue()));
}
- if (entry.getValue().contains("\n")) {
- throw new ContainerExecutionException(
- "'\\n' found in entry for docker command file, key = " + entry
- .getKey() + "; value = " + entry.getValue());
+ if (cmd instanceof DockerRunCommand) {
+ DockerRunCommand runCommand = (DockerRunCommand) cmd;
+ if (runCommand.containsEnv()) {
+ String path = writeEnvFile(runCommand, filePrefix, cmdDir);
+ printWriter.println(" environ=" + path);
+ }
}
- printWriter.println(" " + entry.getKey() + "=" + StringUtils
- .join(",", entry.getValue()));
+ return dockerCommandFile.toString();
}
- printWriter.close();
-
- return dockerCommandFile.toString();
} catch (IOException e) {
LOG.warn("Unable to write docker command to " + cmdDir);
throw new ContainerExecutionException(e);
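Taken together, the run command file now carries an environ pointer to a
generated env file. An illustrative pair of files (contents and paths are
hypothetical; the key=value layout matches the writers above and the
expectations in test_docker_util.cc later in this patch):

    [docker-command-execution]
      docker-command=run
      name=container_e1_12312_11111_02_000001
      image=hadoop/docker-image
      user=nobody
      use-entry-point=true
      environ=/tmp/test.env

and the referenced env file, one variable per line:

    MODE=prod
    JAVA_HOME=/usr/lib/jvm/java-8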
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c8e51ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
index bfeeaf5..af16178 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
@@ -21,12 +21,14 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
import java.io.File;
+import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
public class DockerRunCommand extends DockerCommand {
private static final String RUN_COMMAND = "run";
+ private final Map<String, String> userEnv;
/** The following are mandatory: */
public DockerRunCommand(String containerId, String user, String image) {
@@ -34,6 +36,7 @@ public class DockerRunCommand extends DockerCommand {
super.addCommandArguments("name", containerId);
super.addCommandArguments("user", user);
super.addCommandArguments("image", image);
+ this.userEnv = new LinkedHashMap<String, String>();
}
public DockerRunCommand removeContainerOnExit() {
@@ -174,4 +177,45 @@ public class DockerRunCommand extends DockerCommand {
public Map<String, List<String>> getDockerCommandWithArguments() {
return super.getDockerCommandWithArguments();
}
+
+ public DockerRunCommand setOverrideDisabled(boolean toggle) {
+ String value = Boolean.toString(toggle);
+ super.addCommandArguments("use-entry-point", value);
+ return this;
+ }
+
+ public DockerRunCommand setLogDir(String logDir) {
+ super.addCommandArguments("log-dir", logDir);
+ return this;
+ }
+
+ /**
+ * Check whether any user defined environment variables were added.
+ *
+ * @return true if user defined environment variables are not empty.
+ */
+ public boolean containsEnv() {
+ return !userEnv.isEmpty();
+ }
+
+ /**
+ * Get user defined environment variables.
+ *
+ * @return a map of user defined environment variables
+ */
+ public Map<String, String> getEnv() {
+ return userEnv;
+ }
+
+ /**
+ * Add user defined environment variables.
+ *
+ * @param environment A map of user defined environment variables
+ */
+ public final void addEnv(Map<String, String> environment) {
+ userEnv.putAll(environment);
+ }
}
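A condensed sketch (not part of the patch; literal values are placeholders) of
how DockerLinuxContainerRuntime above drives this API in entry-point mode:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerRunCommand;

    Map<String, String> userEnv = new LinkedHashMap<>();
    userEnv.put("MODE", "prod");

    DockerRunCommand cmd = new DockerRunCommand(
        "container_e1_12312_11111_02_000001", "nobody", "hadoop/docker-image");
    cmd.setOverrideDisabled(true);  // serialized as use-entry-point=true
    cmd.addEnv(userEnv);            // written to the env file when containsEnv()
    cmd.disableDetach();            // stay attached so stdout/stderr can be cloned
    cmd.setLogDir("/tmp/logs/container_e1_12312_11111_02_000001");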
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c8e51ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index c5adbe4..7b62223 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -94,6 +94,8 @@ static gid_t nm_gid = -1;
struct configuration CFG = {.size=0, .sections=NULL};
struct section executor_cfg = {.size=0, .kv_pairs=NULL};
+static char *chosen_container_log_dir = NULL;
+
char *concatenate(char *concat_pattern, char *return_path_name,
int numArgs, ...);
@@ -755,8 +757,9 @@ static int create_container_directories(const char* user, const char *app_id,
} else if (mkdirs(container_log_dir, perms) != 0) {
free(container_log_dir);
} else {
- free(container_log_dir);
result = 0;
+ chosen_container_log_dir = strdup(container_log_dir);
+ free(container_log_dir);
}
}
free(combined_name);
@@ -1129,6 +1132,34 @@ char* get_container_log_directory(const char *log_root, const char* app_id,
container_id);
}
+char *init_log_path(const char *container_log_dir, const char *logfile) {
+ char *tmp_buffer = NULL;
+ tmp_buffer = make_string("%s/%s", container_log_dir, logfile);
+
+ mode_t permissions = S_IRUSR | S_IWUSR | S_IRGRP;
+ int fd = open(tmp_buffer, O_CREAT | O_WRONLY, permissions);
+ if (fd >= 0) {
+ close(fd);
+ if (change_owner(tmp_buffer, user_detail->pw_uid, user_detail->pw_gid) != 0) {
+ fprintf(ERRORFILE, "Failed to chown %s to %d:%d: %s\n", tmp_buffer, user_detail->pw_uid, user_detail->pw_gid,
+ strerror(errno));
+ free(tmp_buffer);
+ tmp_buffer = NULL;
+ } else if (chmod(tmp_buffer, permissions) != 0) {
+ fprintf(ERRORFILE, "Can't chmod %s - %s\n",
+ tmp_buffer, strerror(errno));
+ free(tmp_buffer);
+ tmp_buffer = NULL;
+ }
+ } else {
+ fprintf(ERRORFILE, "Failed to create file %s - %s\n", tmp_buffer,
+ strerror(errno));
+ free(tmp_buffer);
+ tmp_buffer = NULL;
+ }
+ return tmp_buffer;
+}
+
int create_container_log_dirs(const char *container_id, const char *app_id,
char * const * log_dirs) {
char* const* log_root;
@@ -1506,6 +1537,7 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
char *docker_inspect_exitcode_command = NULL;
int container_file_source =-1;
int cred_file_source = -1;
+ int use_entry_point = 0;
gid_t user_gid = getegid();
uid_t prev_uid = geteuid();
@@ -1560,6 +1592,18 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
goto cleanup;
}
+ use_entry_point = get_use_entry_point_flag();
+ char *so = init_log_path(chosen_container_log_dir, "stdout.txt");
+ if (so == NULL) {
+ exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
+ goto cleanup;
+ }
+ char *se = init_log_path(chosen_container_log_dir, "stderr.txt");
+ if (se == NULL) {
+ exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
+ goto cleanup;
+ }
+
docker_command_with_binary = flatten(docker_command);
// Launch container
@@ -1573,14 +1617,76 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
}
if (child_pid == 0) {
+ FILE* so_fd = fopen(so, "a+");
+ if (so_fd == NULL) {
+ fprintf(ERRORFILE, "Could not append to %s\n", so);
+ exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
+ goto cleanup;
+ }
+ FILE* se_fd = fopen(se, "a+");
+ if (se_fd == NULL) {
+ fprintf(ERRORFILE, "Could not append to %s\n", se);
+ exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
+ fclose(so_fd);
+ goto cleanup;
+ }
+ // if entry point is enabled, clone docker command output
+ // to stdout.txt and stderr.txt for yarn.
+ if (use_entry_point) {
+ fprintf(so_fd, "Launching docker container...\n");
+ fprintf(so_fd, "Docker run command: %s\n", docker_command_with_binary);
+ if (dup2(fileno(so_fd), fileno(stdout)) == -1) {
+ fprintf(ERRORFILE, "Could not append to stdout.txt\n");
+ fclose(so_fd);
+ return UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
+ }
+ if (dup2(fileno(se_fd), fileno(stderr)) == -1) {
+ fprintf(ERRORFILE, "Could not append to stderr.txt\n");
+ fclose(se_fd);
+ return UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
+ }
+ }
+ fclose(so_fd);
+ fclose(se_fd);
execvp(docker_binary, docker_command);
fprintf(ERRORFILE, "failed to execute docker command! error: %s\n", strerror(errno));
return UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
} else {
- exit_code = wait_and_get_exit_code(child_pid);
- if (exit_code != 0) {
- exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
- goto cleanup;
+ if (use_entry_point) {
+ int pid = 0;
+ int res = 0;
+ int count = 0;
+ int max_retries = get_max_retries(&CFG);
+ docker_inspect_command = make_string(
+ "%s inspect --format {{.State.Pid}} %s",
+ docker_binary, container_id);
+ // check for docker container pid
+ while (count < max_retries) {
+ fprintf(LOGFILE, "Inspecting docker container...\n");
+ fprintf(LOGFILE, "Docker inspect command: %s\n", docker_inspect_command);
+ fflush(LOGFILE);
+ FILE* inspect_docker = popen(docker_inspect_command, "r");
+ res = fscanf (inspect_docker, "%d", &pid);
+ fprintf(LOGFILE, "pid from docker inspect: %d\n", pid);
+ if (pclose (inspect_docker) != 0 || res <= 0) {
+ fprintf (ERRORFILE,
+ "Could not inspect docker to get pid %s.\n", docker_inspect_command);
+ fflush(ERRORFILE);
+ exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
+ } else {
+ if (pid != 0) {
+ break;
+ }
+ }
+ sleep(3);
+ count++;
+ }
+ } else {
+ exit_code = wait_and_get_exit_code(child_pid);
+ if (exit_code != 0) {
+ exit_code = UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
+ goto cleanup;
+ }
}
}
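With the defaults, the readiness loop above makes up to 10 docker inspect
attempts (get_max_retries, defined in docker-util.c below) separated by
sleep(3), so a container has roughly 10 x 3 = 30 seconds to report a nonzero
PID before the launch is treated as failed.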
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c8e51ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
index 5be02a9..f361d34 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
@@ -32,6 +32,8 @@
#include <pwd.h>
#include <errno.h>
+int entry_point = 0;
+
static int read_and_verify_command_file(const char *command_file, const char *docker_command,
struct configuration *command_config) {
int ret = 0;
@@ -336,6 +338,17 @@ const char *get_docker_error_message(const int error_code) {
}
}
+int get_max_retries(const struct configuration *conf) {
+ int retries = 10;
+ char *max_retries = get_configuration_value(DOCKER_INSPECT_MAX_RETRIES_KEY,
+ CONTAINER_EXECUTOR_CFG_DOCKER_SECTION, conf);
+ if (max_retries != NULL) {
+ retries = atoi(max_retries);
+ free(max_retries);
+ }
+ return retries;
+}
+
char *get_docker_binary(const struct configuration *conf) {
char *docker_binary = NULL;
docker_binary = get_configuration_value(DOCKER_BINARY_KEY, CONTAINER_EXECUTOR_CFG_DOCKER_SECTION, conf);
@@ -348,6 +361,10 @@ char *get_docker_binary(const struct configuration *conf) {
return docker_binary;
}
+int get_use_entry_point_flag() {
+ return entry_point;
+}
+
int docker_module_enabled(const struct configuration *conf) {
struct section *section = get_configuration_section(CONTAINER_EXECUTOR_CFG_DOCKER_SECTION, conf);
if (section != NULL) {
@@ -365,6 +382,12 @@ int get_docker_command(const char *command_file, const struct configuration *con
return INVALID_COMMAND_FILE;
}
+ char *value = get_configuration_value("use-entry-point", DOCKER_COMMAND_FILE_SECTION, &command_config);
+ if (value != NULL && strcasecmp(value, "true") == 0) {
+ entry_point = 1;
+ }
+ free(value);
+
char *command = get_configuration_value("docker-command", DOCKER_COMMAND_FILE_SECTION, &command_config);
if (strcmp(DOCKER_INSPECT_COMMAND, command) == 0) {
return get_docker_inspect_command(command_file, conf, args);
@@ -1009,6 +1032,24 @@ static int set_devices(const struct configuration *command_config, const struct
return ret;
}
+static int set_env(const struct configuration *command_config, struct args *args) {
+ int ret = 0;
+ // Use envfile method.
+ char *envfile = get_configuration_value("environ", DOCKER_COMMAND_FILE_SECTION, command_config);
+ if (envfile != NULL) {
+ ret = add_to_args(args, "--env-file");
+ if (ret != 0) {
+ ret = BUFFER_TOO_SMALL;
+ }
+ ret = add_to_args(args, envfile);
+ if (ret != 0) {
+ ret = BUFFER_TOO_SMALL;
+ }
+ free(envfile);
+ }
+ return ret;
+}
+
/**
* Helper function to help normalize mounts for checking if mounts are
* permitted. The function does the following -
@@ -1520,6 +1561,11 @@ int get_docker_run_command(const char *command_file, const struct configuration
return ret;
}
+ ret = set_env(&command_config, args);
+ if (ret != 0) {
+ return BUFFER_TOO_SMALL;
+ }
+
ret = add_to_args(args, image);
if (ret != 0) {
reset_args(args);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c8e51ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h
index 330d722..864acd9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h
@@ -23,6 +23,7 @@
#define CONTAINER_EXECUTOR_CFG_DOCKER_SECTION "docker"
#define DOCKER_BINARY_KEY "docker.binary"
+#define DOCKER_INSPECT_MAX_RETRIES_KEY "docker.inspect.max.retries"
#define DOCKER_COMMAND_FILE_SECTION "docker-command-execution"
#define DOCKER_INSPECT_COMMAND "inspect"
#define DOCKER_LOAD_COMMAND "load"
@@ -86,6 +87,12 @@ char *get_docker_binary(const struct configuration *conf);
int get_docker_command(const char* command_file, const struct configuration* conf, args *args);
/**
+ * Check if use-entry-point flag is set.
+ * @return 1 when the use-entry-point flag is set, 0 otherwise.
+ */
+int get_use_entry_point_flag();
+
+/**
* Get the Docker inspect command line string. The function will verify that the params file is meant for the
* inspect command.
* @param command_file File containing the params for the Docker inspect command
@@ -202,4 +209,11 @@ void reset_args(args *args);
* @param args Pointer reference to args data structure
*/
char** extract_execv_args(args *args);
+
+/**
+ * Get max retries for docker inspect.
+ * @param conf Configuration structure
+ * @return value of max retries
+ */
+int get_max_retries(const struct configuration *conf);
#endif
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c8e51ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
index 3746fa1..1fa425c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
@@ -1312,6 +1312,48 @@ namespace ContainerExecutor {
run_docker_command_test(file_cmd_vec, bad_file_cmd_vec, get_docker_run_command);
}
+ TEST_F(TestDockerUtil, test_docker_run_entry_point) {
+
+ std::string container_executor_contents = "[docker]\n"
+ " docker.allowed.ro-mounts=/var,/etc,/usr/bin/cut\n"
+ " docker.allowed.rw-mounts=/tmp\n docker.allowed.networks=bridge\n "
+ " docker.privileged-containers.enabled=1\n docker.allowed.capabilities=CHOWN,SETUID\n"
+ " docker.allowed.devices=/dev/test\n docker.privileged-containers.registries=hadoop\n";
+ write_file(container_executor_cfg_file, container_executor_contents);
+ int ret = read_config(container_executor_cfg_file.c_str(), &container_executor_cfg);
+ if (ret != 0) {
+ FAIL();
+ }
+ ret = create_ce_file();
+ if (ret != 0) {
+ std::cerr << "Could not create ce file, skipping test" << std::endl;
+ return;
+ }
+
+ std::vector<std::pair<std::string, std::string> > file_cmd_vec;
+ file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
+ "[docker-command-execution]\n"
+ " docker-command=run\n"
+ " name=container_e1_12312_11111_02_000001\n"
+ " image=hadoop/docker-image\n"
+ " user=nobody\n"
+ " use-entry-point=true\n"
+ " environ=/tmp/test.env\n",
+ "/usr/bin/docker run --name=container_e1_12312_11111_02_000001 --user=nobody --cap-drop=ALL "
+ "--env-file /tmp/test.env hadoop/docker-image"));
+
+ std::vector<std::pair<std::string, int> > bad_file_cmd_vec;
+
+ bad_file_cmd_vec.push_back(std::make_pair<std::string, int>(
+ "[docker-command-execution]\n"
+ " docker-command=run\n"
+ " image=hadoop/docker-image\n"
+ " user=nobody",
+ static_cast<int>(INVALID_DOCKER_CONTAINER_NAME)));
+
+ run_docker_command_test(file_cmd_vec, bad_file_cmd_vec, get_docker_run_command);
+ }
+
TEST_F(TestDockerUtil, test_docker_run_no_privileged) {
std::string container_executor_contents[] = {"[docker]\n docker.allowed.ro-mounts=/var,/etc,/usr/bin/cut\n"
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c8e51ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
index 2efba3b..423f1da 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
@@ -207,6 +207,7 @@ are allowed. It contains the following properties:
| `docker.host-pid-namespace.enabled` | Set to "true" or "false" to enable or disable using the host's PID namespace. Default value is "false". |
| `docker.privileged-containers.enabled` | Set to "true" or "false" to enable or disable launching privileged containers. Default value is "false". |
| `docker.privileged-containers.registries` | Comma separated list of trusted docker registries for running trusted privileged docker containers. By default, no registries are defined. |
+| `docker.inspect.max.retries` | Maximum number of `docker inspect` attempts used to check docker container readiness, with a 3 second delay between attempts. The default value of 10 waits up to 30 seconds for the container to become ready before the container is marked as failed. |
Please note that if you wish to run Docker containers that require access to the YARN local directories, you must add them to the docker.allowed.rw-mounts list.
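An illustrative container-executor.cfg fragment (values are examples only; the
section and key names match test_docker_util.cc above) that raises the
readiness window to about one minute:

    [docker]
      docker.binary=/usr/bin/docker
      docker.inspect.max.retries=20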
[32/50] [abbrv] hadoop git commit: HDDS-29. Fix
TestStorageContainerManager#testRpcPermission. Contributed by Mukul Kumar
Singh.
Posted by xy...@apache.org.
HDDS-29. Fix TestStorageContainerManager#testRpcPermission. Contributed by Mukul Kumar Singh.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc5d49c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc5d49c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc5d49c2
Branch: refs/heads/HDDS-4
Commit: fc5d49c202354c6f39b33ea3f80f38e85794c6b3
Parents: 8a2b591
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Mon May 14 09:09:25 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 14 09:10:03 2018 -0700
----------------------------------------------------------------------
.../scm/server/SCMClientProtocolServer.java | 15 +++++++++--
.../scm/server/StorageContainerManager.java | 9 +------
.../ozone/TestStorageContainerManager.java | 27 ++++++++++----------
3 files changed, 28 insertions(+), 23 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc5d49c2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 246d053..d73cccd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ozone.protocolPB
.StorageContainerLocationProtocolServerSideTranslatorPB;
+import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -137,17 +138,26 @@ public class SCMClientProtocolServer implements
getClientRpcServer().join();
}
+ @VisibleForTesting
+ public String getRpcRemoteUsername() {
+ UserGroupInformation user = ProtobufRpcEngine.Server.getRemoteUser();
+ return user == null ? null : user.getUserName();
+ }
+
@Override
public ContainerInfo allocateContainer(HddsProtos.ReplicationType
replicationType, HddsProtos.ReplicationFactor factor,
String owner) throws IOException {
- getScm().checkAdminAccess();
+ String remoteUser = getRpcRemoteUsername();
+ getScm().checkAdminAccess(remoteUser);
return scm.getScmContainerManager()
.allocateContainer(replicationType, factor, owner);
}
@Override
public ContainerInfo getContainer(long containerID) throws IOException {
+ String remoteUser = getRpcRemoteUsername();
+ getScm().checkAdminAccess(remoteUser);
return scm.getScmContainerManager()
.getContainer(containerID);
}
@@ -161,7 +171,8 @@ public class SCMClientProtocolServer implements
@Override
public void deleteContainer(long containerID) throws IOException {
- getScm().checkAdminAccess();
+ String remoteUser = getRpcRemoteUsername();
+ getScm().checkAdminAccess(remoteUser);
scm.getScmContainerManager().deleteContainer(containerID);
}
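Taken together, the refactor gives every client-facing operation the same two-step guard; a condensed sketch of the call sequence introduced above (not new API, just the pattern as it appears in the patch):

    // Inside SCMClientProtocolServer: resolve the RPC caller, then delegate the check.
    String remoteUser = getRpcRemoteUsername();   // null when there is no active RPC call
    getScm().checkAdminAccess(remoteUser);        // throws IOException for non-admin users

Because the username lookup is now an overridable instance method on the protocol server, tests can stub it with a Mockito spy instead of spying on StorageContainerManager itself, as the test changes below show.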
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc5d49c2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index a7248bb..0fd6843 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -620,14 +620,7 @@ public class StorageContainerManager extends ServiceRuntimeInfoImpl
return scmBlockManager;
}
- @VisibleForTesting
- public String getPpcRemoteUsername() {
- UserGroupInformation user = ProtobufRpcEngine.Server.getRemoteUser();
- return user == null ? null : user.getUserName();
- }
-
- public void checkAdminAccess() throws IOException {
- String remoteUser = getPpcRemoteUsername();
+ public void checkAdminAccess(String remoteUser) throws IOException {
if (remoteUser != null) {
if (!scmAdminUsernames.contains(remoteUser)) {
throw new IOException(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc5d49c2/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 9a33885..0081f0d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -23,6 +23,7 @@ import java.io.IOException;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer;
import org.apache.hadoop.hdds.scm.server.SCMStorage;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
@@ -107,19 +108,19 @@ public class TestStorageContainerManager {
MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(ozoneConf).build();
cluster.waitForClusterToBeReady();
try {
- String fakeUser = fakeRemoteUsername;
- StorageContainerManager mockScm = Mockito.spy(
- cluster.getStorageContainerManager());
- Mockito.when(mockScm.getPpcRemoteUsername())
- .thenReturn(fakeUser);
+
+ SCMClientProtocolServer mockClientServer = Mockito.spy(
+ cluster.getStorageContainerManager().getClientProtocolServer());
+ Mockito.when(mockClientServer.getRpcRemoteUsername())
+ .thenReturn(fakeRemoteUsername);
try {
- mockScm.getClientProtocolServer().deleteContainer(
+ mockClientServer.deleteContainer(
ContainerTestHelper.getTestContainerID());
fail("Operation should fail, expecting an IOException here.");
} catch (Exception e) {
if (expectPermissionDenied) {
- verifyPermissionDeniedException(e, fakeUser);
+ verifyPermissionDeniedException(e, fakeRemoteUsername);
} else {
// If passes permission check, it should fail with
// container not exist exception.
@@ -129,7 +130,7 @@ public class TestStorageContainerManager {
}
try {
- ContainerInfo container2 = mockScm.getClientProtocolServer()
+ ContainerInfo container2 = mockClientServer
.allocateContainer(xceiverClientManager.getType(),
HddsProtos.ReplicationFactor.ONE, "OZONE");
if (expectPermissionDenied) {
@@ -138,11 +139,11 @@ public class TestStorageContainerManager {
Assert.assertEquals(1, container2.getPipeline().getMachines().size());
}
} catch (Exception e) {
- verifyPermissionDeniedException(e, fakeUser);
+ verifyPermissionDeniedException(e, fakeRemoteUsername);
}
try {
- ContainerInfo container3 = mockScm.getClientProtocolServer()
+ ContainerInfo container3 = mockClientServer
.allocateContainer(xceiverClientManager.getType(),
HddsProtos.ReplicationFactor.ONE, "OZONE");
if (expectPermissionDenied) {
@@ -151,16 +152,16 @@ public class TestStorageContainerManager {
Assert.assertEquals(1, container3.getPipeline().getMachines().size());
}
} catch (Exception e) {
- verifyPermissionDeniedException(e, fakeUser);
+ verifyPermissionDeniedException(e, fakeRemoteUsername);
}
try {
- mockScm.getClientProtocolServer().getContainer(
+ mockClientServer.getContainer(
ContainerTestHelper.getTestContainerID());
fail("Operation should fail, expecting an IOException here.");
} catch (Exception e) {
if (expectPermissionDenied) {
- verifyPermissionDeniedException(e, fakeUser);
+ verifyPermissionDeniedException(e, fakeRemoteUsername);
} else {
// If passes permission check, it should fail with
// key not exist exception.
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[40/50] [abbrv] hadoop git commit: YARN-8278. DistributedScheduling
is not working in HA. Contributed by Bibin A Chundatt.
Posted by xy...@apache.org.
YARN-8278. DistributedScheduling is not working in HA. Contributed by Bibin A Chundatt.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2bb647bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2bb647bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2bb647bb
Branch: refs/heads/HDDS-4
Commit: 2bb647bb91439e82cf7298e963bb5f7f80bbc3cb
Parents: 58b97c7
Author: Weiwei Yang <ww...@apache.org>
Authored: Tue May 15 17:28:19 2018 +0800
Committer: Weiwei Yang <ww...@apache.org>
Committed: Tue May 15 17:28:19 2018 +0800
----------------------------------------------------------------------
.../hadoop/yarn/server/api/ServerRMProxy.java | 11 +++--
.../yarn/server/api/TestServerRMProxy.java | 52 ++++++++++++++++++++
2 files changed, 60 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bb647bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ServerRMProxy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ServerRMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ServerRMProxy.java
index cd92415..ee9956f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ServerRMProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ServerRMProxy.java
@@ -34,6 +34,11 @@ public class ServerRMProxy<T> extends RMProxy<T> {
private static final Logger LOG =
LoggerFactory.getLogger(ServerRMProxy.class);
+ private interface ServerRMProtocols
+ extends DistributedSchedulingAMProtocol, ResourceTracker {
+ //Add nothing
+ }
+
private ServerRMProxy() {
super();
}
@@ -95,8 +100,8 @@ public class ServerRMProxy<T> extends RMProxy<T> {
@InterfaceAudience.Private
@Override
public void checkAllowedProtocols(Class<?> protocol) {
- Preconditions.checkArgument(
- protocol.isAssignableFrom(ResourceTracker.class),
- "ResourceManager does not support this protocol");
+ Preconditions
+ .checkArgument(protocol.isAssignableFrom(ServerRMProtocols.class),
+ "ResourceManager does not support this protocol");
}
}
\ No newline at end of file
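Why the marker interface fixes HA: checkAllowedProtocols now tests protocol.isAssignableFrom(ServerRMProtocols.class), which holds for both supported protocols. A self-contained sketch with stand-in interfaces (names mirror the patch; the empty bodies are placeholders):

    interface ResourceTracker { }
    interface DistributedSchedulingAMProtocol { }
    interface ServerRMProtocols
        extends DistributedSchedulingAMProtocol, ResourceTracker { }

    public class ProtocolCheckSketch {
      public static void main(String[] args) {
        // Both parent protocols are supertypes of the combined interface, so the
        // precondition in checkAllowedProtocols passes for either of them.
        System.out.println(
            ResourceTracker.class.isAssignableFrom(ServerRMProtocols.class));   // true
        System.out.println(
            DistributedSchedulingAMProtocol.class.isAssignableFrom(
                ServerRMProtocols.class));                                      // true
      }
    }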
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bb647bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/TestServerRMProxy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/TestServerRMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/TestServerRMProxy.java
new file mode 100644
index 0000000..e620207
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/TestServerRMProxy.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api;
+
+import org.apache.hadoop.yarn.conf.HAUtil;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test ServerRMProxy.
+ */
+public class TestServerRMProxy {
+
+ @Test
+ public void testDistributedProtocol() {
+
+ YarnConfiguration conf = new YarnConfiguration();
+ try {
+ ServerRMProxy.createRMProxy(conf, DistributedSchedulingAMProtocol.class);
+ } catch (Exception e) {
+ Assert.fail("DistributedSchedulingAMProtocol fail in non HA");
+ }
+
+ // HA is enabled
+ conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
+ conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
+ conf.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME, "rm1"), "0.0.0.0");
+ conf.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME, "rm2"), "0.0.0.0");
+ try {
+ ServerRMProxy.createRMProxy(conf, DistributedSchedulingAMProtocol.class);
+ } catch (Exception e) {
+ Assert.fail("DistributedSchedulingAMProtocol fail in HA");
+ }
+ }
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[02/50] [abbrv] hadoop git commit: HDDS-34. Remove .meta file during
creation of container. Contributed by Bharat Viswanadham.
Posted by xy...@apache.org.
HDDS-34. Remove .meta file during creation of container.
Contributed by Bharat Viswanadham.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30293f60
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30293f60
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30293f60
Branch: refs/heads/HDDS-4
Commit: 30293f6065c9e5b41c07cd670c7a6a1768d1434b
Parents: db1ab0f
Author: Anu Engineer <ae...@apache.org>
Authored: Thu May 10 17:08:26 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Thu May 10 17:08:26 2018 -0700
----------------------------------------------------------------------
.../main/proto/DatanodeContainerProtocol.proto | 5 ---
.../container/common/helpers/ContainerData.java | 27 -------------
.../common/helpers/ContainerUtils.java | 35 ++--------------
.../common/impl/ContainerManagerImpl.java | 42 ++------------------
.../scm/cli/container/InfoContainerHandler.java | 3 --
.../org/apache/hadoop/ozone/scm/TestSCMCli.java | 2 +-
6 files changed, 9 insertions(+), 105 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30293f60/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 172b660..80bc22d 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -221,17 +221,12 @@ message ContainerData {
repeated KeyValue metadata = 2;
optional string dbPath = 3;
optional string containerPath = 4;
- optional string hash = 5;
optional int64 bytesUsed = 6;
optional int64 size = 7;
optional int64 keyCount = 8;
optional ContainerLifeCycleState state = 9 [default = OPEN];
}
-message ContainerMeta {
- required string fileName = 1;
- required string hash = 2;
-}
// Container Messages.
message CreateContainerRequestProto {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30293f60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
index 799cca3..947dc7d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
@@ -18,14 +18,12 @@
package org.apache.hadoop.ozone.container.common.helpers;
-import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ContainerLifeCycleState;
import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.util.Time;
import java.io.IOException;
import java.util.Collections;
@@ -45,7 +43,6 @@ public class ContainerData {
private String dbPath; // Path to Level DB Store.
// Path to Physical file system where container and checksum are stored.
private String containerFilePath;
- private String hash;
private AtomicLong bytesUsed;
private long maxSize;
private long containerID;
@@ -95,10 +92,6 @@ public class ContainerData {
data.setState(protoData.getState());
}
- if(protoData.hasHash()) {
- data.setHash(protoData.getHash());
- }
-
if (protoData.hasBytesUsed()) {
data.setBytesUsed(protoData.getBytesUsed());
}
@@ -123,10 +116,6 @@ public class ContainerData {
builder.setDbPath(this.getDBPath());
}
- if (this.getHash() != null) {
- builder.setHash(this.getHash());
- }
-
if (this.getContainerPath() != null) {
builder.setContainerPath(this.getContainerPath());
}
@@ -274,22 +263,6 @@ public class ContainerData {
// TODO: closed or closing here
setState(ContainerLifeCycleState.CLOSED);
- // Some thing brain dead for now. name + Time stamp of when we get the close
- // container message.
- setHash(DigestUtils.sha256Hex(this.getContainerID() +
- Long.toString(Time.monotonicNow())));
- }
-
- /**
- * Final hash for this container.
- * @return - Hash
- */
- public String getHash() {
- return hash;
- }
-
- public void setHash(String hash) {
- this.hash = hash;
}
public void setMaxSize(long maxSize) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30293f60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
index e244354..959d88c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
@@ -47,7 +47,7 @@ import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
.UNABLE_TO_FIND_DATA_DIR;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION;
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_META;
+
/**
* A set of helper functions to create proper responses.
@@ -194,10 +194,9 @@ public final class ContainerUtils {
* Verifies that this is indeed a new container.
*
* @param containerFile - Container File to verify
- * @param metadataFile - metadata File to verify
* @throws IOException
*/
- public static void verifyIsNewContainer(File containerFile, File metadataFile)
+ public static void verifyIsNewContainer(File containerFile)
throws IOException {
Logger log = LoggerFactory.getLogger(ContainerManagerImpl.class);
if (containerFile.exists()) {
@@ -207,13 +206,6 @@ public final class ContainerUtils {
"disk.");
}
- if (metadataFile.exists()) {
- log.error("metadata found on disk, but missing container. Refusing to" +
- " write this container. File: {} ", metadataFile.toPath());
- throw new FileAlreadyExistsException(("metadata found on disk, but " +
- "missing container. Refusing to write this container."));
- }
-
File parentPath = new File(containerFile.getParent());
if (!parentPath.exists() && !parentPath.mkdirs()) {
@@ -228,11 +220,6 @@ public final class ContainerUtils {
throw new IOException("creation of a new container file failed.");
}
- if (!metadataFile.createNewFile()) {
- log.error("creation of the metadata file failed. File: {}",
- metadataFile.toPath());
- throw new IOException("creation of a new container file failed.");
- }
}
public static String getContainerDbFileName(String containerName) {
@@ -287,20 +274,6 @@ public final class ContainerUtils {
}
/**
- * Returns Metadata location.
- *
- * @param containerData - Data
- * @param location - Path
- * @return Path
- */
- public static File getMetadataFile(ContainerData containerData,
- Path location) {
- return location.resolve(Long.toString(containerData
- .getContainerID()).concat(CONTAINER_META))
- .toFile();
- }
-
- /**
* Returns container file location.
*
* @param containerData - Data
@@ -395,10 +368,10 @@ public final class ContainerUtils {
String rootPath = getContainerNameFromFile(new File(containerData
.getContainerPath()));
Path containerPath = Paths.get(rootPath.concat(CONTAINER_EXTENSION));
- Path metaPath = Paths.get(rootPath.concat(CONTAINER_META));
+
FileUtils.forceDelete(containerPath.toFile());
- FileUtils.forceDelete(metaPath.toFile());
+
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30293f60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 1893b3b..cb60334 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.container.common.impl;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
-import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -101,7 +100,6 @@ import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
.UNSUPPORTED_REQUEST;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION;
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_META;
/**
* A Generic ContainerManagerImpl that will be called from Ozone
@@ -233,18 +231,11 @@ public class ContainerManagerImpl implements ContainerManager {
long containerID = Long.parseLong(keyName);
try {
String containerFileName = containerName.concat(CONTAINER_EXTENSION);
- String metaFileName = containerName.concat(CONTAINER_META);
containerStream = new FileInputStream(containerFileName);
- metaStream = new FileInputStream(metaFileName);
-
- MessageDigest sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
-
- dis = new DigestInputStream(containerStream, sha);
-
ContainerProtos.ContainerData containerDataProto =
- ContainerProtos.ContainerData.parseDelimitedFrom(dis);
+ ContainerProtos.ContainerData.parseDelimitedFrom(containerStream);
ContainerData containerData;
if (containerDataProto == null) {
// Sometimes container metadata might have been created but empty,
@@ -255,19 +246,6 @@ public class ContainerManagerImpl implements ContainerManager {
return;
}
containerData = ContainerData.getFromProtBuf(containerDataProto, conf);
- ContainerProtos.ContainerMeta meta =
- ContainerProtos.ContainerMeta.parseDelimitedFrom(metaStream);
- if (meta != null && !DigestUtils.sha256Hex(sha.digest())
- .equals(meta.getHash())) {
- // This means we were not able read data from the disk when booted the
- // datanode. We are going to rely on SCM understanding that we don't
- // have valid data for this container when we send container reports.
- // Hopefully SCM will ask us to delete this container and rebuild it.
- LOG.error("Invalid SHA found for container data. Name :{}"
- + "cowardly refusing to read invalid data", containerName);
- containerMap.put(containerID, new ContainerStatus(null));
- return;
- }
ContainerStatus containerStatus = new ContainerStatus(containerData);
// Initialize pending deletion blocks count in in-memory
@@ -298,7 +276,7 @@ public class ContainerManagerImpl implements ContainerManager {
containerStatus.setBytesUsed(bytesUsed);
containerMap.put(containerID, containerStatus);
- } catch (IOException | NoSuchAlgorithmException ex) {
+ } catch (IOException ex) {
LOG.error("read failed for file: {} ex: {}", containerName,
ex.getMessage());
@@ -398,12 +376,10 @@ public class ContainerManagerImpl implements ContainerManager {
File containerFile = ContainerUtils.getContainerFile(containerData,
location);
- File metadataFile = ContainerUtils.getMetadataFile(containerData,
- location);
String containerName = Long.toString(containerData.getContainerID());
if(!overwrite) {
- ContainerUtils.verifyIsNewContainer(containerFile, metadataFile);
+ ContainerUtils.verifyIsNewContainer(containerFile);
metadataPath = this.locationManager.getDataPath(containerName);
metadataPath = ContainerUtils.createMetadata(metadataPath,
containerName, conf);
@@ -412,7 +388,7 @@ public class ContainerManagerImpl implements ContainerManager {
}
containerStream = new FileOutputStream(containerFile);
- metaStream = new FileOutputStream(metadataFile);
+
MessageDigest sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
dos = new DigestOutputStream(containerStream, sha);
@@ -425,13 +401,6 @@ public class ContainerManagerImpl implements ContainerManager {
.getProtoBufMessage();
protoData.writeDelimitedTo(dos);
- ContainerProtos.ContainerMeta protoMeta = ContainerProtos
- .ContainerMeta.newBuilder()
- .setFileName(containerFile.toString())
- .setHash(DigestUtils.sha256Hex(sha.digest()))
- .build();
- protoMeta.writeDelimitedTo(metaStream);
-
} catch (IOException ex) {
// TODO : we need to clean up partially constructed files
// The proper way to do would be for a thread
@@ -913,9 +882,6 @@ public class ContainerManagerImpl implements ContainerManager {
.setWriteBytes(container.getWriteBytes())
.setContainerID(container.getContainer().getContainerID());
- if (container.getContainer().getHash() != null) {
- ciBuilder.setFinalhash(container.getContainer().getHash());
- }
crBuilder.addReports(ciBuilder.build());
}
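The net effect on persistence: a container's on-disk record is now a single delimited protobuf message, with no companion .meta hash file and no digest stream wrapped around the read path. A minimal sketch of the simplified round trip (containerFile and data are placeholders; error cleanup is omitted):

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;

    final class ContainerRoundTripSketch {
      static void writeAndRead(File containerFile, ContainerProtos.ContainerData data)
          throws IOException {
        // Write: one delimited message, no separate .meta file to keep in sync.
        try (FileOutputStream out = new FileOutputStream(containerFile)) {
          data.writeDelimitedTo(out);
        }
        // Read: parse the same message back; null signals an empty or partial file.
        try (FileInputStream in = new FileInputStream(containerFile)) {
          ContainerProtos.ContainerData proto =
              ContainerProtos.ContainerData.parseDelimitedFrom(in);
        }
      }
    }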
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30293f60/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
index 843d9db..cefa28c 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
@@ -81,9 +81,6 @@ public class InfoContainerHandler extends OzoneCommandHandler {
containerData.getState() == ContainerLifeCycleState.OPEN ? "OPEN" :
"CLOSED";
logOut("Container State: %s", openStatus);
- if (!containerData.getHash().isEmpty()) {
- logOut("Container Hash: %s", containerData.getHash());
- }
logOut("Container DB Path: %s", containerData.getDbPath());
logOut("Container Path: %s", containerData.getContainerPath());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30293f60/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
index 2d8577c..19bc423 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
@@ -332,7 +332,7 @@ public class TestSCMCli {
openStatus = data.isOpen() ? "OPEN" : "CLOSED";
expected = String
.format(formatStrWithHash, container.getContainerID(), openStatus,
- data.getHash(), data.getDBPath(), data.getContainerPath(), "",
+ data.getDBPath(), data.getContainerPath(), "",
datanodeDetails.getHostName(), datanodeDetails.getHostName());
assertEquals(expected, out.toString());
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[47/50] [abbrv] hadoop git commit: HDDS-5. Enable OzoneManager
kerberos auth. Contributed by Ajay Kumar.
Posted by xy...@apache.org.
HDDS-5. Enable OzoneManager kerberos auth. Contributed by Ajay Kumar.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e95b5de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e95b5de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e95b5de
Branch: refs/heads/HDDS-4
Commit: 1e95b5def78d6b61ad2b1e7053a27bb99ee5822c
Parents: 998df5a
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Mon May 14 09:36:57 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Tue May 15 16:55:52 2018 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/hdds/HddsConfigKeys.java | 4 +
.../common/src/main/resources/ozone-default.xml | 33 +++-
.../apache/hadoop/ozone/ksm/KSMConfigKeys.java | 5 +
.../ksm/protocol/KeySpaceManagerProtocol.java | 4 +
.../protocolPB/KeySpaceManagerProtocolPB.java | 5 +
.../hadoop/ozone/MiniOzoneClusterImpl.java | 3 +-
.../ozone/TestOzoneConfigurationFields.java | 3 +-
.../hadoop/ozone/TestSecureOzoneCluster.java | 169 +++++++++++++++----
.../hadoop/ozone/ksm/KeySpaceManager.java | 53 +++++-
.../ozone/ksm/KeySpaceManagerHttpServer.java | 5 +-
10 files changed, 238 insertions(+), 46 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e95b5de/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index dec2c1c..a12d6ac 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -20,4 +20,8 @@ package org.apache.hadoop.hdds;
public final class HddsConfigKeys {
private HddsConfigKeys() {
}
+ public static final String HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY = "hdds.ksm."
+ + "kerberos.keytab.file";
+ public static final String HDDS_KSM_KERBEROS_PRINCIPAL_KEY = "hdds.ksm"
+ + ".kerberos.principal";
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e95b5de/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 46c67fd..deb286d 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1071,7 +1071,23 @@
<name>ozone.scm.kerberos.principal</name>
<value></value>
<tag> OZONE, SECURITY</tag>
- <description>The SCM service principal. Ex scm/_HOST@REALM.TLD.</description>
+ <description>The SCM service principal. Ex scm/_HOST@REALM.COM</description>
+ </property>
+
+ <property>
+ <name>hdds.ksm.kerberos.keytab.file</name>
+ <value></value>
+ <tag> HDDS, SECURITY</tag>
+ <description> The keytab file used by the KSM daemon to log in as its
+ service principal. The principal name is configured with
+ hdds.ksm.kerberos.principal.
+ </description>
+ </property>
+ <property>
+ <name>hdds.ksm.kerberos.principal</name>
+ <value></value>
+ <tag> HDDS, SECURITY</tag>
+ <description>The KSM service principal. Ex ksm/_HOST@REALM.COM</description>
</property>
<property>
@@ -1083,4 +1099,19 @@
<value>/etc/security/keytabs/HTTP.keytab</value>
</property>
+ <property>
+ <name>hdds.ksm.web.authentication.kerberos.principal</name>
+ <value>HTTP/_HOST@EXAMPLE.COM</value>
+ <description>
+ KSM http server kerberos principal.
+ </description>
+ </property>
+ <property>
+ <name>hdds.ksm.web.authentication.kerberos.keytab</name>
+ <value>/etc/security/keytabs/HTTP.keytab</value>
+ <description>
+ KSM http server kerberos keytab.
+ </description>
+ </property>
+
</configuration>
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e95b5de/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
index 75cf613..d911bcb 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
@@ -78,4 +78,9 @@ public final class KSMConfigKeys {
public static final String OZONE_KEY_DELETING_LIMIT_PER_TASK =
"ozone.key.deleting.limit.per.task";
public static final int OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT = 1000;
+
+ public static final String KSM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
+ "hdds.ksm.web.authentication.kerberos.principal";
+ public static final String KSM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE =
+ "hdds.ksm.web.authentication.kerberos.keytab";
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e95b5de/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
index 54862d3..de27108 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.ozone.ksm.protocol;
+import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
@@ -29,10 +30,13 @@ import org.apache.hadoop.ozone.protocol.proto
.KeySpaceManagerProtocolProtos.OzoneAclInfo;
import java.io.IOException;
import java.util.List;
+import org.apache.hadoop.security.KerberosInfo;
/**
* Protocol to talk to KSM.
*/
+@KerberosInfo(
+ serverPrincipal = HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY)
public interface KeySpaceManagerProtocol {
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e95b5de/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
index 8acca8a..71b9da0 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
@@ -18,9 +18,12 @@
package org.apache.hadoop.ozone.ksm.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.protocol.proto
.KeySpaceManagerProtocolProtos.KeySpaceManagerService;
+import org.apache.hadoop.security.KerberosInfo;
/**
* Protocol used to communicate with KSM.
@@ -28,6 +31,8 @@ import org.apache.hadoop.ozone.protocol.proto
@ProtocolInfo(protocolName =
"org.apache.hadoop.ozone.protocol.KeySpaceManagerProtocol",
protocolVersion = 1)
+@KerberosInfo(
+ serverPrincipal = HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY)
@InterfaceAudience.Private
public interface KeySpaceManagerProtocolPB
extends KeySpaceManagerService.BlockingInterface {
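The @KerberosInfo annotation is what ties a protocol to its expected server identity: it names the configuration key holding the server principal, which Hadoop RPC resolves during SASL negotiation. A hedged sketch of the pattern (MySecureProtocol is hypothetical, for illustration only):

    import java.io.IOException;
    import org.apache.hadoop.hdds.HddsConfigKeys;
    import org.apache.hadoop.security.KerberosInfo;

    // The annotation points at a config key, not a literal principal; the actual
    // value (e.g. ksm/_HOST@REALM.COM) is read from configuration at runtime.
    @KerberosInfo(serverPrincipal = HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY)
    public interface MySecureProtocol {
      void ping() throws IOException;
    }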
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e95b5de/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index e4f8e62..b837100 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -344,7 +344,8 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
*
* @throws IOException
*/
- private KeySpaceManager createKSM() throws IOException {
+ private KeySpaceManager createKSM()
+ throws IOException, AuthenticationException {
configureKSM();
KSMStorage ksmStore = new KSMStorage(conf);
ksmStore.setClusterId(clusterId);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e95b5de/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
index 533a3b4..a1d3fd0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone;
import org.apache.hadoop.conf.TestConfigurationFieldsBase;
+import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -31,7 +32,7 @@ public class TestOzoneConfigurationFields extends TestConfigurationFieldsBase {
xmlFilename = new String("ozone-default.xml");
configurationClasses =
new Class[] {OzoneConfigKeys.class, ScmConfigKeys.class,
- KSMConfigKeys.class};
+ KSMConfigKeys.class, HddsConfigKeys.class};
errorIfMissingConfigProps = true;
errorIfMissingXmlProps = true;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e95b5de/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
index 9c430ad..b917dfe 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
@@ -26,24 +26,34 @@ import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Properties;
import java.util.UUID;
+import java.util.concurrent.Callable;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.ScmInfo;
import org.apache.hadoop.hdds.scm.server.SCMStorage;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
+import org.apache.hadoop.ozone.ksm.KSMStorage;
+import org.apache.hadoop.ozone.ksm.KeySpaceManager;
import org.apache.hadoop.security.KerberosAuthException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.util.KerberosUtil;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
+import org.junit.Rule;
import org.junit.Test;
+import org.junit.rules.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -56,13 +66,23 @@ public final class TestSecureOzoneCluster {
private Logger LOGGER = LoggerFactory
.getLogger(TestSecureOzoneCluster.class);
+ @Rule
+ public Timeout timeout = new Timeout(80000);
+
private MiniKdc miniKdc;
private OzoneConfiguration conf;
private File workDir;
private static Properties securityProperties;
private File scmKeytab;
private File spnegoKeytab;
+ private File ksmKeyTab;
private String curUser;
+ private StorageContainerManager scm;
+ private KeySpaceManager ksm;
+
+ private static String clusterId;
+ private static String scmId;
+ private static String ksmId;
@Before
public void init() {
@@ -71,6 +91,10 @@ public final class TestSecureOzoneCluster {
startMiniKdc();
setSecureConfig(conf);
createCredentialsInKDC(conf, miniKdc);
+
+ clusterId = UUID.randomUUID().toString();
+ scmId = UUID.randomUUID().toString();
+ ksmId = UUID.randomUUID().toString();
} catch (IOException e) {
LOGGER.error("Failed to initialize TestSecureOzoneCluster", e);
} catch (Exception e) {
@@ -78,12 +102,30 @@ public final class TestSecureOzoneCluster {
}
}
+ @After
+ public void stop() {
+ try {
+ stopMiniKdc();
+ if (scm != null) {
+ scm.stop();
+ }
+ if (ksm != null) {
+ ksm.stop();
+ }
+ } catch (Exception e) {
+ LOGGER.error("Failed to stop TestSecureOzoneCluster", e);
+ }
+ }
+
private void createCredentialsInKDC(Configuration conf, MiniKdc miniKdc)
throws Exception {
createPrincipal(scmKeytab,
conf.get(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY));
createPrincipal(spnegoKeytab,
- conf.get(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY));
+ conf.get(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY),
+ conf.get(KSMConfigKeys.KSM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL));
+ createPrincipal(ksmKeyTab,
+ conf.get(HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY));
}
private void createPrincipal(File keytab, String... principal)
@@ -99,6 +141,10 @@ public final class TestSecureOzoneCluster {
miniKdc.start();
}
+ private void stopMiniKdc() throws Exception {
+ miniKdc.stop();
+ }
+
private void setSecureConfig(Configuration conf) throws IOException {
conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
String host = KerberosUtil.getLocalHostName();
@@ -114,59 +160,56 @@ public final class TestSecureOzoneCluster {
conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
"HTTP_SCM/" + host + "@" + realm);
+ conf.set(HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY,
+ "ksm/" + host + "@" + realm);
+ conf.set(KSMConfigKeys.KSM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL,
+ "HTTP_KSM/" + host + "@" + realm);
+
scmKeytab = new File(workDir, "scm.keytab");
spnegoKeytab = new File(workDir, "http.keytab");
+ ksmKeyTab = new File(workDir, "ksm.keytab");
conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
scmKeytab.getAbsolutePath());
conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY,
spnegoKeytab.getAbsolutePath());
+ conf.set(HddsConfigKeys.HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY,
+ ksmKeyTab.getAbsolutePath());
}
@Test
public void testSecureScmStartupSuccess() throws Exception {
+
+ initSCM();
+ scm = StorageContainerManager.createSCM(null, conf);
+ //Reads the SCM Info from SCM instance
+ ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
+ Assert.assertEquals(clusterId, scmInfo.getClusterId());
+ Assert.assertEquals(scmId, scmInfo.getScmId());
+ }
+
+ private void initSCM()
+ throws IOException, AuthenticationException {
final String path = GenericTestUtils
.getTempPath(UUID.randomUUID().toString());
Path scmPath = Paths.get(path, "scm-meta");
conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
SCMStorage scmStore = new SCMStorage(conf);
- String clusterId = UUID.randomUUID().toString();
- String scmId = UUID.randomUUID().toString();
scmStore.setClusterId(clusterId);
scmStore.setScmId(scmId);
// writes the version file properties
scmStore.initialize();
- StorageContainerManager scm = StorageContainerManager.createSCM(null, conf);
- //Reads the SCM Info from SCM instance
- ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
- Assert.assertEquals(clusterId, scmInfo.getClusterId());
- Assert.assertEquals(scmId, scmInfo.getScmId());
}
@Test
public void testSecureScmStartupFailure() throws Exception {
- final String path = GenericTestUtils
- .getTempPath(UUID.randomUUID().toString());
- Path scmPath = Paths.get(path, "scm-meta");
-
- OzoneConfiguration conf = new OzoneConfiguration();
- conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
- conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
- conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
- "scm@" + miniKdc.getRealm());
+ initSCM();
+ conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY, "");
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
- SCMStorage scmStore = new SCMStorage(conf);
- String clusterId = UUID.randomUUID().toString();
- String scmId = UUID.randomUUID().toString();
- scmStore.setClusterId(clusterId);
- scmStore.setScmId(scmId);
- // writes the version file properties
- scmStore.initialize();
LambdaTestUtils.intercept(IOException.class,
"Running in secure mode, but config doesn't have a keytab",
() -> {
@@ -178,28 +221,82 @@ public final class TestSecureOzoneCluster {
conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
"/etc/security/keytabs/scm.keytab");
+ testCommonKerberosFailures(
+ () -> StorageContainerManager.createSCM(null, conf));
+
+ }
+
+ private void testCommonKerberosFailures(Callable callable) throws Exception {
LambdaTestUtils.intercept(KerberosAuthException.class, "failure "
- + "to login: for principal:",
- () -> {
- StorageContainerManager.createSCM(null, conf);
- });
+ + "to login: for principal:", callable);
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"OAuth2");
LambdaTestUtils.intercept(IllegalArgumentException.class, "Invalid"
+ " attribute value for hadoop.security.authentication of OAuth2",
- () -> {
- StorageContainerManager.createSCM(null, conf);
- });
+ callable);
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"KERBEROS_SSL");
LambdaTestUtils.intercept(AuthenticationException.class,
- "KERBEROS_SSL authentication method not support.",
- () -> {
- StorageContainerManager.createSCM(null, conf);
- });
+ "KERBEROS_SSL authentication method not",
+ callable);
+ }
+ /**
+ * Tests secure KSM initialization failure.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testSecureKsmInitializationFailure() throws Exception {
+ initSCM();
+ // Create a secure SCM instance as ksm client will connect to it
+ scm = StorageContainerManager.createSCM(null, conf);
+
+ final String path = GenericTestUtils
+ .getTempPath(UUID.randomUUID().toString());
+ KSMStorage ksmStore = new KSMStorage(conf);
+ ksmStore.setClusterId("testClusterId");
+ ksmStore.setScmId("testScmId");
+ // writes the version file properties
+ ksmStore.initialize();
+ conf.set(HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY,
+ "non-existent-user@EXAMPLE.com");
+ testCommonKerberosFailures(() -> KeySpaceManager.createKSM(null, conf));
+ }
+
+ /**
+ * Tests secure KSM initialization success.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testSecureKsmInitializationSuccess() throws Exception {
+ initSCM();
+ // Create a secure SCM instance as ksm client will connect to it
+ scm = StorageContainerManager.createSCM(null, conf);
+ LogCapturer logs = LogCapturer.captureLogs(KeySpaceManager.LOG);
+ GenericTestUtils
+ .setLogLevel(LoggerFactory.getLogger(KeySpaceManager.class.getName()),
+ org.slf4j.event.Level.INFO);
+
+ final String path = GenericTestUtils
+ .getTempPath(UUID.randomUUID().toString());
+ Path metaDirPath = Paths.get(path, "ksm-meta");
+
+ KSMStorage ksmStore = new KSMStorage(conf);
+ ksmStore.setClusterId("testClusterId");
+ ksmStore.setScmId("testScmId");
+ // writes the version file properties
+ ksmStore.initialize();
+ try {
+ ksm = KeySpaceManager.createKSM(null, conf);
+ } catch (Exception ex) {
+ // Expects timeout failure from scmClient in KSM but KSM user login via
+ // kerberos should succeed
+ Assert.assertTrue(logs.getOutput().contains("KSM login successful"));
+ }
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e95b5de/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
index d0f0c9b..700260d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.common.Storage.StorageState;
import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
@@ -59,7 +60,10 @@ import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
import org.apache.hadoop.hdds.scm.protocolPB
.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
+import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.StringUtils;
@@ -83,6 +87,8 @@ import java.util.List;
import java.util.Map;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY;
import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
.OZONE_KSM_ADDRESS_KEY;
import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
@@ -102,7 +108,7 @@ import static org.apache.hadoop.util.ExitUtil.terminate;
@InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"})
public final class KeySpaceManager extends ServiceRuntimeInfoImpl
implements KeySpaceManagerProtocol, KSMMXBean {
- private static final Logger LOG =
+ public static final Logger LOG =
LoggerFactory.getLogger(KeySpaceManager.class);
private static final String USAGE =
@@ -153,8 +159,8 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
private KeySpaceManager(OzoneConfiguration conf) throws IOException {
Preconditions.checkNotNull(conf);
configuration = conf;
+
ksmStorage = new KSMStorage(conf);
- scmBlockClient = getScmBlockClient(configuration);
scmContainerClient = getScmContainerClient(configuration);
if (ksmStorage.getState() != StorageState.INITIALIZED) {
throw new KSMException("KSM not initialized.",
@@ -162,6 +168,7 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
}
// verifies that the SCM info in the KSM Version file is correct.
+ scmBlockClient = getScmBlockClient(configuration);
ScmInfo scmInfo = scmBlockClient.getScmInfo();
if (!(scmInfo.getClusterId().equals(ksmStorage.getClusterID()) && scmInfo
.getScmId().equals(ksmStorage.getScmId()))) {
@@ -194,6 +201,34 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
}
/**
+ * Login KSM service user if security and Kerberos are enabled.
+ *
+ * @param conf
+ * @throws IOException, AuthenticationException
+ */
+ private static void loginKSMUser(OzoneConfiguration conf)
+ throws IOException, AuthenticationException {
+
+ if (SecurityUtil.getAuthenticationMethod(conf).equals
+ (AuthenticationMethod.KERBEROS)) {
+ LOG.debug("Ozone security is enabled. Attempting login for KSM user. "
+ + "Principal: {},keytab: {}", conf.get(HDDS_KSM_KERBEROS_PRINCIPAL_KEY),
+ conf.get(HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY));
+
+ UserGroupInformation.setConfiguration(conf);
+
+ InetSocketAddress socAddr = getKsmAddress(conf);
+ SecurityUtil.login(conf, HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY,
+ HDDS_KSM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
+ } else {
+ throw new AuthenticationException(SecurityUtil.getAuthenticationMethod
+ (conf) + " authentication method not supported. KSM user login "
+ + "failed.");
+ }
+ LOG.info("KSM login successful.");
+ }
+
+ /**
* Create a scm block client, used by putKey() and getKey().
*
* @return {@link ScmBlockLocationProtocol}
@@ -337,7 +372,7 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
*/
public static KeySpaceManager createKSM(String[] argv,
- OzoneConfiguration conf) throws IOException {
+ OzoneConfiguration conf) throws IOException, AuthenticationException {
if (!isHddsEnabled(conf)) {
System.err.println("KSM cannot be started in secure mode or when " +
OZONE_ENABLED + " is set to false");
@@ -349,6 +384,10 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
terminate(1);
return null;
}
+ // Authenticate KSM if security is enabled
+ if (conf.getBoolean(OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY, true)) {
+ loginKSMUser(conf);
+ }
switch (startOpt) {
case CREATEOBJECTSTORE:
terminate(ksmInit(conf) ? 0 : 1);
@@ -443,7 +482,13 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
metadataManager.start();
keyManager.start();
ksmRpcServer.start();
- httpServer.start();
+ try {
+ httpServer.start();
+ } catch (Exception ex) {
+ // Allow KSM to start, as HTTP server failure is not fatal.
+ LOG.error("KSM HttpServer failed to start.", ex);
+ }
+
registerMXBean();
setStartTime();
}
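Condensed, the new login path reduces to the SecurityUtil sequence below (config keys as defined in the patch; the enclosing method and error handling are omitted):

    // Sketch of loginKSMUser(): only runs when hadoop.security.authentication=kerberos.
    if (SecurityUtil.getAuthenticationMethod(conf)
        .equals(UserGroupInformation.AuthenticationMethod.KERBEROS)) {
      UserGroupInformation.setConfiguration(conf);
      InetSocketAddress socAddr = getKsmAddress(conf);  // supplies the host for _HOST expansion
      SecurityUtil.login(conf, HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY,
          HDDS_KSM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
    }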
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e95b5de/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
index 478804b..a0d15b3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.ozone.ksm;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.hdds.server.BaseHttpServer;
@@ -65,11 +64,11 @@ public class KeySpaceManagerHttpServer extends BaseHttpServer {
}
@Override protected String getKeytabFile() {
- return KSMConfigKeys.OZONE_KSM_KEYTAB_FILE;
+ return KSMConfigKeys.KSM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE;
}
@Override protected String getSpnegoPrincipal() {
- return OzoneConfigKeys.OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
+ return KSMConfigKeys.KSM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
}
@Override protected String getEnabledKey() {
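The login path added above follows the standard Hadoop pattern for Kerberos service login: check the configured authentication method, bind the configuration to UserGroupInformation, then log in from a keytab with the _HOST placeholder in the principal resolved against the service's RPC address. Below is a minimal, self-contained sketch of that sequence; the class name and the two config keys are illustrative stand-ins, not the HDDS_KSM_* constants from the patch.

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;

public final class ServiceLoginSketch {
  // Illustrative keys; the patch itself uses HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY
  // and HDDS_KSM_KERBEROS_PRINCIPAL_KEY.
  private static final String KEYTAB_KEY = "example.service.keytab.file";
  private static final String PRINCIPAL_KEY =
      "example.service.kerberos.principal";

  private ServiceLoginSketch() {
  }

  static void login(Configuration conf, InetSocketAddress rpcAddr)
      throws IOException {
    if (SecurityUtil.getAuthenticationMethod(conf)
        != AuthenticationMethod.KERBEROS) {
      throw new IOException(
          "Kerberos is not the configured authentication method");
    }
    UserGroupInformation.setConfiguration(conf);
    // Reads the keytab path and principal from the two keys and performs the
    // keytab login; _HOST in the principal becomes rpcAddr's hostname.
    SecurityUtil.login(conf, KEYTAB_KEY, PRINCIPAL_KEY, rpcAddr.getHostName());
  }
}

SecurityUtil.login expands a principal such as ksm/_HOST@REALM using the hostname passed in, which is why the patch derives it from getKsmAddress(conf).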
---------------------------------------------------------------------
[24/50] [abbrv] hadoop git commit: HDDS-52. Fix
TestSCMCli#testInfoContainer. Contributed by Mukul Kumar Singh.
Posted by xy...@apache.org.
HDDS-52. Fix TestSCMCli#testInfoContainer.
Contributed by Mukul Kumar Singh.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e26e1f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e26e1f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e26e1f2
Branch: refs/heads/HDDS-4
Commit: 7e26e1f2166d2238a63a4086061a21e60e253605
Parents: a5449d3
Author: Anu Engineer <ae...@apache.org>
Authored: Sat May 12 10:07:32 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Sat May 12 10:24:05 2018 -0700
----------------------------------------------------------------------
.../src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e26e1f2/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
index 19bc423..732221a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
@@ -331,7 +331,7 @@ public class TestSCMCli {
openStatus = data.isOpen() ? "OPEN" : "CLOSED";
expected = String
- .format(formatStrWithHash, container.getContainerID(), openStatus,
+ .format(formatStr, container.getContainerID(), openStatus,
data.getDBPath(), data.getContainerPath(), "",
datanodeDetails.getHostName(), datanodeDetails.getHostName());
assertEquals(expected, out.toString());
---------------------------------------------------------------------
[25/50] [abbrv] hadoop git commit: HDDS-32. Fix
TestContainerDeletionChoosingPolicy#testTopNOrderedChoosingPolicy.
Contributed by Mukul Kumar Singh.
Posted by xy...@apache.org.
HDDS-32. Fix TestContainerDeletionChoosingPolicy#testTopNOrderedChoosingPolicy.
Contributed by Mukul Kumar Singh.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53c8ebcd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53c8ebcd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53c8ebcd
Branch: refs/heads/HDDS-4
Commit: 53c8ebcd0285ca91f7d758084cd91df93af0ffb3
Parents: cd8b9e9
Author: Anu Engineer <ae...@apache.org>
Authored: Sat May 12 10:18:53 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Sat May 12 10:24:06 2018 -0700
----------------------------------------------------------------------
.../common/impl/TestContainerDeletionChoosingPolicy.java | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/53c8ebcd/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
index 331db40..4344419 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
@@ -28,6 +28,7 @@ import java.util.Map;
import java.util.Random;
import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
@@ -138,14 +139,14 @@ public class TestContainerDeletionChoosingPolicy {
int numContainers = 10;
Random random = new Random();
- Map<String, Integer> name2Count = new HashMap<>();
+ Map<Long, Integer> name2Count = new HashMap<>();
// create [numContainers + 1] containers
for (int i = 0; i <= numContainers; i++) {
- String containerName = OzoneUtils.getRequestID();
- ContainerData data = new ContainerData(new Long(i), conf);
+ long containerId = RandomUtils.nextLong();
+ ContainerData data = new ContainerData(containerId, conf);
containerManager.createContainer(data);
Assert.assertTrue(
- containerManager.getContainerMap().containsKey(containerName));
+ containerManager.getContainerMap().containsKey(containerId));
// don't create deletion blocks in the last container.
if (i == numContainers) {
@@ -155,7 +156,7 @@ public class TestContainerDeletionChoosingPolicy {
// create random number of deletion blocks and write to container db
int deletionBlocks = random.nextInt(numContainers) + 1;
// record <ContainerName, DeletionCount> value
- name2Count.put(containerName, deletionBlocks);
+ name2Count.put(containerId, deletionBlocks);
for (int j = 0; j <= deletionBlocks; j++) {
MetadataStore metadata = KeyUtils.getDB(data, conf);
String blk = "blk" + i + "-" + j;
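The fix replaces string request IDs with numeric container IDs, so deletion counts are now tracked in a Long-keyed map. A minimal sketch of that bookkeeping, using RandomUtils from commons-lang3 as the patch does (all names here are illustrative):

import java.util.HashMap;
import java.util.Map;
import java.util.Random;

import org.apache.commons.lang3.RandomUtils;

public class DeletionCountSketch {
  public static void main(String[] args) {
    Random random = new Random();
    // Deletion-block counts are recorded per numeric container ID,
    // matching the Map<Long, Integer> introduced by the patch.
    Map<Long, Integer> id2Count = new HashMap<>();
    for (int i = 0; i < 10; i++) {
      long containerId = RandomUtils.nextLong();
      int deletionBlocks = random.nextInt(10) + 1;
      id2Count.put(containerId, deletionBlocks);
    }
    System.out.println("tracked containers: " + id2Count.size());
  }
}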
---------------------------------------------------------------------
[22/50] [abbrv] hadoop git commit: HADOOP-15441. Log KMS URL and
token service at debug level. Contributed by Gabor Bota
Posted by xy...@apache.org.
HADOOP-15441. Log KMS URL and token service at debug level. Contributed by Gabor Bota
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5449d36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5449d36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5449d36
Branch: refs/heads/HDDS-4
Commit: a5449d36312c3de953e05ba8b3b5acf818a0c7e7
Parents: 1194ec3
Author: Rushabh Shah <sh...@apache.org>
Authored: Sat May 12 12:19:13 2018 -0500
Committer: Rushabh Shah <sh...@apache.org>
Committed: Sat May 12 12:19:13 2018 -0500
----------------------------------------------------------------------
.../java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5449d36/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index dddd358..08787a5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -402,7 +402,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT),
new EncryptedQueueRefiller());
authToken = new DelegationTokenAuthenticatedURL.Token();
- LOG.info("KMSClientProvider for KMS url: {} delegation token service: {}" +
+ LOG.debug("KMSClientProvider for KMS url: {} delegation token service: {}" +
" created.", kmsUrl, dtService);
}
---------------------------------------------------------------------
[11/50] [abbrv] hadoop git commit: YARN-8274. Fixed a bug in the docker
start command. Contributed by Jason Lowe
Posted by xy...@apache.org.
YARN-8274. Fixed a bug in the docker start command.
Contributed by Jason Lowe
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f7912e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f7912e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f7912e0
Branch: refs/heads/HDDS-4
Commit: 8f7912e0fee5de608ce8824fa5bd81b01b9a7c38
Parents: c1d64d6
Author: Eric Yang <ey...@apache.org>
Authored: Fri May 11 14:23:16 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Fri May 11 14:23:16 2018 -0400
----------------------------------------------------------------------
.../container-executor/impl/utils/docker-util.c | 14 ++++++++++++--
.../container-executor/test/utils/test_docker_util.cc | 4 ++--
2 files changed, 14 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f7912e0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
index 8cd59f7..5be02a9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
@@ -830,6 +830,7 @@ free_and_exit:
int get_docker_start_command(const char *command_file, const struct configuration *conf, args *args) {
int ret = 0;
+ char *docker = NULL;
char *container_name = NULL;
struct configuration command_config = {0, NULL};
ret = read_and_verify_command_file(command_file, DOCKER_START_COMMAND, &command_config);
@@ -842,9 +843,18 @@ int get_docker_start_command(const char *command_file, const struct configuratio
return INVALID_DOCKER_CONTAINER_NAME;
}
+ docker = get_docker_binary(conf);
+ ret = add_to_args(args, docker);
+ free(docker);
+ if (ret != 0) {
+ ret = BUFFER_TOO_SMALL;
+ goto free_and_exit;
+ }
+
ret = add_docker_config_param(&command_config, args);
if (ret != 0) {
- return BUFFER_TOO_SMALL;
+ ret = BUFFER_TOO_SMALL;
+ goto free_and_exit;
}
ret = add_to_args(args, DOCKER_START_COMMAND);
@@ -933,7 +943,7 @@ static int set_pid_namespace(const struct configuration *command_config,
if (pid_host_enabled != NULL) {
if (strcmp(pid_host_enabled, "1") == 0 ||
strcasecmp(pid_host_enabled, "True") == 0) {
- ret = add_to_args(args, "--pid='host'");
+ ret = add_to_args(args, "--pid=host");
if (ret != 0) {
ret = BUFFER_TOO_SMALL;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f7912e0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
index 1096935..3746fa1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
@@ -365,7 +365,7 @@ namespace ContainerExecutor {
std::vector<std::pair<std::string, std::string> > file_cmd_vec;
file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
"[docker-command-execution]\n docker-command=start\n name=container_e1_12312_11111_02_000001",
- "start container_e1_12312_11111_02_000001"));
+ "/usr/bin/docker start container_e1_12312_11111_02_000001"));
std::vector<std::pair<std::string, int> > bad_file_cmd_vec;
bad_file_cmd_vec.push_back(std::make_pair<std::string, int>(
@@ -514,7 +514,7 @@ namespace ContainerExecutor {
std::vector<std::pair<std::string, std::string> >::const_iterator itr;
std::vector<std::pair<std::string, int> >::const_iterator itr2;
file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
- "[docker-command-execution]\n docker-command=run\n pid=host", "--pid='host'"));
+ "[docker-command-execution]\n docker-command=run\n pid=host", "--pid=host"));
file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
"[docker-command-execution]\n docker-command=run", ""));
bad_file_cmd_vec.push_back(std::make_pair<std::string, int>(
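The C fix above makes get_docker_start_command emit the docker binary first, so the generated command is "/usr/bin/docker start <container>" rather than the bare "start <container>" the old test expected. For readers on the Java side, a hedged sketch of the same ordering rule; the class and method here are invented for illustration and are not part of the container-executor:

import java.util.ArrayList;
import java.util.List;

public class DockerArgsSketch {
  // Illustrative only: mirrors the argument ordering the C patch enforces.
  static List<String> buildStartCommand(String dockerBinary,
      String containerName) {
    List<String> cmd = new ArrayList<>();
    cmd.add(dockerBinary);   // was missing before the fix
    cmd.add("start");        // DOCKER_START_COMMAND
    cmd.add(containerName);
    return cmd;
  }

  public static void main(String[] args) {
    System.out.println(String.join(" ", buildStartCommand(
        "/usr/bin/docker", "container_e1_12312_11111_02_000001")));
    // prints: /usr/bin/docker start container_e1_12312_11111_02_000001
  }
}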
---------------------------------------------------------------------
[50/50] [abbrv] hadoop git commit: Revert "Bad merge with
996a627b289947af3894bf83e7b63ec702a665cd"
Posted by xy...@apache.org.
Revert "Bad merge with 996a627b289947af3894bf83e7b63ec702a665cd"
This reverts commit 996a627b289947af3894bf83e7b63ec702a665cd.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/938baa21
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/938baa21
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/938baa21
Branch: refs/heads/HDDS-4
Commit: 938baa2192f4ab6d500e6ba79d86cfb56559da1a
Parents: 996a627
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Tue May 15 16:56:24 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Tue May 15 16:56:24 2018 -0700
----------------------------------------------------------------------
hadoop-hdds/common/src/main/resources/ozone-default.xml | 12 ++++++++++++
1 file changed, 12 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/938baa21/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 6998a85..deb286d 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -120,6 +120,18 @@
</description>
</property>
<property>
+ <name>dfs.ratis.client.request.timeout.duration</name>
+ <value>3s</value>
+ <tag>OZONE, RATIS, MANAGEMENT</tag>
+ <description>The timeout duration for ratis client request.</description>
+ </property>
+ <property>
+ <name>dfs.ratis.server.request.timeout.duration</name>
+ <value>3s</value>
+ <tag>OZONE, RATIS, MANAGEMENT</tag>
+ <description>The timeout duration for ratis server request.</description>
+ </property>
+ <property>
<name>ozone.container.report.interval</name>
<value>60000ms</value>
<tag>OZONE, CONTAINER, MANAGEMENT</tag>
---------------------------------------------------------------------
[34/50] [abbrv] hadoop git commit: HDDS-19. Update ozone to latest
ratis snapshot build (0.1.1-alpha-d7d7061-SNAPSHOT). Contributed by Lokesh
Jain.
Posted by xy...@apache.org.
HDDS-19. Update ozone to latest ratis snapshot build (0.1.1-alpha-d7d7061-SNAPSHOT). Contributed by Lokesh Jain.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6653f4ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6653f4ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6653f4ba
Branch: refs/heads/HDDS-4
Commit: 6653f4ba2ee21e6deb3736b41381451428f620e0
Parents: 960940e
Author: Mukul Kumar Singh <ms...@apache.org>
Authored: Mon May 14 22:25:03 2018 +0530
Committer: Mukul Kumar Singh <ms...@apache.org>
Committed: Mon May 14 22:25:03 2018 +0530
----------------------------------------------------------------------
.../apache/hadoop/hdds/scm/ScmConfigKeys.java | 13 ++++++++
.../apache/hadoop/ozone/OzoneConfigKeys.java | 11 +++++++
.../common/src/main/resources/ozone-default.xml | 12 +++++++
.../server/ratis/XceiverServerRatis.java | 33 ++++++++++++++++++--
hadoop-project/pom.xml | 10 ++----
5 files changed, 69 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6653f4ba/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 7f40ab2..29ccf30 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -19,6 +19,9 @@ package org.apache.hadoop.hdds.scm;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.ratis.util.TimeDuration;
+
+import java.util.concurrent.TimeUnit;
/**
* This class contains constants for configuration keys used in SCM.
@@ -62,6 +65,16 @@ public final class ScmConfigKeys {
"dfs.container.ratis.segment.preallocated.size";
public static final int
DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = 128 * 1024 * 1024;
+ public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =
+ "dfs.ratis.client.request.timeout.duration";
+ public static final TimeDuration
+ DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT =
+ TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS);
+ public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY =
+ "dfs.ratis.server.request.timeout.duration";
+ public static final TimeDuration
+ DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT =
+ TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS);
// TODO : this is copied from OzoneConsts, may need to move to a better place
public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6653f4ba/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 72531a2..b8dbd7b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.ratis.util.TimeDuration;
/**
* This class contains constants for configuration keys used in Ozone.
@@ -226,6 +227,16 @@ public final class OzoneConfigKeys {
= ScmConfigKeys.OZONE_SCM_CHUNK_MAX_SIZE;
public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR =
"dfs.container.ratis.datanode.storage.dir";
+ public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =
+ ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY;
+ public static final TimeDuration
+ DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT =
+ ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT;
+ public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY =
+ ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY;
+ public static final TimeDuration
+ DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT =
+ ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT;
public static final String OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
"ozone.web.authentication.kerberos.principal";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6653f4ba/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index cb0ab18..774b1b8 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -120,6 +120,18 @@
</description>
</property>
<property>
+ <name>dfs.ratis.client.request.timeout.duration</name>
+ <value>3s</value>
+ <tag>OZONE, RATIS, MANAGEMENT</tag>
+ <description>The timeout duration for ratis client request.</description>
+ </property>
+ <property>
+ <name>dfs.ratis.server.request.timeout.duration</name>
+ <value>3s</value>
+ <tag>OZONE, RATIS, MANAGEMENT</tag>
+ <description>The timeout duration for ratis server request.</description>
+ </property>
+ <property>
<name>ozone.container.report.interval</name>
<value>60000ms</value>
<tag>OZONE, CONTAINER, MANAGEMENT</tag>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6653f4ba/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index 4bd55f1..46def09 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.ozone.container.common.transport.server
.XceiverServerSpi;
import org.apache.ratis.RaftConfigKeys;
import org.apache.ratis.RatisHelper;
+import org.apache.ratis.client.RaftClientConfigKeys;
import org.apache.ratis.conf.RaftProperties;
import org.apache.ratis.grpc.GrpcConfigKeys;
import org.apache.ratis.netty.NettyConfigKeys;
@@ -78,11 +79,31 @@ public final class XceiverServerRatis implements XceiverServerSpi {
final int numWriteChunkThreads = conf.getInt(
OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY,
OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT);
+ TimeUnit timeUnit =
+ OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT
+ .getUnit();
+ long duration = conf.getTimeDuration(
+ OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY,
+ OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT
+ .getDuration(), timeUnit);
+ final TimeDuration clientRequestTimeout =
+ TimeDuration.valueOf(duration, timeUnit);
+ timeUnit = OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT
+ .getUnit();
+ duration = conf.getTimeDuration(
+ OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY,
+ OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT
+ .getDuration(), timeUnit);
+ final TimeDuration serverRequestTimeout =
+ TimeDuration.valueOf(duration, timeUnit);
Objects.requireNonNull(dd, "id == null");
this.port = port;
- RaftProperties serverProperties = newRaftProperties(rpc, port,
- storageDir, maxChunkSize, raftSegmentSize, raftSegmentPreallocatedSize);
+ RaftProperties serverProperties =
+ newRaftProperties(rpc, port, storageDir, maxChunkSize, raftSegmentSize,
+ raftSegmentPreallocatedSize);
+ setRequestTimeout(serverProperties, clientRequestTimeout,
+ serverRequestTimeout);
writeChunkExecutor =
new ThreadPoolExecutor(numWriteChunkThreads, numWriteChunkThreads,
@@ -99,6 +120,14 @@ public final class XceiverServerRatis implements XceiverServerSpi {
.build();
}
+ private static void setRequestTimeout(RaftProperties serverProperties,
+ TimeDuration clientRequestTimeout, TimeDuration serverRequestTimeout) {
+ RaftClientConfigKeys.Rpc
+ .setRequestTimeout(serverProperties, clientRequestTimeout);
+ RaftServerConfigKeys.Rpc
+ .setRequestTimeout(serverProperties, serverRequestTimeout);
+ }
+
private static RaftProperties newRaftProperties(
RpcType rpc, int port, String storageDir, int scmChunkSize,
int raftSegmentSize, int raftSegmentPreallocatedSize) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6653f4ba/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 862a693..bcb816e 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -97,7 +97,7 @@
<ldap-api.version>1.0.0-M33</ldap-api.version>
<!-- Apache Ratis version -->
- <ratis.version>0.1.1-alpha-8fd74ed-SNAPSHOT</ratis.version>
+ <ratis.version>0.1.1-alpha-d7d7061-SNAPSHOT</ratis.version>
<jcache.version>1.0-alpha-1</jcache.version>
<ehcache.version>3.3.1</ehcache.version>
<hikari.version>2.4.12</hikari.version>
@@ -106,7 +106,7 @@
<!-- Maven protoc compiler -->
<protobuf-maven-plugin.version>0.5.1</protobuf-maven-plugin.version>
- <protobuf-compile.version>3.1.0</protobuf-compile.version>
+ <protobuf-compile.version>3.5.0</protobuf-compile.version>
<os-maven-plugin.version>1.5.0.Final</os-maven-plugin.version>
<!-- define the Java language version used by the compiler -->
@@ -881,12 +881,6 @@
</dependency>
<dependency>
- <groupId>org.jctools</groupId>
- <artifactId>jctools-core</artifactId>
- <version>1.2.1</version>
- </dependency>
-
- <dependency>
<groupId>org.apache.ratis</groupId>
<artifactId>ratis-proto-shaded</artifactId>
<version>${ratis.version}</version>
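Both new keys resolve through Configuration.getTimeDuration, so values carry a unit suffix (the "3s" defaults above) and are converted to the unit of the TimeDuration default, as the XceiverServerRatis change shows. A minimal standalone sketch of that resolution, assuming only hadoop-common on the classpath:

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;

public class TimeoutConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // A suffixed value such as "3s" is parsed against the unit requested.
    conf.set("dfs.ratis.client.request.timeout.duration", "3s");
    long millis = conf.getTimeDuration(
        "dfs.ratis.client.request.timeout.duration",
        3000, TimeUnit.MILLISECONDS);
    System.out.println("client request timeout = " + millis + " ms"); // 3000
  }
}

Keeping the default's TimeUnit as the conversion target, as the patch does, means the configured value and the default are always compared in the same unit.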
---------------------------------------------------------------------
[05/50] [abbrv] hadoop git commit: YARN-7003. DRAINING state of
queues is not recovered after RM restart. Contributed by Tao Yang.
Posted by xy...@apache.org.
YARN-7003. DRAINING state of queues is not recovered after RM restart. Contributed by Tao Yang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9db9cd95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9db9cd95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9db9cd95
Branch: refs/heads/HDDS-4
Commit: 9db9cd95bd0348070a286e69e7965c03c9bd39d6
Parents: d76fbbc
Author: Weiwei Yang <ww...@apache.org>
Authored: Fri May 11 10:47:04 2018 +0800
Committer: Weiwei Yang <ww...@apache.org>
Committed: Fri May 11 10:47:04 2018 +0800
----------------------------------------------------------------------
.../scheduler/capacity/AbstractCSQueue.java | 15 +++++
.../scheduler/capacity/CapacityScheduler.java | 7 +++
.../scheduler/capacity/TestQueueState.java | 60 ++++++++++++++++++++
3 files changed, 82 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9db9cd95/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 651d0e9..67b676b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -1244,4 +1244,19 @@ public abstract class AbstractCSQueue implements CSQueue {
public Map<String, Float> getUserWeights() {
return userWeights;
}
+
+ public void recoverDrainingState() {
+ try {
+ this.writeLock.lock();
+ if (getState() == QueueState.STOPPED) {
+ updateQueueState(QueueState.DRAINING);
+ }
+ LOG.info("Recover draining state for queue " + this.getQueuePath());
+ if (getParent() != null && getParent().getState() == QueueState.STOPPED) {
+ ((AbstractCSQueue) getParent()).recoverDrainingState();
+ }
+ } finally {
+ this.writeLock.unlock();
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9db9cd95/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 1d6c104..162d3bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.ReservationId;
import org.apache.hadoop.yarn.api.records.Resource;
@@ -808,6 +809,12 @@ public class CapacityScheduler extends
throw new QueueInvalidException(queueErrorMsg);
}
}
+ // When recovering apps in this queue but queue is in STOPPED state,
+ // that means its previous state was DRAINING. So we auto transit
+ // the state to DRAINING for recovery.
+ if (queue.getState() == QueueState.STOPPED) {
+ ((LeafQueue) queue).recoverDrainingState();
+ }
// Submit to the queue
try {
queue.submitApplication(applicationId, user, queueName);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9db9cd95/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueState.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueState.java
index 9f2933e..0a39e99 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueState.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueState.java
@@ -32,7 +32,12 @@ import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.junit.Assert;
@@ -197,4 +202,59 @@ public class TestQueueState {
.thenCallRealMethod();
return application;
}
+
+ @Test (timeout = 30000)
+ public void testRecoverDrainingStateAfterRMRestart() throws Exception {
+ // init conf
+ CapacitySchedulerConfiguration newConf =
+ new CapacitySchedulerConfiguration();
+ newConf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
+ newConf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED,
+ false);
+ newConf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
+ newConf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, 1);
+ newConf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[]{Q1});
+ newConf.setQueues(Q1_PATH, new String[]{Q2});
+ newConf.setCapacity(Q1_PATH, 100);
+ newConf.setCapacity(Q2_PATH, 100);
+
+ // init state store
+ MemoryRMStateStore newMemStore = new MemoryRMStateStore();
+ newMemStore.init(newConf);
+ // init RM & NMs & Nodes
+ MockRM rm = new MockRM(newConf, newMemStore);
+ rm.start();
+ MockNM nm = rm.registerNode("h1:1234", 204800);
+
+ // submit an app, AM is running on nm1
+ RMApp app = rm.submitApp(1024, "appname", "appuser", null, Q2);
+ MockRM.launchAM(app, rm, nm);
+ rm.waitForState(app.getApplicationId(), RMAppState.ACCEPTED);
+ // update queue state to STOPPED
+ newConf.setState(Q1_PATH, QueueState.STOPPED);
+ CapacityScheduler capacityScheduler =
+ (CapacityScheduler) rm.getRMContext().getScheduler();
+ capacityScheduler.reinitialize(newConf, rm.getRMContext());
+ // current queue state should be DRAINING
+ Assert.assertEquals(QueueState.DRAINING,
+ capacityScheduler.getQueue(Q2).getState());
+ Assert.assertEquals(QueueState.DRAINING,
+ capacityScheduler.getQueue(Q1).getState());
+
+ // RM restart
+ rm = new MockRM(newConf, newMemStore);
+ rm.start();
+ rm.registerNode("h1:1234", 204800);
+
+ // queue state should be DRAINING after app recovered
+ rm.waitForState(app.getApplicationId(), RMAppState.ACCEPTED);
+ capacityScheduler = (CapacityScheduler) rm.getRMContext().getScheduler();
+ Assert.assertEquals(QueueState.DRAINING,
+ capacityScheduler.getQueue(Q2).getState());
+ Assert.assertEquals(QueueState.DRAINING,
+ capacityScheduler.getQueue(Q1).getState());
+
+ // close rm
+ rm.close();
+ }
}
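The rule the patch encodes: if a queue still has applications to recover but is found in STOPPED state, its pre-restart state must have been DRAINING, so it (and any STOPPED ancestors) is flipped back under the queue's write lock. A compact sketch of that walk, with invented minimal types rather than the real CSQueue hierarchy:

import java.util.concurrent.locks.ReentrantLock;

public class QueueRecoverySketch {
  enum QueueState { RUNNING, DRAINING, STOPPED }

  static class Queue {
    private final ReentrantLock writeLock = new ReentrantLock();
    private QueueState state;
    private final Queue parent;

    Queue(QueueState state, Queue parent) {
      this.state = state;
      this.parent = parent;
    }

    // Mirrors recoverDrainingState(): flip STOPPED back to DRAINING under
    // the write lock, then walk up through any STOPPED ancestors.
    void recoverDrainingState() {
      writeLock.lock();
      try {
        if (state == QueueState.STOPPED) {
          state = QueueState.DRAINING;
        }
        if (parent != null && parent.state == QueueState.STOPPED) {
          parent.recoverDrainingState();
        }
      } finally {
        writeLock.unlock();
      }
    }
  }

  public static void main(String[] args) {
    Queue root = new Queue(QueueState.STOPPED, null);
    Queue leaf = new Queue(QueueState.STOPPED, root);
    leaf.recoverDrainingState();
    System.out.println(leaf.state + " / " + root.state); // DRAINING / DRAINING
  }
}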
---------------------------------------------------------------------
[06/50] [abbrv] hadoop git commit: HDFS-13346. RBF: Fix
synchronization of router quota and nameservice quota.
Posted by xy...@apache.org.
HDFS-13346. RBF: Fix synchronization of router quota and nameservice quota.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a922b9c8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a922b9c8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a922b9c8
Branch: refs/heads/HDDS-4
Commit: a922b9c82cc564e86dc0ef84bcc3597cb7b7c211
Parents: 9db9cd9
Author: Yiqun Lin <yq...@apache.org>
Authored: Fri May 11 14:51:30 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Fri May 11 14:51:30 2018 +0800
----------------------------------------------------------------------
.../federation/router/RouterAdminServer.java | 28 ++++++++++++-
.../federation/router/TestRouterAdminCLI.java | 10 +++++
.../federation/router/TestRouterQuota.java | 43 +++++++++++++++++++-
3 files changed, 79 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a922b9c8/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
index 3da9a5a..139dfb8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
@@ -26,6 +26,7 @@ import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.proto.RouterProtocolProtos.RouterAdminProtocolService;
import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolServerSideTranslatorPB;
@@ -54,6 +55,7 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableE
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
@@ -228,7 +230,31 @@ public class RouterAdminServer extends AbstractService
@Override
public UpdateMountTableEntryResponse updateMountTableEntry(
UpdateMountTableEntryRequest request) throws IOException {
- return getMountTableStore().updateMountTableEntry(request);
+ UpdateMountTableEntryResponse response =
+ getMountTableStore().updateMountTableEntry(request);
+
+ MountTable mountTable = request.getEntry();
+ if (mountTable != null) {
+ synchronizeQuota(mountTable);
+ }
+ return response;
+ }
+
+ /**
+ * Synchronize the quota value across mount table and subclusters.
+ * @param mountTable Quota set in given mount table.
+ * @throws IOException
+ */
+ private void synchronizeQuota(MountTable mountTable) throws IOException {
+ String path = mountTable.getSourcePath();
+ long nsQuota = mountTable.getQuota().getQuota();
+ long ssQuota = mountTable.getQuota().getSpaceQuota();
+
+ if (nsQuota != HdfsConstants.QUOTA_DONT_SET
+ || ssQuota != HdfsConstants.QUOTA_DONT_SET) {
+ this.router.getRpcServer().getQuotaModule().setQuota(path, nsQuota,
+ ssQuota, null);
+ }
}
@Override
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a922b9c8/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
index 2537c19..7e04e61 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
@@ -51,6 +51,8 @@ import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.internal.util.reflection.Whitebox;
import com.google.common.base.Supplier;
@@ -104,6 +106,14 @@ public class TestRouterAdminCLI {
membership.registerNamenode(
createNamenodeReport("ns1", "nn1", HAServiceState.ACTIVE));
stateStore.refreshCaches(true);
+
+ // Mock the quota module since no real namenode is started up.
+ Quota quota = Mockito
+ .spy(routerContext.getRouter().createRpcServer().getQuotaModule());
+ Mockito.doNothing().when(quota).setQuota(Mockito.anyString(),
+ Mockito.anyLong(), Mockito.anyLong(), Mockito.any());
+ Whitebox.setInternalState(
+ routerContext.getRouter().getRpcServer(), "quotaCall", quota);
}
@AfterClass
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a922b9c8/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
index 0e62200..c331c6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.federation.router;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNull;
import java.io.IOException;
@@ -37,9 +38,9 @@ import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
-import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.NamenodeContext;
import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
@@ -49,8 +50,10 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntr
import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -452,4 +455,42 @@ public class TestRouterQuota {
return removeResponse.getEntries();
}
+
+ @Test
+ public void testQuotaSynchronization() throws IOException {
+ long updateNsQuota = 3;
+ long updateSsQuota = 4;
+ MountTable mountTable = MountTable.newInstance("/quotaSync",
+ Collections.singletonMap("ns0", "/"), Time.now(), Time.now());
+ mountTable.setQuota(new RouterQuotaUsage.Builder().quota(1)
+ .spaceQuota(2).build());
+ // Add new mount table
+ addMountTable(mountTable);
+
+ // ensure the quota is not set as updated value
+ QuotaUsage realQuota = nnContext1.getFileSystem()
+ .getQuotaUsage(new Path("/"));
+ assertNotEquals(updateNsQuota, realQuota.getQuota());
+ assertNotEquals(updateSsQuota, realQuota.getSpaceQuota());
+
+ // Call periodicInvoke to ensure quota updated in quota manager
+ // and state store.
+ RouterQuotaUpdateService updateService = routerContext.getRouter()
+ .getQuotaCacheUpdateService();
+ updateService.periodicInvoke();
+
+ mountTable.setQuota(new RouterQuotaUsage.Builder().quota(updateNsQuota)
+ .spaceQuota(updateSsQuota).build());
+ UpdateMountTableEntryRequest updateRequest = UpdateMountTableEntryRequest
+ .newInstance(mountTable);
+ RouterClient client = routerContext.getAdminClient();
+ MountTableManager mountTableManager = client.getMountTableManager();
+ mountTableManager.updateMountTableEntry(updateRequest);
+
+ // verify if the quota is updated in real path
+ realQuota = nnContext1.getFileSystem().getQuotaUsage(
+ new Path("/"));
+ assertEquals(updateNsQuota, realQuota.getQuota());
+ assertEquals(updateSsQuota, realQuota.getSpaceQuota());
+ }
}
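Because no real namenode is running in TestRouterAdminCLI, the setup above spies the quota module and stubs setQuota to a no-op before injecting it with Whitebox. A standalone sketch of that spy-and-stub pattern, with an invented QuotaModule stand-in (Mockito 2.x style):

import static org.mockito.Mockito.anyLong;
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.spy;

public class QuotaSpySketch {
  // Invented stand-in for the router's quota module.
  static class QuotaModule {
    void setQuota(String path, long nsQuota, long ssQuota) {
      throw new IllegalStateException("would need a live namenode");
    }
  }

  public static void main(String[] args) {
    QuotaModule quota = spy(new QuotaModule());
    // doNothing().when(spy) stubs the void method without invoking the
    // real implementation, as in the test setup above.
    doNothing().when(quota).setQuota(anyString(), anyLong(), anyLong());
    quota.setQuota("/quotaSync", 3L, 4L); // no exception: stubbed out
    System.out.println("setQuota stubbed successfully");
  }
}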
---------------------------------------------------------------------
[30/50] [abbrv] hadoop git commit: YARN-8271. [UI2] Improve labeling
of certain tables. Contributed by Yesha Vora.
Posted by xy...@apache.org.
YARN-8271. [UI2] Improve labeling of certain tables. Contributed by Yesha Vora.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89d0b87a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89d0b87a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89d0b87a
Branch: refs/heads/HDDS-4
Commit: 89d0b87ad324db09f14e00031d20635083d576ed
Parents: f3f544b
Author: Sunil G <su...@apache.org>
Authored: Mon May 14 20:59:31 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Mon May 14 20:59:31 2018 +0530
----------------------------------------------------------------------
.../hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools.js | 2 +-
.../hadoop-yarn-ui/src/main/webapp/app/helpers/node-menu.js | 4 ++--
.../src/main/webapp/app/templates/cluster-overview.hbs | 4 ++--
.../src/main/webapp/app/templates/components/node-menu-panel.hbs | 4 ++--
.../app/templates/components/yarn-queue/capacity-queue-info.hbs | 2 +-
.../app/templates/components/yarn-queue/capacity-queue.hbs | 2 +-
.../app/templates/components/yarn-queue/fair-queue-info.hbs | 2 +-
.../webapp/app/templates/components/yarn-queue/fair-queue.hbs | 2 +-
.../hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools.hbs | 4 ++--
9 files changed, 13 insertions(+), 13 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/89d0b87a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools.js
index b36098b..cb0c8d3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools.js
@@ -23,7 +23,7 @@ export default Ember.Controller.extend({
text: "Home",
routeName: 'application'
}, {
- text: "Yarn Tools",
+ text: "YARN Tools",
routeName: 'yarn-tools',
}],
});
http://git-wip-us.apache.org/repos/asf/hadoop/blob/89d0b87a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-menu.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-menu.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-menu.js
index e1eba5a..ace50e6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-menu.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-menu.js
@@ -56,11 +56,11 @@ export default Ember.Helper.helper(function(params,hash) {
html = html + ' class="active"';
}
html = html + '><a href="#/yarn-node-apps/' + hash.nodeId + '/' + hash.nodeAddr +
- '">List of Applications</a></li><li';
+ '">List of Applications on this Node</a></li><li';
if (hash.path === 'yarn-node-containers') {
html = html + ' class="active"';
}
html = html + '><a href="#/yarn-node-containers/' +hash.nodeId + '/' + hash.nodeAddr +
- '">List of Containers</a></li></ul></ul></div>';
+ '">List of Containers on this Node</a></li></ul></ul></div>';
return Ember.String.htmlSafe(html);
});
http://git-wip-us.apache.org/repos/asf/hadoop/blob/89d0b87a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/cluster-overview.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/cluster-overview.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/cluster-overview.hbs
index ff4682a..e7752ea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/cluster-overview.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/cluster-overview.hbs
@@ -58,7 +58,7 @@
<div class="col-lg-4 container-fluid">
<div class="panel panel-default">
<div class="panel-heading">
- Finished Apps
+ Finished Apps From All Users
</div>
<div class="container-fluid" id="finishedapps-donut-chart">
{{donut-chart data=model.clusterMetrics.firstObject.getFinishedAppsDataForDonutChart
@@ -74,7 +74,7 @@
<div class="col-lg-4 container-fluid">
<div class="panel panel-default">
<div class="panel-heading">
- Running Apps
+ Running Apps From All Users
</div>
<div class="container-fluid" id="runningapps-donut-chart">
{{donut-chart data=model.clusterMetrics.firstObject.getRunningAppsDataForDonutChart
http://git-wip-us.apache.org/repos/asf/hadoop/blob/89d0b87a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/node-menu-panel.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/node-menu-panel.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/node-menu-panel.hbs
index acdff2f..ba48952 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/node-menu-panel.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/node-menu-panel.hbs
@@ -29,11 +29,11 @@
{{/link-to}}
{{/link-to}}
{{#link-to 'yarn-node-apps' tagName="li"}}
- {{#link-to 'yarn-node-apps' nodeId encodedAddr}}List of Applications
+ {{#link-to 'yarn-node-apps' nodeId encodedAddr}}List of Applications on this Node
{{/link-to}}
{{/link-to}}
{{#link-to 'yarn-node-containers' tagName="li"}}
- {{#link-to 'yarn-node-containers' nodeId encodedAddr}}List of Containers
+ {{#link-to 'yarn-node-containers' nodeId encodedAddr}}List of Containers on this Node
{{/link-to}}
{{/link-to}}
{{#if (and nmGpuInfo nmGpuInfo.info.totalGpuDevices)}}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/89d0b87a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue-info.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue-info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue-info.hbs
index a7260bc..11fed76 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue-info.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue-info.hbs
@@ -20,7 +20,7 @@
<div class="col-lg-6">
<div class="panel panel-default">
<div class="panel-heading">
- Running Apps: {{model.selected}}
+ Running Apps From All Users: {{model.selected}}
</div>
<div id="numapplications-donut-chart">
{{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData
http://git-wip-us.apache.org/repos/asf/hadoop/blob/89d0b87a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs
index 9ad2a6f..6615b1d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs
@@ -35,7 +35,7 @@
{{yarn-queue-partition-capacity-labels partitionMap=model.selectedQueue.partitionMap queue=model.selectedQueue filteredPartition=filteredPartition}}
</div>
- <h5> Running Apps </h5>
+ <h5> Running Apps From All Users in Queue </h5>
<div id="numapplications-donut-chart">
{{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData
showLabels=true
http://git-wip-us.apache.org/repos/asf/hadoop/blob/89d0b87a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-info.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-info.hbs
index a770bfe..3e368ae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-info.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-info.hbs
@@ -51,7 +51,7 @@
<div class="col-lg-6 container-fluid">
<div class="panel panel-default">
<div class="panel-heading">
- Running Apps: {{model.selected}}
+ Running Apps From All Users: {{model.selected}}
</div>
<div class="container-fluid" id="numapplications-donut-chart">
{{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData
http://git-wip-us.apache.org/repos/asf/hadoop/blob/89d0b87a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs
index dcc80c1..85670da 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs
@@ -50,7 +50,7 @@
<div class="container-fluid">
<div class="panel panel-default">
<div class="panel-heading">
- Running Apps: {{model.selected}}
+ Running Apps From All Users: {{model.selected}}
</div>
<div class="container-fluid" id="numapplications-donut-chart">
{{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData
http://git-wip-us.apache.org/repos/asf/hadoop/blob/89d0b87a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools.hbs
index 2f618fd..3efcf57 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools.hbs
@@ -67,7 +67,7 @@
<div class="col-lg-4 container-fluid">
<div class="panel panel-default">
<div class="panel-heading">
- Finished Apps
+ Finished Apps From All Users
</div>
<div class="container-fluid" id="finishedapps-donut-chart">
{{donut-chart data=model.clusterMetrics.firstObject.getFinishedAppsDataForDonutChart
@@ -84,7 +84,7 @@
<div class="col-lg-4 container-fluid">
<div class="panel panel-default">
<div class="panel-heading">
- Running Apps
+ Running Apps From All Users
</div>
<div class="container-fluid" id="runningapps-donut-chart">
{{donut-chart data=model.clusterMetrics.firstObject.getRunningAppsDataForDonutChart
[14/50] [abbrv] hadoop git commit: YARN-8244.
TestContainerSchedulerQueuing.testStartMultipleContainers failed. Contributed
by Jim Brennan
Posted by xy...@apache.org.
YARN-8244. TestContainerSchedulerQueuing.testStartMultipleContainers failed. Contributed by Jim Brennan
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc912994
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc912994
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc912994
Branch: refs/heads/HDDS-4
Commit: dc912994a1bcb511dfda32a0649cef0c9bdc47d3
Parents: ba12e88
Author: Jason Lowe <jl...@apache.org>
Authored: Fri May 11 14:07:32 2018 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Fri May 11 14:07:32 2018 -0500
----------------------------------------------------------------------
.../containermanager/TestContainerManager.java | 20 ++--
.../TestContainerSchedulerQueuing.java | 100 +++++++------------
2 files changed, 42 insertions(+), 78 deletions(-)
----------------------------------------------------------------------
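All of the hunks below apply the same refactoring: the tests previously built every StartContainerRequest from a single shared ContainerLaunchContext record, and now create a fresh record per request. A minimal sketch of the resulting pattern, assuming the stock YARN record factory (the class and helper names below are illustrative, not part of this patch):

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
    import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
    import org.apache.hadoop.yarn.api.records.Token;
    import org.apache.hadoop.yarn.factories.RecordFactory;
    import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

    /** Illustrative only: one launch context per request, never a shared one. */
    public class FreshLaunchContextSketch {

      private static final RecordFactory RECORD_FACTORY =
          RecordFactoryProvider.getRecordFactory(null);

      /** Builds one StartContainerRequest per token, each with its own context. */
      static List<StartContainerRequest> buildRequests(List<Token> tokens) {
        List<StartContainerRequest> requests = new ArrayList<>();
        for (Token token : tokens) {
          // A new record instance per request, mirroring the patch: no request
          // can observe mutations made to a context shared with another request.
          ContainerLaunchContext context =
              RECORD_FACTORY.newRecordInstance(ContainerLaunchContext.class);
          requests.add(StartContainerRequest.newInstance(context, token));
        }
        return requests;
      }
    }

Reusing one record across many requests is fragile because these records are mutable; a per-request instance keeps each request independent, at the cost of a few extra allocations in test code.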
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc912994/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
index 6d198a4..ee5259f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
@@ -1486,8 +1486,6 @@ public class TestContainerManager extends BaseContainerManagerTest {
containerManager.start();
List<StartContainerRequest> list = new ArrayList<>();
- ContainerLaunchContext containerLaunchContext =
- recordFactory.newRecordInstance(ContainerLaunchContext.class);
for (int i = 0; i < 10; i++) {
ContainerId cId = createContainerId(i);
long identifier = 0;
@@ -1500,8 +1498,9 @@ public class TestContainerManager extends BaseContainerManagerTest {
createContainerToken(cId, identifier, context.getNodeId(), user,
context.getContainerTokenSecretManager());
StartContainerRequest request =
- StartContainerRequest.newInstance(containerLaunchContext,
- containerToken);
+ StartContainerRequest.newInstance(
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
+ containerToken);
list.add(request);
}
StartContainersRequest requestList =
@@ -1531,9 +1530,6 @@ public class TestContainerManager extends BaseContainerManagerTest {
public void testMultipleContainersStopAndGetStatus() throws Exception {
containerManager.start();
List<StartContainerRequest> startRequest = new ArrayList<>();
- ContainerLaunchContext containerLaunchContext =
- recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
List<ContainerId> containerIds = new ArrayList<>();
for (int i = 0; i < 10; i++) {
ContainerId cId;
@@ -1547,8 +1543,9 @@ public class TestContainerManager extends BaseContainerManagerTest {
createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(),
user, context.getContainerTokenSecretManager());
StartContainerRequest request =
- StartContainerRequest.newInstance(containerLaunchContext,
- containerToken);
+ StartContainerRequest.newInstance(
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
+ containerToken);
startRequest.add(request);
containerIds.add(cId);
}
@@ -1788,15 +1785,14 @@ public class TestContainerManager extends BaseContainerManagerTest {
containerManager.start();
// Start 4 containers 0..4 with default resource (1024, 1)
List<StartContainerRequest> list = new ArrayList<>();
- ContainerLaunchContext containerLaunchContext = recordFactory
- .newRecordInstance(ContainerLaunchContext.class);
for (int i = 0; i < 4; i++) {
ContainerId cId = createContainerId(i);
long identifier = DUMMY_RM_IDENTIFIER;
Token containerToken = createContainerToken(cId, identifier,
context.getNodeId(), user, context.getContainerTokenSecretManager());
StartContainerRequest request = StartContainerRequest.newInstance(
- containerLaunchContext, containerToken);
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
+ containerToken);
list.add(request);
}
StartContainersRequest requestList = StartContainersRequest
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc912994/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
index 1da7e4a..70066c6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
@@ -229,19 +229,16 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
public void testStartMultipleContainers() throws Exception {
containerManager.start();
- ContainerLaunchContext containerLaunchContext =
- recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
List<StartContainerRequest> list = new ArrayList<>();
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(1024, 1),
context.getContainerTokenSecretManager(), null,
ExecutionType.GUARANTEED)));
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(1024, 1),
@@ -285,19 +282,16 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
public void testQueueMultipleContainers() throws Exception {
containerManager.start();
- ContainerLaunchContext containerLaunchContext =
- recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
List<StartContainerRequest> list = new ArrayList<>();
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(3072, 1),
context.getContainerTokenSecretManager(), null,
ExecutionType.GUARANTEED)));
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(3072, 1),
@@ -343,26 +337,23 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
public void testStartAndQueueMultipleContainers() throws Exception {
containerManager.start();
- ContainerLaunchContext containerLaunchContext =
- recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
List<StartContainerRequest> list = new ArrayList<>();
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(2048, 1),
context.getContainerTokenSecretManager(), null,
ExecutionType.OPPORTUNISTIC)));
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(1024, 1),
context.getContainerTokenSecretManager(), null,
ExecutionType.OPPORTUNISTIC)));
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(2), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(1024, 1),
@@ -415,12 +406,9 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
public void testStartOpportunistcsWhenOppQueueIsFull() throws Exception {
containerManager.start();
- ContainerLaunchContext containerLaunchContext =
- recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
List<StartContainerRequest> list = new ArrayList<>();
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(2048, 1),
@@ -432,7 +420,7 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
YarnConfiguration.DEFAULT_NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH);
for (int i = 1; i < maxOppQueueLength + 2; i++) {
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(i), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(2048, 1),
@@ -500,26 +488,23 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
public void testKillOpportunisticForGuaranteedContainer() throws Exception {
containerManager.start();
- ContainerLaunchContext containerLaunchContext =
- recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
List<StartContainerRequest> list = new ArrayList<>();
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(2048, 1),
context.getContainerTokenSecretManager(), null,
ExecutionType.OPPORTUNISTIC)));
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(2048, 1),
context.getContainerTokenSecretManager(), null,
ExecutionType.OPPORTUNISTIC)));
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(2), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(2048, 1),
@@ -589,12 +574,10 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
Listener listener = new Listener();
((NodeManager.DefaultContainerStateListener)containerManager.getContext().
getContainerStateTransitionListener()).addListener(listener);
- ContainerLaunchContext containerLaunchContext =
- recordFactory.newRecordInstance(ContainerLaunchContext.class);
List<StartContainerRequest> list = new ArrayList<>();
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(2048, 1),
@@ -610,7 +593,7 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
list = new ArrayList<>();
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(2048, 1),
@@ -718,42 +701,42 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
list = new ArrayList<>();
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(512, 1),
context.getContainerTokenSecretManager(), null,
ExecutionType.OPPORTUNISTIC)));
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(2), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(512, 1),
context.getContainerTokenSecretManager(), null,
ExecutionType.OPPORTUNISTIC)));
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(3), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(512, 1),
context.getContainerTokenSecretManager(), null,
ExecutionType.OPPORTUNISTIC)));
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(4), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(512, 1),
context.getContainerTokenSecretManager(), null,
ExecutionType.OPPORTUNISTIC)));
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(5), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(512, 1),
context.getContainerTokenSecretManager(), null,
ExecutionType.OPPORTUNISTIC)));
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(6), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(512, 1),
@@ -840,14 +823,14 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
list = new ArrayList<>();
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(512, 1),
context.getContainerTokenSecretManager(), null,
ExecutionType.OPPORTUNISTIC)));
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(2), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(512, 1),
@@ -887,26 +870,23 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
public void testKillMultipleOpportunisticContainers() throws Exception {
containerManager.start();
- ContainerLaunchContext containerLaunchContext =
- recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
List<StartContainerRequest> list = new ArrayList<>();
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(512, 1),
context.getContainerTokenSecretManager(), null,
ExecutionType.OPPORTUNISTIC)));
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(512, 1),
context.getContainerTokenSecretManager(), null,
ExecutionType.OPPORTUNISTIC)));
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(2), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(512, 1),
@@ -919,7 +899,7 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
list = new ArrayList<>();
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(3), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(1500, 1),
@@ -967,14 +947,11 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
public void testKillOnlyRequiredOpportunisticContainers() throws Exception {
containerManager.start();
- ContainerLaunchContext containerLaunchContext =
- recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
List<StartContainerRequest> list = new ArrayList<>();
// Fill NM with Opportunistic containers
for (int i = 0; i < 4; i++) {
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(i), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(512, 1),
@@ -990,7 +967,7 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
// Now ask for two Guaranteed containers
for (int i = 4; i < 6; i++) {
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(i), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(512, 1),
@@ -1036,26 +1013,23 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
public void testStopQueuedContainer() throws Exception {
containerManager.start();
- ContainerLaunchContext containerLaunchContext =
- recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
List<StartContainerRequest> list = new ArrayList<>();
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(2048, 1),
context.getContainerTokenSecretManager(), null,
ExecutionType.GUARANTEED)));
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(512, 1),
context.getContainerTokenSecretManager(), null,
ExecutionType.OPPORTUNISTIC)));
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(2), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(512, 1),
@@ -1142,19 +1116,16 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
((NodeManager.DefaultContainerStateListener)containerManager.getContext().
getContainerStateTransitionListener()).addListener(listener);
- ContainerLaunchContext containerLaunchContext =
- recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
List<StartContainerRequest> list = new ArrayList<>();
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(2048, 1),
context.getContainerTokenSecretManager(), null,
ExecutionType.OPPORTUNISTIC)));
list.add(StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
context.getNodeId(),
user, BuilderUtils.newResource(1024, 1),
@@ -1265,12 +1236,9 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
containerManager.start();
// Construct the Container-id
ContainerId cId = createContainerId(0);
- ContainerLaunchContext containerLaunchContext =
- recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
StartContainerRequest scRequest =
StartContainerRequest.newInstance(
- containerLaunchContext,
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
createContainerToken(cId, DUMMY_RM_IDENTIFIER,
context.getNodeId(), user, BuilderUtils.newResource(512, 1),
context.getContainerTokenSecretManager(), null));
[29/50] [abbrv] hadoop git commit: Add 2.9.1 release notes and
changes documents
Posted by xy...@apache.org.
Add 2.9.1 release notes and changes documents
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3f544b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3f544b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3f544b0
Branch: refs/heads/HDDS-4
Commit: f3f544b00475583c4c9fe52be0d2004390979bd0
Parents: 66c9905
Author: Sammi Chen <sa...@intel.com>
Authored: Mon May 14 15:14:02 2018 +0800
Committer: Sammi Chen <sa...@intel.com>
Committed: Mon May 14 15:14:02 2018 +0800
----------------------------------------------------------------------
.../markdown/release/2.9.1/CHANGES.2.9.1.md | 277 ++++++++++++++++
.../release/2.9.1/RELEASENOTES.2.9.1.md | 88 ++++++
.../jdiff/Apache_Hadoop_HDFS_2.9.1.xml | 312 +++++++++++++++++++
hadoop-project-dist/pom.xml | 2 +-
4 files changed, 678 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3f544b0/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md
new file mode 100644
index 0000000..c5e53f6
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md
@@ -0,0 +1,277 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# "Apache Hadoop" Changelog
+
+## Release 2.9.1 - 2018-04-16
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-12883](https://issues.apache.org/jira/browse/HDFS-12883) | RBF: Document Router and State Store metrics | Major | documentation | Yiqun Lin | Yiqun Lin |
+| [HDFS-12895](https://issues.apache.org/jira/browse/HDFS-12895) | RBF: Add ACL support for mount table | Major | . | Yiqun Lin | Yiqun Lin |
+| [YARN-7190](https://issues.apache.org/jira/browse/YARN-7190) | Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath | Major | timelineclient, timelinereader, timelineserver | Vrushali C | Varun Saxena |
+| [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | RBF: Use the ZooKeeper as the default State Store | Minor | documentation | Yiqun Lin | Yiqun Lin |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-13083](https://issues.apache.org/jira/browse/HDFS-13083) | RBF: Fix doc error setting up client | Major | federation | tartarus | tartarus |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-12756](https://issues.apache.org/jira/browse/HADOOP-12756) | Incorporate Aliyun OSS file system implementation | Major | fs, fs/oss | shimingfei | mingfei.shi |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-14872](https://issues.apache.org/jira/browse/HADOOP-14872) | CryptoInputStream should implement unbuffer | Major | fs, security | John Zhuge | John Zhuge |
+| [HADOOP-14964](https://issues.apache.org/jira/browse/HADOOP-14964) | AliyunOSS: backport Aliyun OSS module to branch-2 | Major | fs/oss | Genmao Yu | SammiChen |
+| [YARN-6851](https://issues.apache.org/jira/browse/YARN-6851) | Capacity Scheduler: document configs for controlling # containers allowed to be allocated per node heartbeat | Minor | . | Wei Yan | Wei Yan |
+| [YARN-7495](https://issues.apache.org/jira/browse/YARN-7495) | Improve robustness of the AggregatedLogDeletionService | Major | log-aggregation | Jonathan Eagles | Jonathan Eagles |
+| [YARN-7611](https://issues.apache.org/jira/browse/YARN-7611) | Node manager web UI should display container type in containers page | Major | nodemanager, webapp | Weiwei Yang | Weiwei Yang |
+| [HADOOP-15056](https://issues.apache.org/jira/browse/HADOOP-15056) | Fix TestUnbuffer#testUnbufferException failure | Minor | test | Jack Bearden | Jack Bearden |
+| [HADOOP-15012](https://issues.apache.org/jira/browse/HADOOP-15012) | Add readahead, dropbehind, and unbuffer to StreamCapabilities | Major | fs | John Zhuge | John Zhuge |
+| [HADOOP-15104](https://issues.apache.org/jira/browse/HADOOP-15104) | AliyunOSS: change the default value of max error retry | Major | fs/oss | wujinhu | wujinhu |
+| [YARN-7642](https://issues.apache.org/jira/browse/YARN-7642) | Add test case to verify context update after container promotion or demotion with or without auto update | Minor | nodemanager | Weiwei Yang | Weiwei Yang |
+| [HADOOP-15111](https://issues.apache.org/jira/browse/HADOOP-15111) | AliyunOSS: backport HADOOP-14993 to branch-2 | Major | fs/oss | Genmao Yu | Genmao Yu |
+| [HDFS-9023](https://issues.apache.org/jira/browse/HDFS-9023) | When NN is not able to identify DN for replication, reason behind it can be logged | Critical | hdfs-client, namenode | Surendra Singh Lilhore | Xiao Chen |
+| [YARN-7678](https://issues.apache.org/jira/browse/YARN-7678) | Ability to enable logging of container memory stats | Major | nodemanager | Jim Brennan | Jim Brennan |
+| [HDFS-12945](https://issues.apache.org/jira/browse/HDFS-12945) | Switch to ClientProtocol instead of NamenodeProtocols in NamenodeWebHdfsMethods | Minor | . | Wei Yan | Wei Yan |
+| [YARN-7590](https://issues.apache.org/jira/browse/YARN-7590) | Improve container-executor validation check | Major | security, yarn | Eric Yang | Eric Yang |
+| [HADOOP-15189](https://issues.apache.org/jira/browse/HADOOP-15189) | backport HADOOP-15039 to branch-2 and branch-3 | Blocker | . | Genmao Yu | Genmao Yu |
+| [HADOOP-15212](https://issues.apache.org/jira/browse/HADOOP-15212) | Add independent secret manager method for logging expired tokens | Major | security | Daryn Sharp | Daryn Sharp |
+| [YARN-7728](https://issues.apache.org/jira/browse/YARN-7728) | Expose container preemptions related information in Capacity Scheduler queue metrics | Major | . | Eric Payne | Eric Payne |
+| [MAPREDUCE-7048](https://issues.apache.org/jira/browse/MAPREDUCE-7048) | Uber AM can crash due to unknown task in statusUpdate | Major | mr-am | Peter Bacsko | Peter Bacsko |
+| [HADOOP-13972](https://issues.apache.org/jira/browse/HADOOP-13972) | ADLS to support per-store configuration | Major | fs/adl | John Zhuge | Sharad Sonker |
+| [YARN-7813](https://issues.apache.org/jira/browse/YARN-7813) | Capacity Scheduler Intra-queue Preemption should be configurable for each queue | Major | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne |
+| [HDFS-11187](https://issues.apache.org/jira/browse/HDFS-11187) | Optimize disk access for last partial chunk checksum of Finalized replica | Major | datanode | Wei-Chiu Chuang | Gabor Bota |
+| [HADOOP-15279](https://issues.apache.org/jira/browse/HADOOP-15279) | increase maven heap size recommendations | Minor | build, documentation, test | Allen Wittenauer | Allen Wittenauer |
+| [HDFS-12884](https://issues.apache.org/jira/browse/HDFS-12884) | BlockUnderConstructionFeature.truncateBlock should be of type BlockInfo | Major | namenode | Konstantin Shvachko | chencan |
+| [HADOOP-15334](https://issues.apache.org/jira/browse/HADOOP-15334) | Upgrade Maven surefire plugin | Major | build | Arpit Agarwal | Arpit Agarwal |
+| [YARN-7623](https://issues.apache.org/jira/browse/YARN-7623) | Fix the CapacityScheduler Queue configuration documentation | Major | . | Arun Suresh | Jonathan Hung |
+| [HDFS-13314](https://issues.apache.org/jira/browse/HDFS-13314) | NameNode should optionally exit if it detects FsImage corruption | Major | namenode | Arpit Agarwal | Arpit Agarwal |
+
+
+### BUG FIXES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-13723](https://issues.apache.org/jira/browse/HADOOP-13723) | AliyunOSSInputStream#read() should update read bytes stat correctly | Major | tools | Mingliang Liu | Mingliang Liu |
+| [HADOOP-14045](https://issues.apache.org/jira/browse/HADOOP-14045) | Aliyun OSS documentation missing from website | Major | documentation, fs/oss | Andrew Wang | Yiqun Lin |
+| [HADOOP-14458](https://issues.apache.org/jira/browse/HADOOP-14458) | Add missing imports to TestAliyunOSSFileSystemContract.java | Trivial | fs/oss, test | Mingliang Liu | Mingliang Liu |
+| [HADOOP-14466](https://issues.apache.org/jira/browse/HADOOP-14466) | Remove useless document from TestAliyunOSSFileSystemContract.java | Minor | documentation | Akira Ajisaka | Chen Liang |
+| [HDFS-12318](https://issues.apache.org/jira/browse/HDFS-12318) | Fix IOException condition for openInfo in DFSInputStream | Major | . | legend | legend |
+| [HDFS-12614](https://issues.apache.org/jira/browse/HDFS-12614) | FSPermissionChecker#getINodeAttrs() throws NPE when INodeAttributesProvider configured | Major | . | Manoj Govindassamy | Manoj Govindassamy |
+| [HDFS-12788](https://issues.apache.org/jira/browse/HDFS-12788) | Reset the upload button when file upload fails | Critical | ui, webhdfs | Brahma Reddy Battula | Brahma Reddy Battula |
+| [YARN-7388](https://issues.apache.org/jira/browse/YARN-7388) | TestAMRestart should be scheduler agnostic | Major | . | Haibo Chen | Haibo Chen |
+| [HDFS-12705](https://issues.apache.org/jira/browse/HDFS-12705) | WebHdfsFileSystem exceptions should retain the caused by exception | Major | hdfs | Daryn Sharp | Hanisha Koneru |
+| [YARN-7361](https://issues.apache.org/jira/browse/YARN-7361) | Improve the docker container runtime documentation | Major | . | Shane Kumpf | Shane Kumpf |
+| [YARN-7469](https://issues.apache.org/jira/browse/YARN-7469) | Capacity Scheduler Intra-queue preemption: User can starve if newest app is exactly at user limit | Major | capacity scheduler, yarn | Eric Payne | Eric Payne |
+| [YARN-7489](https://issues.apache.org/jira/browse/YARN-7489) | ConcurrentModificationException in RMAppImpl#getRMAppMetrics | Major | capacityscheduler | Tao Yang | Tao Yang |
+| [YARN-7525](https://issues.apache.org/jira/browse/YARN-7525) | Incorrect query parameters in cluster nodes REST API document | Minor | documentation | Tao Yang | Tao Yang |
+| [HADOOP-15045](https://issues.apache.org/jira/browse/HADOOP-15045) | ISA-L build options are documented in branch-2 | Major | build, documentation | Akira Ajisaka | Akira Ajisaka |
+| [YARN-7390](https://issues.apache.org/jira/browse/YARN-7390) | All reservation related test cases failed when TestYarnClient runs against Fair Scheduler. | Major | fairscheduler, reservation system | Yufei Gu | Yufei Gu |
+| [HDFS-12754](https://issues.apache.org/jira/browse/HDFS-12754) | Lease renewal can hit a deadlock | Major | . | Kuhu Shukla | Kuhu Shukla |
+| [HDFS-12832](https://issues.apache.org/jira/browse/HDFS-12832) | INode.getFullPathName may throw ArrayIndexOutOfBoundsException lead to NameNode exit | Critical | namenode | DENG FEI | Konstantin Shvachko |
+| [HDFS-11754](https://issues.apache.org/jira/browse/HDFS-11754) | Make FsServerDefaults cache configurable. | Minor | . | Rushabh S Shah | Mikhail Erofeev |
+| [YARN-7509](https://issues.apache.org/jira/browse/YARN-7509) | AsyncScheduleThread and ResourceCommitterService are still running after RM is transitioned to standby | Critical | . | Tao Yang | Tao Yang |
+| [YARN-7558](https://issues.apache.org/jira/browse/YARN-7558) | "yarn logs" command fails to get logs for running containers if UI authentication is enabled. | Critical | . | Namit Maheshwari | Xuan Gong |
+| [HDFS-12638](https://issues.apache.org/jira/browse/HDFS-12638) | Delete copy-on-truncate block along with the original block, when deleting a file being truncated | Blocker | hdfs | Jiandan Yang | Konstantin Shvachko |
+| [MAPREDUCE-5124](https://issues.apache.org/jira/browse/MAPREDUCE-5124) | AM lacks flow control for task events | Major | mr-am | Jason Lowe | Peter Bacsko |
+| [YARN-7455](https://issues.apache.org/jira/browse/YARN-7455) | quote\_and\_append\_arg can overflow buffer | Major | nodemanager | Jason Lowe | Jim Brennan |
+| [HADOOP-14985](https://issues.apache.org/jira/browse/HADOOP-14985) | Remove subversion related code from VersionInfoMojo.java | Minor | build | Akira Ajisaka | Ajay Kumar |
+| [HDFS-12889](https://issues.apache.org/jira/browse/HDFS-12889) | Router UI is missing robots.txt file | Major | . | Bharat Viswanadham | Bharat Viswanadham |
+| [HDFS-11576](https://issues.apache.org/jira/browse/HDFS-11576) | Block recovery will fail indefinitely if recovery time \> heartbeat interval | Critical | datanode, hdfs, namenode | Lukas Majercak | Lukas Majercak |
+| [YARN-7607](https://issues.apache.org/jira/browse/YARN-7607) | Remove the trailing duplicated timestamp in container diagnostics message | Minor | nodemanager | Weiwei Yang | Weiwei Yang |
+| [HADOOP-15080](https://issues.apache.org/jira/browse/HADOOP-15080) | Aliyun OSS: update oss sdk from 2.8.1 to 2.8.3 to remove its dependency on Cat-x "json-lib" | Blocker | fs/oss | Chris Douglas | SammiChen |
+| [YARN-7591](https://issues.apache.org/jira/browse/YARN-7591) | NPE in async-scheduling mode of CapacityScheduler | Critical | capacityscheduler | Tao Yang | Tao Yang |
+| [YARN-7608](https://issues.apache.org/jira/browse/YARN-7608) | Incorrect sTarget column causing DataTable warning on RM application and scheduler web page | Major | resourcemanager, webapp | Weiwei Yang | Gergely Novák |
+| [HDFS-12833](https://issues.apache.org/jira/browse/HDFS-12833) | Distcp: Update the usage of the delete option for its dependency on the update and overwrite options | Minor | distcp, hdfs | Harshakiran Reddy | usharani |
+| [YARN-7647](https://issues.apache.org/jira/browse/YARN-7647) | NM prints an inappropriate error log when node-labels is enabled | Minor | . | Yang Wang | Yang Wang |
+| [HDFS-12907](https://issues.apache.org/jira/browse/HDFS-12907) | Allow read-only access to reserved raw for non-superusers | Major | namenode | Daryn Sharp | Rushabh S Shah |
+| [HDFS-12881](https://issues.apache.org/jira/browse/HDFS-12881) | Output streams closed with IOUtils suppressing write errors | Major | . | Jason Lowe | Ajay Kumar |
+| [YARN-7595](https://issues.apache.org/jira/browse/YARN-7595) | Container launching code suppresses close exceptions after writes | Major | nodemanager | Jason Lowe | Jim Brennan |
+| [HADOOP-15085](https://issues.apache.org/jira/browse/HADOOP-15085) | Output streams closed with IOUtils suppressing write errors | Major | . | Jason Lowe | Jim Brennan |
+| [YARN-7661](https://issues.apache.org/jira/browse/YARN-7661) | NodeManager metrics return wrong value after update node resource | Major | . | Yang Wang | Yang Wang |
+| [HDFS-12347](https://issues.apache.org/jira/browse/HDFS-12347) | TestBalancerRPCDelay#testBalancerRPCDelay fails very frequently | Critical | test | Xiao Chen | Bharat Viswanadham |
+| [YARN-7542](https://issues.apache.org/jira/browse/YARN-7542) | Fix issue that causes some Running Opportunistic Containers to be recovered as PAUSED | Major | . | Arun Suresh | Sampada Dehankar |
+| [HADOOP-15143](https://issues.apache.org/jira/browse/HADOOP-15143) | NPE due to Invalid KerberosTicket in UGI | Major | . | Jitendra Nath Pandey | Mukul Kumar Singh |
+| [YARN-7692](https://issues.apache.org/jira/browse/YARN-7692) | Skip validating priority acls while recovering applications | Blocker | resourcemanager | Charan Hebri | Sunil G |
+| [MAPREDUCE-7028](https://issues.apache.org/jira/browse/MAPREDUCE-7028) | Concurrent task progress updates causing NPE in Application Master | Blocker | mr-am | Gergo Repas | Gergo Repas |
+| [YARN-7619](https://issues.apache.org/jira/browse/YARN-7619) | Max AM Resource value in Capacity Scheduler UI has to be refreshed for every user | Major | capacity scheduler, yarn | Eric Payne | Eric Payne |
+| [YARN-7699](https://issues.apache.org/jira/browse/YARN-7699) | queueUsagePercentage is coming as INF for getApp REST api call | Major | webapp | Sunil G | Sunil G |
+| [YARN-7508](https://issues.apache.org/jira/browse/YARN-7508) | NPE in FiCaSchedulerApp when debug log enabled in async-scheduling mode | Major | capacityscheduler | Tao Yang | Tao Yang |
+| [YARN-7663](https://issues.apache.org/jira/browse/YARN-7663) | RMAppImpl:Invalid event: START at KILLED | Minor | resourcemanager | lujie | lujie |
+| [YARN-6948](https://issues.apache.org/jira/browse/YARN-6948) | Invalid event: ATTEMPT\_ADDED at FINAL\_SAVING | Minor | yarn | lujie | lujie |
+| [YARN-7735](https://issues.apache.org/jira/browse/YARN-7735) | Fix typo in YARN documentation | Minor | documentation | Takanobu Asanuma | Takanobu Asanuma |
+| [YARN-7727](https://issues.apache.org/jira/browse/YARN-7727) | Incorrect log levels in few logs with QueuePriorityContainerCandidateSelector | Minor | yarn | Prabhu Joseph | Prabhu Joseph |
+| [HDFS-11915](https://issues.apache.org/jira/browse/HDFS-11915) | Sync rbw dir on the first hsync() to avoid file lost on power failure | Critical | . | Kanaka Kumar Avvaru | Vinayakumar B |
+| [HDFS-9049](https://issues.apache.org/jira/browse/HDFS-9049) | Make Datanode Netty reverse proxy port to be configurable | Major | datanode | Vinayakumar B | Vinayakumar B |
+| [HADOOP-15150](https://issues.apache.org/jira/browse/HADOOP-15150) | In FsShell, UGI params should be overridden through env vars (-D arg) | Major | . | Brahma Reddy Battula | Brahma Reddy Battula |
+| [HADOOP-15181](https://issues.apache.org/jira/browse/HADOOP-15181) | Typo in SecureMode.md | Trivial | documentation | Masahiro Tanaka | Masahiro Tanaka |
+| [YARN-7737](https://issues.apache.org/jira/browse/YARN-7737) | prelaunch.err file not found exception on container failure | Major | . | Jonathan Hung | Keqiu Hu |
+| [HDFS-13063](https://issues.apache.org/jira/browse/HDFS-13063) | Fix the incorrect spelling in HDFSHighAvailabilityWithQJM.md | Trivial | documentation | Jianfei Jiang | Jianfei Jiang |
+| [YARN-7102](https://issues.apache.org/jira/browse/YARN-7102) | NM heartbeat stuck when responseId overflows MAX\_INT | Critical | . | Botong Huang | Botong Huang |
+| [HADOOP-15151](https://issues.apache.org/jira/browse/HADOOP-15151) | MapFile.fix creates a wrong index file in case of block-compressed data file. | Major | common | Grigori Rybkine | Grigori Rybkine |
+| [MAPREDUCE-7020](https://issues.apache.org/jira/browse/MAPREDUCE-7020) | Task timeout in uber mode can crash AM | Major | mr-am | Akira Ajisaka | Peter Bacsko |
+| [YARN-7698](https://issues.apache.org/jira/browse/YARN-7698) | A misleading variable's name in ApplicationAttemptEventDispatcher | Minor | resourcemanager | Jinjiang Ling | Jinjiang Ling |
+| [HDFS-13100](https://issues.apache.org/jira/browse/HDFS-13100) | Handle IllegalArgumentException when GETSERVERDEFAULTS is not implemented in webhdfs. | Critical | hdfs, webhdfs | Yongjun Zhang | Yongjun Zhang |
+| [YARN-6868](https://issues.apache.org/jira/browse/YARN-6868) | Add test scope to certain entries in hadoop-yarn-server-resourcemanager pom.xml | Major | yarn | Ray Chiang | Ray Chiang |
+| [YARN-7849](https://issues.apache.org/jira/browse/YARN-7849) | TestMiniYarnClusterNodeUtilization#testUpdateNodeUtilization fails due to heartbeat sync error | Major | test | Jason Lowe | Botong Huang |
+| [YARN-7801](https://issues.apache.org/jira/browse/YARN-7801) | AmFilterInitializer should addFilter after fill all parameters | Critical | . | Sumana Sathish | Wangda Tan |
+| [YARN-7890](https://issues.apache.org/jira/browse/YARN-7890) | NPE during container relaunch | Major | . | Billie Rinaldi | Jason Lowe |
+| [HDFS-12935](https://issues.apache.org/jira/browse/HDFS-12935) | Get ambiguous result for DFSAdmin command in HA mode when only one namenode is up | Major | tools | Jianfei Jiang | Jianfei Jiang |
+| [HDFS-13120](https://issues.apache.org/jira/browse/HDFS-13120) | Snapshot diff could be corrupted after concat | Major | namenode, snapshots | Xiaoyu Yao | Xiaoyu Yao |
+| [HDFS-10453](https://issues.apache.org/jira/browse/HDFS-10453) | ReplicationMonitor thread could get stuck for a long time due to the race between replication and delete of the same file in a large cluster. | Major | namenode | He Xiaoqiao | He Xiaoqiao |
+| [HDFS-8693](https://issues.apache.org/jira/browse/HDFS-8693) | refreshNamenodes does not support adding a new standby to a running DN | Critical | datanode, ha | Jian Fang | Ajith S |
+| [MAPREDUCE-7052](https://issues.apache.org/jira/browse/MAPREDUCE-7052) | TestFixedLengthInputFormat#testFormatCompressedIn is flaky | Major | client, test | Peter Bacsko | Peter Bacsko |
+| [HDFS-13112](https://issues.apache.org/jira/browse/HDFS-13112) | Token expiration edits may cause log corruption or deadlock | Critical | namenode | Daryn Sharp | Daryn Sharp |
+| [MAPREDUCE-7053](https://issues.apache.org/jira/browse/MAPREDUCE-7053) | Timed out tasks can fail to produce thread dump | Major | . | Jason Lowe | Jason Lowe |
+| [HADOOP-15206](https://issues.apache.org/jira/browse/HADOOP-15206) | BZip2 drops and duplicates records when input split size is small | Major | . | Aki Tanaka | Aki Tanaka |
+| [YARN-7947](https://issues.apache.org/jira/browse/YARN-7947) | Capacity Scheduler intra-queue preemption can NPE for non-schedulable apps | Major | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne |
+| [YARN-7945](https://issues.apache.org/jira/browse/YARN-7945) | Java Doc error in UnmanagedAMPoolManager for branch-2 | Major | . | Rohith Sharma K S | Botong Huang |
+| [HADOOP-14903](https://issues.apache.org/jira/browse/HADOOP-14903) | Add json-smart explicitly to pom.xml | Major | common | Ray Chiang | Ray Chiang |
+| [HDFS-12781](https://issues.apache.org/jira/browse/HDFS-12781) | After Datanode down, In Namenode UI Datanode tab is throwing warning message. | Major | datanode | Harshakiran Reddy | Brahma Reddy Battula |
+| [HDFS-12070](https://issues.apache.org/jira/browse/HDFS-12070) | Failed block recovery leaves files open indefinitely and at risk for data loss | Major | . | Daryn Sharp | Kihwal Lee |
+| [HADOOP-15251](https://issues.apache.org/jira/browse/HADOOP-15251) | Backport HADOOP-13514 (surefire upgrade) to branch-2 | Major | test | Chris Douglas | Chris Douglas |
+| [HADOOP-15275](https://issues.apache.org/jira/browse/HADOOP-15275) | Incorrect javadoc for return type of RetryPolicy#shouldRetry | Minor | documentation | Nanda kumar | Nanda kumar |
+| [YARN-7511](https://issues.apache.org/jira/browse/YARN-7511) | NPE in ContainerLocalizer when localization failed for running container | Major | nodemanager | Tao Yang | Tao Yang |
+| [MAPREDUCE-7023](https://issues.apache.org/jira/browse/MAPREDUCE-7023) | TestHadoopArchiveLogs.testCheckFilesAndSeedApps fails on rerun | Minor | test | Gergely Novák | Gergely Novák |
+| [HADOOP-15283](https://issues.apache.org/jira/browse/HADOOP-15283) | Upgrade from findbugs 3.0.1 to spotbugs 3.1.2 in branch-2 to fix docker image build | Major | . | Xiao Chen | Akira Ajisaka |
+| [YARN-7736](https://issues.apache.org/jira/browse/YARN-7736) | Fix itemization in YARN federation document | Minor | documentation | Akira Ajisaka | Sen Zhao |
+| [HDFS-13164](https://issues.apache.org/jira/browse/HDFS-13164) | File not closed if streamer fail with DSQuotaExceededException | Major | hdfs-client | Xiao Chen | Xiao Chen |
+| [HDFS-13109](https://issues.apache.org/jira/browse/HDFS-13109) | Support fully qualified hdfs path in EZ commands | Major | hdfs | Hanisha Koneru | Hanisha Koneru |
+| [MAPREDUCE-6930](https://issues.apache.org/jira/browse/MAPREDUCE-6930) | mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are both present twice in mapred-default.xml | Major | mrv2 | Daniel Templeton | Sen Zhao |
+| [HDFS-12156](https://issues.apache.org/jira/browse/HDFS-12156) | TestFSImage fails without -Pnative | Major | test | Akira Ajisaka | Akira Ajisaka |
+| [HADOOP-15308](https://issues.apache.org/jira/browse/HADOOP-15308) | TestConfiguration fails on Windows because of paths | Major | . | Íñigo Goiri | Xiao Liang |
+| [YARN-7636](https://issues.apache.org/jira/browse/YARN-7636) | Re-reservation count may overflow when cluster resource exhausted for a long time | Major | capacityscheduler | Tao Yang | Tao Yang |
+| [HDFS-12886](https://issues.apache.org/jira/browse/HDFS-12886) | Ignore minReplication for block recovery | Major | hdfs, namenode | Lukas Majercak | Lukas Majercak |
+| [HDFS-13296](https://issues.apache.org/jira/browse/HDFS-13296) | GenericTestUtils generates paths with drive letter in Windows and fail webhdfs related test cases | Major | . | Xiao Liang | Xiao Liang |
+| [HDFS-13268](https://issues.apache.org/jira/browse/HDFS-13268) | TestWebHdfsFileContextMainOperations fails on Windows | Major | . | Íñigo Goiri | Xiao Liang |
+| [YARN-8054](https://issues.apache.org/jira/browse/YARN-8054) | Improve robustness of the LocalDirsHandlerService MonitoringTimerTask thread | Major | . | Jonathan Eagles | Jonathan Eagles |
+| [YARN-7873](https://issues.apache.org/jira/browse/YARN-7873) | Revert YARN-6078 | Blocker | . | Billie Rinaldi | Billie Rinaldi |
+| [HDFS-13195](https://issues.apache.org/jira/browse/HDFS-13195) | DataNode conf page cannot display the current value after reconfig | Minor | datanode | maobaolong | maobaolong |
+| [HADOOP-15320](https://issues.apache.org/jira/browse/HADOOP-15320) | Remove customized getFileBlockLocations for hadoop-azure and hadoop-azure-datalake | Major | fs/adl, fs/azure | shanyu zhao | shanyu zhao |
+| [HADOOP-12862](https://issues.apache.org/jira/browse/HADOOP-12862) | LDAP Group Mapping over SSL can not specify trust store | Major | . | Wei-Chiu Chuang | Wei-Chiu Chuang |
+| [HDFS-13427](https://issues.apache.org/jira/browse/HDFS-13427) | Fix the section titles of transparent encryption document | Minor | documentation | Akira Ajisaka | Akira Ajisaka |
+
+
+### TESTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-14696](https://issues.apache.org/jira/browse/HADOOP-14696) | parallel tests don't work for Windows | Minor | test | Allen Wittenauer | Allen Wittenauer |
+
+
+### SUB-TASKS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-13481](https://issues.apache.org/jira/browse/HADOOP-13481) | User end documents for Aliyun OSS FileSystem | Minor | fs, fs/oss | Genmao Yu | Genmao Yu |
+| [HADOOP-13591](https://issues.apache.org/jira/browse/HADOOP-13591) | Unit test failure in TestOSSContractGetFileStatus and TestOSSContractRootDir | Major | fs, fs/oss | Genmao Yu | Genmao Yu |
+| [HADOOP-13624](https://issues.apache.org/jira/browse/HADOOP-13624) | Rename TestAliyunOSSContractDispCp | Major | fs, fs/oss | Kai Zheng | Genmao Yu |
+| [HADOOP-14065](https://issues.apache.org/jira/browse/HADOOP-14065) | AliyunOSS: oss directory filestatus should use meta time | Major | fs/oss | Fei Hui | Fei Hui |
+| [HADOOP-13768](https://issues.apache.org/jira/browse/HADOOP-13768) | AliyunOSS: handle the failure in the batch delete operation `deleteDirs`. | Major | fs | Genmao Yu | Genmao Yu |
+| [HADOOP-14069](https://issues.apache.org/jira/browse/HADOOP-14069) | AliyunOSS: listStatus returns wrong file info | Major | fs/oss | Fei Hui | Fei Hui |
+| [HADOOP-13769](https://issues.apache.org/jira/browse/HADOOP-13769) | AliyunOSS: update oss sdk version | Major | fs, fs/oss | Genmao Yu | Genmao Yu |
+| [HADOOP-14072](https://issues.apache.org/jira/browse/HADOOP-14072) | AliyunOSS: Failed to read from stream when seek beyond the download size | Major | fs/oss | Genmao Yu | Genmao Yu |
+| [HADOOP-14192](https://issues.apache.org/jira/browse/HADOOP-14192) | Aliyun OSS FileSystem contract test should implement getTestBaseDir() | Major | fs/oss | Mingliang Liu | Mingliang Liu |
+| [HADOOP-14194](https://issues.apache.org/jira/browse/HADOOP-14194) | Aliyun OSS should not use empty endpoint as default | Major | fs/oss | Mingliang Liu | Genmao Yu |
+| [HADOOP-14787](https://issues.apache.org/jira/browse/HADOOP-14787) | AliyunOSS: Implement the `createNonRecursive` operator | Major | fs, fs/oss | Genmao Yu | Genmao Yu |
+| [HADOOP-14649](https://issues.apache.org/jira/browse/HADOOP-14649) | Update aliyun-sdk-oss version to 2.8.1 | Major | fs/oss | Ray Chiang | Genmao Yu |
+| [HADOOP-14799](https://issues.apache.org/jira/browse/HADOOP-14799) | Update nimbus-jose-jwt to 4.41.1 | Major | . | Ray Chiang | Ray Chiang |
+| [HADOOP-14997](https://issues.apache.org/jira/browse/HADOOP-14997) | Add hadoop-aliyun as dependency of hadoop-cloud-storage | Minor | fs/oss | Genmao Yu | Genmao Yu |
+| [HDFS-12801](https://issues.apache.org/jira/browse/HDFS-12801) | RBF: Set MountTableResolver as default file resolver | Minor | . | Íñigo Goiri | Íñigo Goiri |
+| [YARN-7430](https://issues.apache.org/jira/browse/YARN-7430) | Enable user re-mapping for Docker containers by default | Blocker | security, yarn | Eric Yang | Eric Yang |
+| [YARN-6128](https://issues.apache.org/jira/browse/YARN-6128) | Add support for AMRMProxy HA | Major | amrmproxy, nodemanager | Subru Krishnan | Botong Huang |
+| [HADOOP-15024](https://issues.apache.org/jira/browse/HADOOP-15024) | AliyunOSS: support user agent configuration and include that & Hadoop version information to oss server | Major | fs, fs/oss | SammiChen | SammiChen |
+| [HDFS-12858](https://issues.apache.org/jira/browse/HDFS-12858) | RBF: Add router admin commands usage in HDFS commands reference doc | Minor | documentation | Yiqun Lin | Yiqun Lin |
+| [HDFS-12835](https://issues.apache.org/jira/browse/HDFS-12835) | RBF: Fix Javadoc parameter errors | Minor | . | Wei Yan | Wei Yan |
+| [YARN-7587](https://issues.apache.org/jira/browse/YARN-7587) | Skip dispatching opportunistic containers to nodes whose queue is already full | Major | . | Weiwei Yang | Weiwei Yang |
+| [HDFS-12396](https://issues.apache.org/jira/browse/HDFS-12396) | Webhdfs file system should get delegation token from kms provider. | Major | encryption, kms, webhdfs | Rushabh S Shah | Rushabh S Shah |
+| [YARN-6704](https://issues.apache.org/jira/browse/YARN-6704) | Add support for work preserving NM restart when FederationInterceptor is enabled in AMRMProxyService | Major | . | Botong Huang | Botong Huang |
+| [HDFS-12875](https://issues.apache.org/jira/browse/HDFS-12875) | RBF: Complete logic for -readonly option of dfsrouteradmin add command | Major | . | Yiqun Lin | Íñigo Goiri |
+| [YARN-7630](https://issues.apache.org/jira/browse/YARN-7630) | Fix AMRMToken rollover handling in AMRMProxy | Minor | . | Botong Huang | Botong Huang |
+| [HDFS-12937](https://issues.apache.org/jira/browse/HDFS-12937) | RBF: Add more unit tests for router admin commands | Major | test | Yiqun Lin | Yiqun Lin |
+| [HDFS-12988](https://issues.apache.org/jira/browse/HDFS-12988) | RBF: Mount table entries not properly updated in the local cache | Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HADOOP-15156](https://issues.apache.org/jira/browse/HADOOP-15156) | backport HADOOP-15086 rename fix to branch-2 | Major | fs/azure | Thomas Marquardt | Thomas Marquardt |
+| [YARN-7716](https://issues.apache.org/jira/browse/YARN-7716) | metricsTimeStart and metricsTimeEnd should be all lower case in the doc | Major | timelinereader | Haibo Chen | Haibo Chen |
+| [HDFS-12802](https://issues.apache.org/jira/browse/HDFS-12802) | RBF: Control MountTableResolver cache size | Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HADOOP-15027](https://issues.apache.org/jira/browse/HADOOP-15027) | AliyunOSS: Support multi-thread pre-read to improve sequential read from Hadoop to Aliyun OSS performance | Major | fs/oss | wujinhu | wujinhu |
+| [HDFS-13028](https://issues.apache.org/jira/browse/HDFS-13028) | RBF: Fix spurious TestRouterRpc#testProxyGetStats | Minor | . | Íñigo Goiri | Íñigo Goiri |
+| [YARN-5094](https://issues.apache.org/jira/browse/YARN-5094) | some YARN container events have timestamp of -1 | Critical | . | Sangjin Lee | Haibo Chen |
+| [YARN-7782](https://issues.apache.org/jira/browse/YARN-7782) | Enable user re-mapping for Docker containers in yarn-default.xml | Blocker | security, yarn | Eric Yang | Eric Yang |
+| [HDFS-12772](https://issues.apache.org/jira/browse/HDFS-12772) | RBF: Federation Router State State Store internal API | Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13042](https://issues.apache.org/jira/browse/HDFS-13042) | RBF: Heartbeat Router State | Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13049](https://issues.apache.org/jira/browse/HDFS-13049) | RBF: Inconsistent Router OPTS config in branch-2 and branch-3 | Minor | . | Wei Yan | Wei Yan |
+| [HDFS-12574](https://issues.apache.org/jira/browse/HDFS-12574) | Add CryptoInputStream to WebHdfsFileSystem read call. | Major | encryption, kms, webhdfs | Rushabh S Shah | Rushabh S Shah |
+| [HDFS-13044](https://issues.apache.org/jira/browse/HDFS-13044) | RBF: Add a safe mode for the Router | Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13043](https://issues.apache.org/jira/browse/HDFS-13043) | RBF: Expose the state of the Routers in the federation | Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13068](https://issues.apache.org/jira/browse/HDFS-13068) | RBF: Add router admin option to manage safe mode | Major | . | Íñigo Goiri | Yiqun Lin |
+| [HDFS-13119](https://issues.apache.org/jira/browse/HDFS-13119) | RBF: Manage unavailable clusters | Major | . | Íñigo Goiri | Yiqun Lin |
+| [HDFS-13187](https://issues.apache.org/jira/browse/HDFS-13187) | RBF: Fix Routers information shown in the web UI | Minor | . | Wei Yan | Wei Yan |
+| [HDFS-13184](https://issues.apache.org/jira/browse/HDFS-13184) | RBF: Improve the unit test TestRouterRPCClientRetries | Minor | test | Yiqun Lin | Yiqun Lin |
+| [HDFS-13199](https://issues.apache.org/jira/browse/HDFS-13199) | RBF: Fix the hdfs router page missing label icon issue | Major | federation, hdfs | maobaolong | maobaolong |
+| [HDFS-13214](https://issues.apache.org/jira/browse/HDFS-13214) | RBF: Complete document of Router configuration | Major | . | Tao Jie | Yiqun Lin |
+| [HDFS-13230](https://issues.apache.org/jira/browse/HDFS-13230) | RBF: ConnectionManager's cleanup task will compare each pool's own active conns with its total conns | Minor | . | Wei Yan | Chao Sun |
+| [HDFS-13233](https://issues.apache.org/jira/browse/HDFS-13233) | RBF: MountTableResolver doesn't return the correct mount point of the given path | Major | hdfs | wangzhiyuan | wangzhiyuan |
+| [HDFS-13212](https://issues.apache.org/jira/browse/HDFS-13212) | RBF: Fix router location cache issue | Major | federation, hdfs | Weiwei Wu | Weiwei Wu |
+| [HDFS-13232](https://issues.apache.org/jira/browse/HDFS-13232) | RBF: ConnectionPool should return first usable connection | Minor | . | Wei Yan | Ekanth S |
+| [HDFS-13240](https://issues.apache.org/jira/browse/HDFS-13240) | RBF: Update some inaccurate document descriptions | Minor | . | Yiqun Lin | Yiqun Lin |
+| [HDFS-11399](https://issues.apache.org/jira/browse/HDFS-11399) | Many tests fails in Windows due to injecting disk failures | Major | . | Yiqun Lin | Yiqun Lin |
+| [HDFS-13241](https://issues.apache.org/jira/browse/HDFS-13241) | RBF: TestRouterSafemode failed if the port 8888 is in use | Major | hdfs, test | maobaolong | maobaolong |
+| [HDFS-13253](https://issues.apache.org/jira/browse/HDFS-13253) | RBF: Quota management incorrect parent-child relationship judgement | Major | . | Yiqun Lin | Yiqun Lin |
+| [HDFS-13226](https://issues.apache.org/jira/browse/HDFS-13226) | RBF: Throw the exception if mount table entry validated failed | Major | hdfs | maobaolong | maobaolong |
+| [HDFS-12773](https://issues.apache.org/jira/browse/HDFS-12773) | RBF: Improve State Store FS implementation | Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13198](https://issues.apache.org/jira/browse/HDFS-13198) | RBF: RouterHeartbeatService throws out CachedStateStore related exceptions when starting router | Minor | . | Wei Yan | Wei Yan |
+| [HDFS-13224](https://issues.apache.org/jira/browse/HDFS-13224) | RBF: Resolvers to support mount points across multiple subclusters | Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HADOOP-15262](https://issues.apache.org/jira/browse/HADOOP-15262) | AliyunOSS: move files under a directory in parallel when rename a directory | Major | fs/oss | wujinhu | wujinhu |
+| [HDFS-13215](https://issues.apache.org/jira/browse/HDFS-13215) | RBF: Move Router to its own module | Major | . | Íñigo Goiri | Wei Yan |
+| [HDFS-13250](https://issues.apache.org/jira/browse/HDFS-13250) | RBF: Router to manage requests across multiple subclusters | Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13318](https://issues.apache.org/jira/browse/HDFS-13318) | RBF: Fix FindBugs in hadoop-hdfs-rbf | Minor | . | Íñigo Goiri | Ekanth S |
+| [HDFS-12792](https://issues.apache.org/jira/browse/HDFS-12792) | RBF: Test Router-based federation using HDFSContract | Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-12512](https://issues.apache.org/jira/browse/HDFS-12512) | RBF: Add WebHDFS | Major | fs | Íñigo Goiri | Wei Yan |
+| [HDFS-13291](https://issues.apache.org/jira/browse/HDFS-13291) | RBF: Implement available space based OrderResolver | Major | . | Yiqun Lin | Yiqun Lin |
+| [HDFS-13204](https://issues.apache.org/jira/browse/HDFS-13204) | RBF: Optimize name service safe mode icon | Minor | . | liuhongtong | liuhongtong |
+| [HDFS-13352](https://issues.apache.org/jira/browse/HDFS-13352) | RBF: Add xsl stylesheet for hdfs-rbf-default.xml | Major | documentation | Takanobu Asanuma | Takanobu Asanuma |
+| [YARN-8010](https://issues.apache.org/jira/browse/YARN-8010) | Add config in FederationRMFailoverProxy to not bypass facade cache when failing over | Minor | . | Botong Huang | Botong Huang |
+| [HDFS-13347](https://issues.apache.org/jira/browse/HDFS-13347) | RBF: Cache datanode reports | Minor | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13289](https://issues.apache.org/jira/browse/HDFS-13289) | RBF: TestConnectionManager#testCleanup() test case need correction | Minor | . | Dibyendu Karmakar | Dibyendu Karmakar |
+| [HDFS-13364](https://issues.apache.org/jira/browse/HDFS-13364) | RBF: Support NamenodeProtocol in the Router | Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HADOOP-14651](https://issues.apache.org/jira/browse/HADOOP-14651) | Update okhttp version to 2.7.5 | Major | fs/adl | Ray Chiang | Ray Chiang |
+| [HADOOP-14999](https://issues.apache.org/jira/browse/HADOOP-14999) | AliyunOSS: provide one asynchronous multi-part based uploading mechanism | Major | fs/oss | Genmao Yu | Genmao Yu |
+
+
+### OTHER:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-15149](https://issues.apache.org/jira/browse/HADOOP-15149) | CryptoOutputStream should implement StreamCapabilities | Major | fs | Mike Drob | Xiao Chen |
+| [YARN-7691](https://issues.apache.org/jira/browse/YARN-7691) | Add Unit Tests for ContainersLauncher | Major | . | Sampada Dehankar | Sampada Dehankar |
+| [HADOOP-15177](https://issues.apache.org/jira/browse/HADOOP-15177) | Update the release year to 2018 | Blocker | build | Akira Ajisaka | Bharat Viswanadham |
+
+
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3f544b0/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md
new file mode 100644
index 0000000..bed70b1
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md
@@ -0,0 +1,88 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# "Apache Hadoop" 2.9.1 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
+
+
+---
+
+* [HADOOP-12756](https://issues.apache.org/jira/browse/HADOOP-12756) | *Major* | **Incorporate Aliyun OSS file system implementation**
+
+Aliyun OSS is widely used among China's cloud users. This work implements a new Hadoop-compatible filesystem, AliyunOSSFileSystem, with the oss:// scheme, similar to the existing s3a and azure support.
+
+
+---
+
+* [HADOOP-14964](https://issues.apache.org/jira/browse/HADOOP-14964) | *Major* | **AliyunOSS: backport Aliyun OSS module to branch-2**
+
+Aliyun OSS is widely used among China's cloud users. This work backports to branch-2 the new Hadoop-compatible filesystem AliyunOSSFileSystem with the oss:// scheme, similar to the existing s3a and azure support.
+
+
+---
+
+* [HDFS-12883](https://issues.apache.org/jira/browse/HDFS-12883) | *Major* | **RBF: Document Router and State Store metrics**
+
+This JIRA makes the following change:
+Change the Router metrics context from 'router' to 'dfs'.
+
+
+---
+
+* [HDFS-12895](https://issues.apache.org/jira/browse/HDFS-12895) | *Major* | **RBF: Add ACL support for mount table**
+
+Mount table entries now support ACLs. Ordinary users can no longer modify the entries they created before this change, because those old entries (which previously had no permissions) are assigned the default permissions owner:superuser, group:supergroup, permission:755. To modify such entries, log in as the superuser.
+
+
+---
+
+* [YARN-7190](https://issues.apache.org/jira/browse/YARN-7190) | *Major* | **Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath**
+
+Ensure that in 2.x only the NM classpath, not the user classpath, gets the TSv2-related HBase jars.
+
+
+---
+
+* [HADOOP-15156](https://issues.apache.org/jira/browse/HADOOP-15156) | *Major* | **backport HADOOP-15086 rename fix to branch-2**
+
+[WASB] Fix Azure implementation of Filesystem.rename to ensure that at most one operation succeeds when there are multiple, concurrent rename operations targeting the same destination file.
+
+
+---
+
+* [HADOOP-15027](https://issues.apache.org/jira/browse/HADOOP-15027) | *Major* | **AliyunOSS: Support multi-thread pre-read to improve sequential read from Hadoop to Aliyun OSS performance**
+
+Support multi-thread pre-read in AliyunOSSInputStream to improve the sequential read performance from Hadoop to Aliyun OSS.
+
+
+---
+
+* [HDFS-13083](https://issues.apache.org/jira/browse/HDFS-13083) | *Major* | **RBF: Fix doc error setting up client**
+
+Fix the documentation error in setting up HDFS Router-based Federation.
+
+
+---
+
+* [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | *Minor* | **RBF: Use the ZooKeeper as the default State Store**
+
+Change the default State Store from a local file to ZooKeeper. This requires an additional ZooKeeper address to be configured.
+
+
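To illustrate the HDFS-13099 change above, here is a minimal Java sketch of pointing the Router State Store at ZooKeeper. The property names follow the RBF documentation, and the ZooKeeper quorum hosts are placeholder assumptions.

import org.apache.hadoop.conf.Configuration;

public class RouterZkStateStoreConfig {
  /** Returns a Configuration that selects the ZooKeeper-backed State Store. */
  public static Configuration configure(Configuration conf) {
    // ZooKeeper-backed State Store driver (the new default described above).
    conf.set("dfs.federation.router.store.driver.class",
        "org.apache.hadoop.hdfs.server.federation.store.driver.impl."
            + "StateStoreZooKeeperImpl");
    // The additional ZooKeeper address the note refers to (hosts are placeholders).
    conf.set("hadoop.zk.address", "zk1:2181,zk2:2181,zk3:2181");
    return conf;
  }
}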
+
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3f544b0/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml
new file mode 100644
index 0000000..a5d87c7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml
@@ -0,0 +1,312 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Mon Apr 16 12:03:07 UTC 2018 -->
+
+<api
+ xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+ xsi:noNamespaceSchemaLocation='api.xsd'
+ name="Apache Hadoop HDFS 2.9.1"
+ jdversion="1.0.9">
+
+<!-- Command line arguments = -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -verbose -classpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/classes:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.9.1.jar:/usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.9.1.jar:/maven/org/slf4j/slf4j-api/1.7.25/slf4j-api-1.7.25.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/com/nimbusds/nimbus-jose-jwt/4.41.1/nimbus-jose-jwt-4.41.1.jar:/maven/com/github/stephenc/jcip/jcip-annotations/1.0-1/jcip-annotations-1.0-1.jar:/maven/net/minidev/json-smart/1.3.1/json-smart-1.3.1.jar:/maven/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/maven/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/maven/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/maven/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/jline/jline/0.9.94/jline-0.9.94.jar:/maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-2.9.1.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/commons-net/commons-net/3.1/commons-net-3.1.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/org/mortbay/jetty/jetty-sslengine/6.1.26/jetty-sslengine-6.1.26.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/maven/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/maven/javax/activation/activation/1.1/activation-1.1.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/maven/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/maven/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/maven/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/maven/org/apache/commons/commons-lang3/3.4/commons-lang3-3.4.jar:/maven/org/apache/avro/avro/1.7.7/avro-1.7.7.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.5/snappy-java-1.0.5.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/maven/org/codehaus/woodstox/stax2-api/3.1.4/stax2-api-3.1.4.jar:/maven/com/fasterxml/woodstox/woodstox-core/5.0.3/woodstox-core-5.0.3.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-2.9.1.jar:/maven/com/squareup/okhttp/okhttp/2.7.5/okhttp-2.7.5.jar:/maven/com/squareup/okio/okio/1.6.0/okio-1.6.0.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/maven/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/maven/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/maven/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/maven/asm/asm/3.2/asm-3.2.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/commons-daemon/commons-daemon/1.0.13/commons-daemon-1.0.13.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/maven/org/slf4j/slf4j-log4j12/1.7.25/slf4j-log4j12-1.7.25.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/maven/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/maven/io/netty/netty-all/4.0.23.Final/netty-all-4.0.23.Final.jar:/maven/xerces/xercesImpl/2.9.1/xercesImpl-2.9.1.jar:/maven/xml-apis/xml-apis/1.3.04/xml-apis-1.3.04.jar:/maven/org/apache/htrace/htrace-core4/4.1.0-incubating/htrace-core4-4.1.0-incubating.jar:/maven/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar:/maven/com/fasterxml/jackson/core/jackson-databind/2.7.8/jackson-databind-2.7.8.jar:/maven/com/fasterxml/jackson/core/jackson-annotations/2.7.8/jackson-annotations-2.7.8.jar:/maven/com/fasterxml/jackson/core/jackson-core/2.7.8/jackson-core-2.7.8.jar -sourcepath /build/source/hadoop-hdfs-project/hadoop-hdfs/src/main/java -apidir /build/source/hadoop-hdfs-project/hadoop-hdfs/target/site/jdiff/xml -apiname Apache Hadoop HDFS 2.9.1 -->
+<package name="org.apache.hadoop.hdfs">
+ <doc>
+ <![CDATA[<p>A distributed implementation of {@link
+org.apache.hadoop.fs.FileSystem}. This is loosely modelled after
+Google's <a href="http://research.google.com/archive/gfs.html">GFS</a>.</p>
+
+<p>The most important difference is that unlike GFS, Hadoop DFS files
+have strictly one writer at any one time. Bytes are always appended
+to the end of the writer's stream. There is no notion of "record appends"
+or "mutations" that are then checked or reordered. Writers simply emit
+a byte stream. That byte stream is guaranteed to be stored in the
+order written.</p>]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.hdfs.net">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol.datatransfer">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol.datatransfer.sasl">
+</package>
+<package name="org.apache.hadoop.hdfs.protocolPB">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.client">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.protocolPB">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.server">
+ <!-- start interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
+ <interface name="JournalNodeMXBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getJournalsStatus" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get status information (e.g., whether formatted) of JournalNode's journals.
+
+ @return A string presenting status for each journal]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the JMX management interface for JournalNode information]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.block">
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.delegation">
+</package>
+<package name="org.apache.hadoop.hdfs.server.balancer">
+</package>
+<package name="org.apache.hadoop.hdfs.server.blockmanagement">
+</package>
+<package name="org.apache.hadoop.hdfs.server.common">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset.impl">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web.webhdfs">
+</package>
+<package name="org.apache.hadoop.hdfs.server.mover">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode">
+ <!-- start interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
+ <interface name="AuditLogger" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Called during initialization of the logger.
+
+ @param conf The configuration object.]]>
+ </doc>
+ </method>
+ <method name="logAuditEvent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="succeeded" type="boolean"/>
+ <param name="userName" type="java.lang.String"/>
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="cmd" type="java.lang.String"/>
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+ <doc>
+ <![CDATA[Called to log an audit event.
+ <p>
+ This method must return as quickly as possible, since it's called
+ in a critical section of the NameNode's operation.
+
+ @param succeeded Whether authorization succeeded.
+ @param userName Name of the user executing the request.
+ @param addr Remote address of the request.
+ @param cmd The requested command.
+ @param src Path of affected source file.
+ @param dst Path of affected destination file (if any).
+ @param stat File information for operations that change the file's
+ metadata (permissions, owner, times, etc).]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface defining an audit logger.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
+ <class name="HdfsAuditLogger" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.server.namenode.AuditLogger"/>
+ <constructor name="HdfsAuditLogger"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logAuditEvent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="succeeded" type="boolean"/>
+ <param name="userName" type="java.lang.String"/>
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="cmd" type="java.lang.String"/>
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <param name="status" type="org.apache.hadoop.fs.FileStatus"/>
+ </method>
+ <method name="logAuditEvent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="succeeded" type="boolean"/>
+ <param name="userName" type="java.lang.String"/>
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="cmd" type="java.lang.String"/>
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="callerContext" type="org.apache.hadoop.ipc.CallerContext"/>
+ <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+ <doc>
+ <![CDATA[Same as
+ {@link #logAuditEvent(boolean, String, InetAddress, String, String, String,
+ FileStatus)} with additional parameters related to logging delegation token
+ tracking IDs.
+
+ @param succeeded Whether authorization succeeded.
+ @param userName Name of the user executing the request.
+ @param addr Remote address of the request.
+ @param cmd The requested command.
+ @param src Path of affected source file.
+ @param dst Path of affected destination file (if any).
+ @param stat File information for operations that change the file's metadata
+ (permissions, owner, times, etc).
+ @param callerContext Context information of the caller
+ @param ugi UserGroupInformation of the current user, or null if not logging
+ token tracking information
+ @param dtSecretManager The token secret manager, or null if not logging
+ token tracking information]]>
+ </doc>
+ </method>
+ <method name="logAuditEvent"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="succeeded" type="boolean"/>
+ <param name="userName" type="java.lang.String"/>
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="cmd" type="java.lang.String"/>
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+ <doc>
+ <![CDATA[Same as
+ {@link #logAuditEvent(boolean, String, InetAddress, String, String,
+ String, FileStatus, CallerContext, UserGroupInformation,
+ DelegationTokenSecretManager)} without {@link CallerContext} information.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Extension of {@link AuditLogger}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
+ <class name="INodeAttributeProvider" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="INodeAttributeProvider"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="start"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Initialize the provider. This method is called at NameNode startup
+ time.]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shutdown the provider. This method is called at NameNode shutdown time.]]>
+ </doc>
+ </method>
+ <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fullPath" type="java.lang.String"/>
+ <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+ </method>
+ <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathElements" type="java.lang.String[]"/>
+ <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+ </method>
+ <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="components" type="byte[][]"/>
+ <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+ </method>
+ <method name="getExternalAccessControlEnforcer" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultEnforcer" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"/>
+ <doc>
+ <![CDATA[Can be overridden by implementations to provide a custom
+ AccessControlEnforcer that supplies an alternate implementation of the
+ default permission checking logic.
+ @param defaultEnforcer The default AccessControlEnforcer
+ @return The AccessControlEnforcer to use]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.ha">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.snapshot">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top.window">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.web.resources">
+</package>
+<package name="org.apache.hadoop.hdfs.server.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.tools">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineEditsViewer">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineImageViewer">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.snapshot">
+</package>
+<package name="org.apache.hadoop.hdfs.util">
+</package>
+<package name="org.apache.hadoop.hdfs.web">
+</package>
+<package name="org.apache.hadoop.hdfs.web.resources">
+</package>
+
+</api>
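As an illustration of the AuditLogger interface documented in the JDiff XML above, here is a minimal Java sketch of a custom audit logger. The class name is hypothetical and the stdout output is for demonstration only; as the Javadoc above notes, a real logger must return quickly.

import java.net.InetAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.server.namenode.AuditLogger;

public class ConsoleAuditLogger implements AuditLogger {
  @Override
  public void initialize(Configuration conf) {
    // Called once at NameNode startup; read any custom settings here.
  }

  @Override
  public void logAuditEvent(boolean succeeded, String userName,
      InetAddress addr, String cmd, String src, String dst, FileStatus stat) {
    // Runs in a critical section of the NameNode, so it must return quickly.
    System.out.printf("allowed=%b ugi=%s ip=%s cmd=%s src=%s dst=%s%n",
        succeeded, userName, addr, cmd, src, dst);
  }
}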
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3f544b0/hadoop-project-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index cfaa698..5f83da3 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -145,7 +145,7 @@
<activeByDefault>false</activeByDefault>
</activation>
<properties>
- <jdiff.stable.api>3.0.2</jdiff.stable.api>
+ <jdiff.stable.api>2.9.1</jdiff.stable.api>
<jdiff.stability>-unstable</jdiff.stability>
<!-- Commented out for HADOOP-11776 -->
<!-- Uncomment param name="${jdiff.compatibility}" in javadoc doclet if compatibility is not empty -->
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[41/50] [abbrv] hadoop git commit: HADOOP-15442.
ITestS3AMetrics.testMetricsRegister can't know metrics source's name.
Contributed by Sean Mackrory.
Posted by xy...@apache.org.
HADOOP-15442. ITestS3AMetrics.testMetricsRegister can't know metrics source's name.
Contributed by Sean Mackrory.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6708374
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6708374
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6708374
Branch: refs/heads/HDDS-4
Commit: b6708374692e6c4d786e2f3f1f45cc7aa1e4e88f
Parents: 2bb647b
Author: Steve Loughran <st...@apache.org>
Authored: Tue May 15 16:13:56 2018 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Tue May 15 16:13:56 2018 +0100
----------------------------------------------------------------------
.../main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java | 3 ---
.../src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMetrics.java | 5 -----
2 files changed, 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6708374/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
index 29ee0c5..26ecefd 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
@@ -267,9 +267,6 @@ public class S3AInstrumentation implements Closeable, MetricsSource {
number = ++metricsSourceNameCounter;
}
String msName = METRICS_SOURCE_BASENAME + number;
- if (number > 1) {
- msName = msName + number;
- }
metricsSourceName = msName + "-" + name.getHost();
metricsSystem.register(metricsSourceName, "", this);
}
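For clarity, a worked Java example of the duplicated-counter bug the hunk above removes; the bucket host "mybucket" is a placeholder.

public class MetricsSourceNameExample {
  public static void main(String[] args) {
    String base = "S3AMetrics";     // METRICS_SOURCE_BASENAME
    int number = 2;                 // counter for a second filesystem instance
    String msName = base + number;  // "S3AMetrics2"
    // Removed branch: if (number > 1) { msName = msName + number; }
    String buggy = (number > 1) ? msName + number : msName;     // "S3AMetrics22"
    System.out.println("before fix: " + buggy + "-mybucket");   // S3AMetrics22-mybucket
    System.out.println("after fix:  " + msName + "-mybucket");  // S3AMetrics2-mybucket
  }
}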
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6708374/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMetrics.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMetrics.java
index e92ce78..972c665 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMetrics.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMetrics.java
@@ -38,11 +38,6 @@ public class ITestS3AMetrics extends AbstractS3ATestBase {
Path dest = path("testMetricsRegister");
ContractTestUtils.touch(fs, dest);
- String targetMetricSource = "S3AMetrics1" + "-" + fs.getBucket();
- assertNotNull("No metrics under test fs for " + targetMetricSource,
- fs.getInstrumentation().getMetricsSystem()
- .getSource(targetMetricSource));
-
MutableCounterLong fileCreated =
(MutableCounterLong) fs.getInstrumentation().getRegistry()
.get(Statistic.FILES_CREATED.getSymbol());
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[27/50] [abbrv] hadoop git commit: Add 2.9.1 release notes and
changes documents
Posted by xy...@apache.org.
Add 2.9.1 release notes and changes documents
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4dc346d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4dc346d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4dc346d
Branch: refs/heads/HDDS-4
Commit: e4dc346d651de4c9af05a9616f8fe6369895d8af
Parents: 32cbd0c
Author: littlezhou <we...@intel.com>
Authored: Mon May 14 14:24:01 2018 +0800
Committer: littlezhou <we...@intel.com>
Committed: Mon May 14 14:24:01 2018 +0800
----------------------------------------------------------------------
.../markdown/release/2.9.1/CHANGES.2.9.1.md | 277 ++++++++++++++++
.../release/2.9.1/RELEASENOTES.2.9.1.md | 88 ++++++
.../jdiff/Apache_Hadoop_HDFS_2.9.1.xml | 312 +++++++++++++++++++
hadoop-project-dist/pom.xml | 2 +-
4 files changed, 678 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4dc346d/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md
new file mode 100644
index 0000000..c5e53f6
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md
@@ -0,0 +1,277 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# "Apache Hadoop" Changelog
+
+## Release 2.9.1 - 2018-04-16
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-12883](https://issues.apache.org/jira/browse/HDFS-12883) | RBF: Document Router and State Store metrics | Major | documentation | Yiqun Lin | Yiqun Lin |
+| [HDFS-12895](https://issues.apache.org/jira/browse/HDFS-12895) | RBF: Add ACL support for mount table | Major | . | Yiqun Lin | Yiqun Lin |
+| [YARN-7190](https://issues.apache.org/jira/browse/YARN-7190) | Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath | Major | timelineclient, timelinereader, timelineserver | Vrushali C | Varun Saxena |
+| [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | RBF: Use the ZooKeeper as the default State Store | Minor | documentation | Yiqun Lin | Yiqun Lin |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-13083](https://issues.apache.org/jira/browse/HDFS-13083) | RBF: Fix doc error setting up client | Major | federation | tartarus | tartarus |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-12756](https://issues.apache.org/jira/browse/HADOOP-12756) | Incorporate Aliyun OSS file system implementation | Major | fs, fs/oss | shimingfei | mingfei.shi |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-14872](https://issues.apache.org/jira/browse/HADOOP-14872) | CryptoInputStream should implement unbuffer | Major | fs, security | John Zhuge | John Zhuge |
+| [HADOOP-14964](https://issues.apache.org/jira/browse/HADOOP-14964) | AliyunOSS: backport Aliyun OSS module to branch-2 | Major | fs/oss | Genmao Yu | SammiChen |
+| [YARN-6851](https://issues.apache.org/jira/browse/YARN-6851) | Capacity Scheduler: document configs for controlling # containers allowed to be allocated per node heartbeat | Minor | . | Wei Yan | Wei Yan |
+| [YARN-7495](https://issues.apache.org/jira/browse/YARN-7495) | Improve robustness of the AggregatedLogDeletionService | Major | log-aggregation | Jonathan Eagles | Jonathan Eagles |
+| [YARN-7611](https://issues.apache.org/jira/browse/YARN-7611) | Node manager web UI should display container type in containers page | Major | nodemanager, webapp | Weiwei Yang | Weiwei Yang |
+| [HADOOP-15056](https://issues.apache.org/jira/browse/HADOOP-15056) | Fix TestUnbuffer#testUnbufferException failure | Minor | test | Jack Bearden | Jack Bearden |
+| [HADOOP-15012](https://issues.apache.org/jira/browse/HADOOP-15012) | Add readahead, dropbehind, and unbuffer to StreamCapabilities | Major | fs | John Zhuge | John Zhuge |
+| [HADOOP-15104](https://issues.apache.org/jira/browse/HADOOP-15104) | AliyunOSS: change the default value of max error retry | Major | fs/oss | wujinhu | wujinhu |
+| [YARN-7642](https://issues.apache.org/jira/browse/YARN-7642) | Add test case to verify context update after container promotion or demotion with or without auto update | Minor | nodemanager | Weiwei Yang | Weiwei Yang |
+| [HADOOP-15111](https://issues.apache.org/jira/browse/HADOOP-15111) | AliyunOSS: backport HADOOP-14993 to branch-2 | Major | fs/oss | Genmao Yu | Genmao Yu |
+| [HDFS-9023](https://issues.apache.org/jira/browse/HDFS-9023) | When NN is not able to identify DN for replication, reason behind it can be logged | Critical | hdfs-client, namenode | Surendra Singh Lilhore | Xiao Chen |
+| [YARN-7678](https://issues.apache.org/jira/browse/YARN-7678) | Ability to enable logging of container memory stats | Major | nodemanager | Jim Brennan | Jim Brennan |
+| [HDFS-12945](https://issues.apache.org/jira/browse/HDFS-12945) | Switch to ClientProtocol instead of NamenodeProtocols in NamenodeWebHdfsMethods | Minor | . | Wei Yan | Wei Yan |
+| [YARN-7590](https://issues.apache.org/jira/browse/YARN-7590) | Improve container-executor validation check | Major | security, yarn | Eric Yang | Eric Yang |
+| [HADOOP-15189](https://issues.apache.org/jira/browse/HADOOP-15189) | backport HADOOP-15039 to branch-2 and branch-3 | Blocker | . | Genmao Yu | Genmao Yu |
+| [HADOOP-15212](https://issues.apache.org/jira/browse/HADOOP-15212) | Add independent secret manager method for logging expired tokens | Major | security | Daryn Sharp | Daryn Sharp |
+| [YARN-7728](https://issues.apache.org/jira/browse/YARN-7728) | Expose container preemptions related information in Capacity Scheduler queue metrics | Major | . | Eric Payne | Eric Payne |
+| [MAPREDUCE-7048](https://issues.apache.org/jira/browse/MAPREDUCE-7048) | Uber AM can crash due to unknown task in statusUpdate | Major | mr-am | Peter Bacsko | Peter Bacsko |
+| [HADOOP-13972](https://issues.apache.org/jira/browse/HADOOP-13972) | ADLS to support per-store configuration | Major | fs/adl | John Zhuge | Sharad Sonker |
+| [YARN-7813](https://issues.apache.org/jira/browse/YARN-7813) | Capacity Scheduler Intra-queue Preemption should be configurable for each queue | Major | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne |
+| [HDFS-11187](https://issues.apache.org/jira/browse/HDFS-11187) | Optimize disk access for last partial chunk checksum of Finalized replica | Major | datanode | Wei-Chiu Chuang | Gabor Bota |
+| [HADOOP-15279](https://issues.apache.org/jira/browse/HADOOP-15279) | increase maven heap size recommendations | Minor | build, documentation, test | Allen Wittenauer | Allen Wittenauer |
+| [HDFS-12884](https://issues.apache.org/jira/browse/HDFS-12884) | BlockUnderConstructionFeature.truncateBlock should be of type BlockInfo | Major | namenode | Konstantin Shvachko | chencan |
+| [HADOOP-15334](https://issues.apache.org/jira/browse/HADOOP-15334) | Upgrade Maven surefire plugin | Major | build | Arpit Agarwal | Arpit Agarwal |
+| [YARN-7623](https://issues.apache.org/jira/browse/YARN-7623) | Fix the CapacityScheduler Queue configuration documentation | Major | . | Arun Suresh | Jonathan Hung |
+| [HDFS-13314](https://issues.apache.org/jira/browse/HDFS-13314) | NameNode should optionally exit if it detects FsImage corruption | Major | namenode | Arpit Agarwal | Arpit Agarwal |
+
+
+### BUG FIXES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-13723](https://issues.apache.org/jira/browse/HADOOP-13723) | AliyunOSSInputStream#read() should update read bytes stat correctly | Major | tools | Mingliang Liu | Mingliang Liu |
+| [HADOOP-14045](https://issues.apache.org/jira/browse/HADOOP-14045) | Aliyun OSS documentation missing from website | Major | documentation, fs/oss | Andrew Wang | Yiqun Lin |
+| [HADOOP-14458](https://issues.apache.org/jira/browse/HADOOP-14458) | Add missing imports to TestAliyunOSSFileSystemContract.java | Trivial | fs/oss, test | Mingliang Liu | Mingliang Liu |
+| [HADOOP-14466](https://issues.apache.org/jira/browse/HADOOP-14466) | Remove useless document from TestAliyunOSSFileSystemContract.java | Minor | documentation | Akira Ajisaka | Chen Liang |
+| [HDFS-12318](https://issues.apache.org/jira/browse/HDFS-12318) | Fix IOException condition for openInfo in DFSInputStream | Major | . | legend | legend |
+| [HDFS-12614](https://issues.apache.org/jira/browse/HDFS-12614) | FSPermissionChecker#getINodeAttrs() throws NPE when INodeAttributesProvider configured | Major | . | Manoj Govindassamy | Manoj Govindassamy |
+| [HDFS-12788](https://issues.apache.org/jira/browse/HDFS-12788) | Reset the upload button when file upload fails | Critical | ui, webhdfs | Brahma Reddy Battula | Brahma Reddy Battula |
+| [YARN-7388](https://issues.apache.org/jira/browse/YARN-7388) | TestAMRestart should be scheduler agnostic | Major | . | Haibo Chen | Haibo Chen |
+| [HDFS-12705](https://issues.apache.org/jira/browse/HDFS-12705) | WebHdfsFileSystem exceptions should retain the caused by exception | Major | hdfs | Daryn Sharp | Hanisha Koneru |
+| [YARN-7361](https://issues.apache.org/jira/browse/YARN-7361) | Improve the docker container runtime documentation | Major | . | Shane Kumpf | Shane Kumpf |
+| [YARN-7469](https://issues.apache.org/jira/browse/YARN-7469) | Capacity Scheduler Intra-queue preemption: User can starve if newest app is exactly at user limit | Major | capacity scheduler, yarn | Eric Payne | Eric Payne |
+| [YARN-7489](https://issues.apache.org/jira/browse/YARN-7489) | ConcurrentModificationException in RMAppImpl#getRMAppMetrics | Major | capacityscheduler | Tao Yang | Tao Yang |
+| [YARN-7525](https://issues.apache.org/jira/browse/YARN-7525) | Incorrect query parameters in cluster nodes REST API document | Minor | documentation | Tao Yang | Tao Yang |
+| [HADOOP-15045](https://issues.apache.org/jira/browse/HADOOP-15045) | ISA-L build options are documented in branch-2 | Major | build, documentation | Akira Ajisaka | Akira Ajisaka |
+| [YARN-7390](https://issues.apache.org/jira/browse/YARN-7390) | All reservation related test cases failed when TestYarnClient runs against Fair Scheduler. | Major | fairscheduler, reservation system | Yufei Gu | Yufei Gu |
+| [HDFS-12754](https://issues.apache.org/jira/browse/HDFS-12754) | Lease renewal can hit a deadlock | Major | . | Kuhu Shukla | Kuhu Shukla |
+| [HDFS-12832](https://issues.apache.org/jira/browse/HDFS-12832) | INode.getFullPathName may throw ArrayIndexOutOfBoundsException lead to NameNode exit | Critical | namenode | DENG FEI | Konstantin Shvachko |
+| [HDFS-11754](https://issues.apache.org/jira/browse/HDFS-11754) | Make FsServerDefaults cache configurable. | Minor | . | Rushabh S Shah | Mikhail Erofeev |
+| [YARN-7509](https://issues.apache.org/jira/browse/YARN-7509) | AsyncScheduleThread and ResourceCommitterService are still running after RM is transitioned to standby | Critical | . | Tao Yang | Tao Yang |
+| [YARN-7558](https://issues.apache.org/jira/browse/YARN-7558) | "yarn logs" command fails to get logs for running containers if UI authentication is enabled. | Critical | . | Namit Maheshwari | Xuan Gong |
+| [HDFS-12638](https://issues.apache.org/jira/browse/HDFS-12638) | Delete copy-on-truncate block along with the original block, when deleting a file being truncated | Blocker | hdfs | Jiandan Yang | Konstantin Shvachko |
+| [MAPREDUCE-5124](https://issues.apache.org/jira/browse/MAPREDUCE-5124) | AM lacks flow control for task events | Major | mr-am | Jason Lowe | Peter Bacsko |
+| [YARN-7455](https://issues.apache.org/jira/browse/YARN-7455) | quote\_and\_append\_arg can overflow buffer | Major | nodemanager | Jason Lowe | Jim Brennan |
+| [HADOOP-14985](https://issues.apache.org/jira/browse/HADOOP-14985) | Remove subversion related code from VersionInfoMojo.java | Minor | build | Akira Ajisaka | Ajay Kumar |
+| [HDFS-12889](https://issues.apache.org/jira/browse/HDFS-12889) | Router UI is missing robots.txt file | Major | . | Bharat Viswanadham | Bharat Viswanadham |
+| [HDFS-11576](https://issues.apache.org/jira/browse/HDFS-11576) | Block recovery will fail indefinitely if recovery time \> heartbeat interval | Critical | datanode, hdfs, namenode | Lukas Majercak | Lukas Majercak |
+| [YARN-7607](https://issues.apache.org/jira/browse/YARN-7607) | Remove the trailing duplicated timestamp in container diagnostics message | Minor | nodemanager | Weiwei Yang | Weiwei Yang |
+| [HADOOP-15080](https://issues.apache.org/jira/browse/HADOOP-15080) | Aliyun OSS: update oss sdk from 2.8.1 to 2.8.3 to remove its dependency on Cat-x "json-lib" | Blocker | fs/oss | Chris Douglas | SammiChen |
+| [YARN-7591](https://issues.apache.org/jira/browse/YARN-7591) | NPE in async-scheduling mode of CapacityScheduler | Critical | capacityscheduler | Tao Yang | Tao Yang |
+| [YARN-7608](https://issues.apache.org/jira/browse/YARN-7608) | Incorrect sTarget column causing DataTable warning on RM application and scheduler web page | Major | resourcemanager, webapp | Weiwei Yang | Gergely Novák |
+| [HDFS-12833](https://issues.apache.org/jira/browse/HDFS-12833) | Distcp : Update the usage of delete option for dependency with update and overwrite option | Minor | distcp, hdfs | Harshakiran Reddy | usharani |
+| [YARN-7647](https://issues.apache.org/jira/browse/YARN-7647) | NM print inappropriate error log when node-labels is enabled | Minor | . | Yang Wang | Yang Wang |
+| [HDFS-12907](https://issues.apache.org/jira/browse/HDFS-12907) | Allow read-only access to reserved raw for non-superusers | Major | namenode | Daryn Sharp | Rushabh S Shah |
+| [HDFS-12881](https://issues.apache.org/jira/browse/HDFS-12881) | Output streams closed with IOUtils suppressing write errors | Major | . | Jason Lowe | Ajay Kumar |
+| [YARN-7595](https://issues.apache.org/jira/browse/YARN-7595) | Container launching code suppresses close exceptions after writes | Major | nodemanager | Jason Lowe | Jim Brennan |
+| [HADOOP-15085](https://issues.apache.org/jira/browse/HADOOP-15085) | Output streams closed with IOUtils suppressing write errors | Major | . | Jason Lowe | Jim Brennan |
+| [YARN-7661](https://issues.apache.org/jira/browse/YARN-7661) | NodeManager metrics return wrong value after update node resource | Major | . | Yang Wang | Yang Wang |
+| [HDFS-12347](https://issues.apache.org/jira/browse/HDFS-12347) | TestBalancerRPCDelay#testBalancerRPCDelay fails very frequently | Critical | test | Xiao Chen | Bharat Viswanadham |
+| [YARN-7542](https://issues.apache.org/jira/browse/YARN-7542) | Fix issue that causes some Running Opportunistic Containers to be recovered as PAUSED | Major | . | Arun Suresh | Sampada Dehankar |
+| [HADOOP-15143](https://issues.apache.org/jira/browse/HADOOP-15143) | NPE due to Invalid KerberosTicket in UGI | Major | . | Jitendra Nath Pandey | Mukul Kumar Singh |
+| [YARN-7692](https://issues.apache.org/jira/browse/YARN-7692) | Skip validating priority acls while recovering applications | Blocker | resourcemanager | Charan Hebri | Sunil G |
+| [MAPREDUCE-7028](https://issues.apache.org/jira/browse/MAPREDUCE-7028) | Concurrent task progress updates causing NPE in Application Master | Blocker | mr-am | Gergo Repas | Gergo Repas |
+| [YARN-7619](https://issues.apache.org/jira/browse/YARN-7619) | Max AM Resource value in Capacity Scheduler UI has to be refreshed for every user | Major | capacity scheduler, yarn | Eric Payne | Eric Payne |
+| [YARN-7699](https://issues.apache.org/jira/browse/YARN-7699) | queueUsagePercentage is coming as INF for getApp REST api call | Major | webapp | Sunil G | Sunil G |
+| [YARN-7508](https://issues.apache.org/jira/browse/YARN-7508) | NPE in FiCaSchedulerApp when debug log enabled in async-scheduling mode | Major | capacityscheduler | Tao Yang | Tao Yang |
+| [YARN-7663](https://issues.apache.org/jira/browse/YARN-7663) | RMAppImpl:Invalid event: START at KILLED | Minor | resourcemanager | lujie | lujie |
+| [YARN-6948](https://issues.apache.org/jira/browse/YARN-6948) | Invalid event: ATTEMPT\_ADDED at FINAL\_SAVING | Minor | yarn | lujie | lujie |
+| [YARN-7735](https://issues.apache.org/jira/browse/YARN-7735) | Fix typo in YARN documentation | Minor | documentation | Takanobu Asanuma | Takanobu Asanuma |
+| [YARN-7727](https://issues.apache.org/jira/browse/YARN-7727) | Incorrect log levels in few logs with QueuePriorityContainerCandidateSelector | Minor | yarn | Prabhu Joseph | Prabhu Joseph |
+| [HDFS-11915](https://issues.apache.org/jira/browse/HDFS-11915) | Sync rbw dir on the first hsync() to avoid file lost on power failure | Critical | . | Kanaka Kumar Avvaru | Vinayakumar B |
+| [HDFS-9049](https://issues.apache.org/jira/browse/HDFS-9049) | Make Datanode Netty reverse proxy port to be configurable | Major | datanode | Vinayakumar B | Vinayakumar B |
+| [HADOOP-15150](https://issues.apache.org/jira/browse/HADOOP-15150) | in FsShell, UGI params should be overidden through env vars(-D arg) | Major | . | Brahma Reddy Battula | Brahma Reddy Battula |
+| [HADOOP-15181](https://issues.apache.org/jira/browse/HADOOP-15181) | Typo in SecureMode.md | Trivial | documentation | Masahiro Tanaka | Masahiro Tanaka |
+| [YARN-7737](https://issues.apache.org/jira/browse/YARN-7737) | prelaunch.err file not found exception on container failure | Major | . | Jonathan Hung | Keqiu Hu |
+| [HDFS-13063](https://issues.apache.org/jira/browse/HDFS-13063) | Fix the incorrect spelling in HDFSHighAvailabilityWithQJM.md | Trivial | documentation | Jianfei Jiang | Jianfei Jiang |
+| [YARN-7102](https://issues.apache.org/jira/browse/YARN-7102) | NM heartbeat stuck when responseId overflows MAX\_INT | Critical | . | Botong Huang | Botong Huang |
+| [HADOOP-15151](https://issues.apache.org/jira/browse/HADOOP-15151) | MapFile.fix creates a wrong index file in case of block-compressed data file. | Major | common | Grigori Rybkine | Grigori Rybkine |
+| [MAPREDUCE-7020](https://issues.apache.org/jira/browse/MAPREDUCE-7020) | Task timeout in uber mode can crash AM | Major | mr-am | Akira Ajisaka | Peter Bacsko |
+| [YARN-7698](https://issues.apache.org/jira/browse/YARN-7698) | A misleading variable's name in ApplicationAttemptEventDispatcher | Minor | resourcemanager | Jinjiang Ling | Jinjiang Ling |
+| [HDFS-13100](https://issues.apache.org/jira/browse/HDFS-13100) | Handle IllegalArgumentException when GETSERVERDEFAULTS is not implemented in webhdfs. | Critical | hdfs, webhdfs | Yongjun Zhang | Yongjun Zhang |
+| [YARN-6868](https://issues.apache.org/jira/browse/YARN-6868) | Add test scope to certain entries in hadoop-yarn-server-resourcemanager pom.xml | Major | yarn | Ray Chiang | Ray Chiang |
+| [YARN-7849](https://issues.apache.org/jira/browse/YARN-7849) | TestMiniYarnClusterNodeUtilization#testUpdateNodeUtilization fails due to heartbeat sync error | Major | test | Jason Lowe | Botong Huang |
+| [YARN-7801](https://issues.apache.org/jira/browse/YARN-7801) | AmFilterInitializer should addFilter after fill all parameters | Critical | . | Sumana Sathish | Wangda Tan |
+| [YARN-7890](https://issues.apache.org/jira/browse/YARN-7890) | NPE during container relaunch | Major | . | Billie Rinaldi | Jason Lowe |
+| [HDFS-12935](https://issues.apache.org/jira/browse/HDFS-12935) | Get ambiguous result for DFSAdmin command in HA mode when only one namenode is up | Major | tools | Jianfei Jiang | Jianfei Jiang |
+| [HDFS-13120](https://issues.apache.org/jira/browse/HDFS-13120) | Snapshot diff could be corrupted after concat | Major | namenode, snapshots | Xiaoyu Yao | Xiaoyu Yao |
+| [HDFS-10453](https://issues.apache.org/jira/browse/HDFS-10453) | ReplicationMonitor thread could stuck for long time due to the race between replication and delete of same file in a large cluster. | Major | namenode | He Xiaoqiao | He Xiaoqiao |
+| [HDFS-8693](https://issues.apache.org/jira/browse/HDFS-8693) | refreshNamenodes does not support adding a new standby to a running DN | Critical | datanode, ha | Jian Fang | Ajith S |
+| [MAPREDUCE-7052](https://issues.apache.org/jira/browse/MAPREDUCE-7052) | TestFixedLengthInputFormat#testFormatCompressedIn is flaky | Major | client, test | Peter Bacsko | Peter Bacsko |
+| [HDFS-13112](https://issues.apache.org/jira/browse/HDFS-13112) | Token expiration edits may cause log corruption or deadlock | Critical | namenode | Daryn Sharp | Daryn Sharp |
+| [MAPREDUCE-7053](https://issues.apache.org/jira/browse/MAPREDUCE-7053) | Timed out tasks can fail to produce thread dump | Major | . | Jason Lowe | Jason Lowe |
+| [HADOOP-15206](https://issues.apache.org/jira/browse/HADOOP-15206) | BZip2 drops and duplicates records when input split size is small | Major | . | Aki Tanaka | Aki Tanaka |
+| [YARN-7947](https://issues.apache.org/jira/browse/YARN-7947) | Capacity Scheduler intra-queue preemption can NPE for non-schedulable apps | Major | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne |
+| [YARN-7945](https://issues.apache.org/jira/browse/YARN-7945) | Java Doc error in UnmanagedAMPoolManager for branch-2 | Major | . | Rohith Sharma K S | Botong Huang |
+| [HADOOP-14903](https://issues.apache.org/jira/browse/HADOOP-14903) | Add json-smart explicitly to pom.xml | Major | common | Ray Chiang | Ray Chiang |
+| [HDFS-12781](https://issues.apache.org/jira/browse/HDFS-12781) | After Datanode down, In Namenode UI Datanode tab is throwing warning message. | Major | datanode | Harshakiran Reddy | Brahma Reddy Battula |
+| [HDFS-12070](https://issues.apache.org/jira/browse/HDFS-12070) | Failed block recovery leaves files open indefinitely and at risk for data loss | Major | . | Daryn Sharp | Kihwal Lee |
+| [HADOOP-15251](https://issues.apache.org/jira/browse/HADOOP-15251) | Backport HADOOP-13514 (surefire upgrade) to branch-2 | Major | test | Chris Douglas | Chris Douglas |
+| [HADOOP-15275](https://issues.apache.org/jira/browse/HADOOP-15275) | Incorrect javadoc for return type of RetryPolicy#shouldRetry | Minor | documentation | Nanda kumar | Nanda kumar |
+| [YARN-7511](https://issues.apache.org/jira/browse/YARN-7511) | NPE in ContainerLocalizer when localization failed for running container | Major | nodemanager | Tao Yang | Tao Yang |
+| [MAPREDUCE-7023](https://issues.apache.org/jira/browse/MAPREDUCE-7023) | TestHadoopArchiveLogs.testCheckFilesAndSeedApps fails on rerun | Minor | test | Gergely Novák | Gergely Novák |
+| [HADOOP-15283](https://issues.apache.org/jira/browse/HADOOP-15283) | Upgrade from findbugs 3.0.1 to spotbugs 3.1.2 in branch-2 to fix docker image build | Major | . | Xiao Chen | Akira Ajisaka |
+| [YARN-7736](https://issues.apache.org/jira/browse/YARN-7736) | Fix itemization in YARN federation document | Minor | documentation | Akira Ajisaka | Sen Zhao |
+| [HDFS-13164](https://issues.apache.org/jira/browse/HDFS-13164) | File not closed if streamer fail with DSQuotaExceededException | Major | hdfs-client | Xiao Chen | Xiao Chen |
+| [HDFS-13109](https://issues.apache.org/jira/browse/HDFS-13109) | Support fully qualified hdfs path in EZ commands | Major | hdfs | Hanisha Koneru | Hanisha Koneru |
+| [MAPREDUCE-6930](https://issues.apache.org/jira/browse/MAPREDUCE-6930) | mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are both present twice in mapred-default.xml | Major | mrv2 | Daniel Templeton | Sen Zhao |
+| [HDFS-12156](https://issues.apache.org/jira/browse/HDFS-12156) | TestFSImage fails without -Pnative | Major | test | Akira Ajisaka | Akira Ajisaka |
+| [HADOOP-15308](https://issues.apache.org/jira/browse/HADOOP-15308) | TestConfiguration fails on Windows because of paths | Major | . | Íñigo Goiri | Xiao Liang |
+| [YARN-7636](https://issues.apache.org/jira/browse/YARN-7636) | Re-reservation count may overflow when cluster resource exhausted for a long time | Major | capacityscheduler | Tao Yang | Tao Yang |
+| [HDFS-12886](https://issues.apache.org/jira/browse/HDFS-12886) | Ignore minReplication for block recovery | Major | hdfs, namenode | Lukas Majercak | Lukas Majercak |
+| [HDFS-13296](https://issues.apache.org/jira/browse/HDFS-13296) | GenericTestUtils generates paths with drive letter in Windows and fail webhdfs related test cases | Major | . | Xiao Liang | Xiao Liang |
+| [HDFS-13268](https://issues.apache.org/jira/browse/HDFS-13268) | TestWebHdfsFileContextMainOperations fails on Windows | Major | . | Íñigo Goiri | Xiao Liang |
+| [YARN-8054](https://issues.apache.org/jira/browse/YARN-8054) | Improve robustness of the LocalDirsHandlerService MonitoringTimerTask thread | Major | . | Jonathan Eagles | Jonathan Eagles |
+| [YARN-7873](https://issues.apache.org/jira/browse/YARN-7873) | Revert YARN-6078 | Blocker | . | Billie Rinaldi | Billie Rinaldi |
+| [HDFS-13195](https://issues.apache.org/jira/browse/HDFS-13195) | DataNode conf page cannot display the current value after reconfig | Minor | datanode | maobaolong | maobaolong |
+| [HADOOP-15320](https://issues.apache.org/jira/browse/HADOOP-15320) | Remove customized getFileBlockLocations for hadoop-azure and hadoop-azure-datalake | Major | fs/adl, fs/azure | shanyu zhao | shanyu zhao |
+| [HADOOP-12862](https://issues.apache.org/jira/browse/HADOOP-12862) | LDAP Group Mapping over SSL can not specify trust store | Major | . | Wei-Chiu Chuang | Wei-Chiu Chuang |
+| [HDFS-13427](https://issues.apache.org/jira/browse/HDFS-13427) | Fix the section titles of transparent encryption document | Minor | documentation | Akira Ajisaka | Akira Ajisaka |
+
+
+### TESTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-14696](https://issues.apache.org/jira/browse/HADOOP-14696) | parallel tests don't work for Windows | Minor | test | Allen Wittenauer | Allen Wittenauer |
+
+
+### SUB-TASKS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-13481](https://issues.apache.org/jira/browse/HADOOP-13481) | User end documents for Aliyun OSS FileSystem | Minor | fs, fs/oss | Genmao Yu | Genmao Yu |
+| [HADOOP-13591](https://issues.apache.org/jira/browse/HADOOP-13591) | Unit test failure in TestOSSContractGetFileStatus and TestOSSContractRootDir | Major | fs, fs/oss | Genmao Yu | Genmao Yu |
+| [HADOOP-13624](https://issues.apache.org/jira/browse/HADOOP-13624) | Rename TestAliyunOSSContractDispCp | Major | fs, fs/oss | Kai Zheng | Genmao Yu |
+| [HADOOP-14065](https://issues.apache.org/jira/browse/HADOOP-14065) | AliyunOSS: oss directory filestatus should use meta time | Major | fs/oss | Fei Hui | Fei Hui |
+| [HADOOP-13768](https://issues.apache.org/jira/browse/HADOOP-13768) | AliyunOSS: handle the failure in the batch delete operation `deleteDirs`. | Major | fs | Genmao Yu | Genmao Yu |
+| [HADOOP-14069](https://issues.apache.org/jira/browse/HADOOP-14069) | AliyunOSS: listStatus returns wrong file info | Major | fs/oss | Fei Hui | Fei Hui |
+| [HADOOP-13769](https://issues.apache.org/jira/browse/HADOOP-13769) | AliyunOSS: update oss sdk version | Major | fs, fs/oss | Genmao Yu | Genmao Yu |
+| [HADOOP-14072](https://issues.apache.org/jira/browse/HADOOP-14072) | AliyunOSS: Failed to read from stream when seek beyond the download size | Major | fs/oss | Genmao Yu | Genmao Yu |
+| [HADOOP-14192](https://issues.apache.org/jira/browse/HADOOP-14192) | Aliyun OSS FileSystem contract test should implement getTestBaseDir() | Major | fs/oss | Mingliang Liu | Mingliang Liu |
+| [HADOOP-14194](https://issues.apache.org/jira/browse/HADOOP-14194) | Aliyun OSS should not use empty endpoint as default | Major | fs/oss | Mingliang Liu | Genmao Yu |
+| [HADOOP-14787](https://issues.apache.org/jira/browse/HADOOP-14787) | AliyunOSS: Implement the `createNonRecursive` operator | Major | fs, fs/oss | Genmao Yu | Genmao Yu |
+| [HADOOP-14649](https://issues.apache.org/jira/browse/HADOOP-14649) | Update aliyun-sdk-oss version to 2.8.1 | Major | fs/oss | Ray Chiang | Genmao Yu |
+| [HADOOP-14799](https://issues.apache.org/jira/browse/HADOOP-14799) | Update nimbus-jose-jwt to 4.41.1 | Major | . | Ray Chiang | Ray Chiang |
+| [HADOOP-14997](https://issues.apache.org/jira/browse/HADOOP-14997) | Add hadoop-aliyun as dependency of hadoop-cloud-storage | Minor | fs/oss | Genmao Yu | Genmao Yu |
+| [HDFS-12801](https://issues.apache.org/jira/browse/HDFS-12801) | RBF: Set MountTableResolver as default file resolver | Minor | . | Íñigo Goiri | Íñigo Goiri |
+| [YARN-7430](https://issues.apache.org/jira/browse/YARN-7430) | Enable user re-mapping for Docker containers by default | Blocker | security, yarn | Eric Yang | Eric Yang |
+| [YARN-6128](https://issues.apache.org/jira/browse/YARN-6128) | Add support for AMRMProxy HA | Major | amrmproxy, nodemanager | Subru Krishnan | Botong Huang |
+| [HADOOP-15024](https://issues.apache.org/jira/browse/HADOOP-15024) | AliyunOSS: support user agent configuration and include that & Hadoop version information to oss server | Major | fs, fs/oss | SammiChen | SammiChen |
+| [HDFS-12858](https://issues.apache.org/jira/browse/HDFS-12858) | RBF: Add router admin commands usage in HDFS commands reference doc | Minor | documentation | Yiqun Lin | Yiqun Lin |
+| [HDFS-12835](https://issues.apache.org/jira/browse/HDFS-12835) | RBF: Fix Javadoc parameter errors | Minor | . | Wei Yan | Wei Yan |
+| [YARN-7587](https://issues.apache.org/jira/browse/YARN-7587) | Skip dispatching opportunistic containers to nodes whose queue is already full | Major | . | Weiwei Yang | Weiwei Yang |
+| [HDFS-12396](https://issues.apache.org/jira/browse/HDFS-12396) | Webhdfs file system should get delegation token from kms provider. | Major | encryption, kms, webhdfs | Rushabh S Shah | Rushabh S Shah |
+| [YARN-6704](https://issues.apache.org/jira/browse/YARN-6704) | Add support for work preserving NM restart when FederationInterceptor is enabled in AMRMProxyService | Major | . | Botong Huang | Botong Huang |
+| [HDFS-12875](https://issues.apache.org/jira/browse/HDFS-12875) | RBF: Complete logic for -readonly option of dfsrouteradmin add command | Major | . | Yiqun Lin | Íñigo Goiri |
+| [YARN-7630](https://issues.apache.org/jira/browse/YARN-7630) | Fix AMRMToken rollover handling in AMRMProxy | Minor | . | Botong Huang | Botong Huang |
+| [HDFS-12937](https://issues.apache.org/jira/browse/HDFS-12937) | RBF: Add more unit tests for router admin commands | Major | test | Yiqun Lin | Yiqun Lin |
+| [HDFS-12988](https://issues.apache.org/jira/browse/HDFS-12988) | RBF: Mount table entries not properly updated in the local cache | Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HADOOP-15156](https://issues.apache.org/jira/browse/HADOOP-15156) | backport HADOOP-15086 rename fix to branch-2 | Major | fs/azure | Thomas Marquardt | Thomas Marquardt |
+| [YARN-7716](https://issues.apache.org/jira/browse/YARN-7716) | metricsTimeStart and metricsTimeEnd should be all lower case in the doc | Major | timelinereader | Haibo Chen | Haibo Chen |
+| [HDFS-12802](https://issues.apache.org/jira/browse/HDFS-12802) | RBF: Control MountTableResolver cache size | Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HADOOP-15027](https://issues.apache.org/jira/browse/HADOOP-15027) | AliyunOSS: Support multi-thread pre-read to improve sequential read from Hadoop to Aliyun OSS performance | Major | fs/oss | wujinhu | wujinhu |
+| [HDFS-13028](https://issues.apache.org/jira/browse/HDFS-13028) | RBF: Fix spurious TestRouterRpc#testProxyGetStats | Minor | . | Íñigo Goiri | Íñigo Goiri |
+| [YARN-5094](https://issues.apache.org/jira/browse/YARN-5094) | some YARN container events have timestamp of -1 | Critical | . | Sangjin Lee | Haibo Chen |
+| [YARN-7782](https://issues.apache.org/jira/browse/YARN-7782) | Enable user re-mapping for Docker containers in yarn-default.xml | Blocker | security, yarn | Eric Yang | Eric Yang |
+| [HDFS-12772](https://issues.apache.org/jira/browse/HDFS-12772) | RBF: Federation Router State State Store internal API | Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13042](https://issues.apache.org/jira/browse/HDFS-13042) | RBF: Heartbeat Router State | Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13049](https://issues.apache.org/jira/browse/HDFS-13049) | RBF: Inconsistent Router OPTS config in branch-2 and branch-3 | Minor | . | Wei Yan | Wei Yan |
+| [HDFS-12574](https://issues.apache.org/jira/browse/HDFS-12574) | Add CryptoInputStream to WebHdfsFileSystem read call. | Major | encryption, kms, webhdfs | Rushabh S Shah | Rushabh S Shah |
+| [HDFS-13044](https://issues.apache.org/jira/browse/HDFS-13044) | RBF: Add a safe mode for the Router | Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13043](https://issues.apache.org/jira/browse/HDFS-13043) | RBF: Expose the state of the Routers in the federation | Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13068](https://issues.apache.org/jira/browse/HDFS-13068) | RBF: Add router admin option to manage safe mode | Major | . | Íñigo Goiri | Yiqun Lin |
+| [HDFS-13119](https://issues.apache.org/jira/browse/HDFS-13119) | RBF: Manage unavailable clusters | Major | . | Íñigo Goiri | Yiqun Lin |
+| [HDFS-13187](https://issues.apache.org/jira/browse/HDFS-13187) | RBF: Fix Routers information shown in the web UI | Minor | . | Wei Yan | Wei Yan |
+| [HDFS-13184](https://issues.apache.org/jira/browse/HDFS-13184) | RBF: Improve the unit test TestRouterRPCClientRetries | Minor | test | Yiqun Lin | Yiqun Lin |
+| [HDFS-13199](https://issues.apache.org/jira/browse/HDFS-13199) | RBF: Fix the hdfs router page missing label icon issue | Major | federation, hdfs | maobaolong | maobaolong |
+| [HDFS-13214](https://issues.apache.org/jira/browse/HDFS-13214) | RBF: Complete document of Router configuration | Major | . | Tao Jie | Yiqun Lin |
+| [HDFS-13230](https://issues.apache.org/jira/browse/HDFS-13230) | RBF: ConnectionManager's cleanup task will compare each pool's own active conns with its total conns | Minor | . | Wei Yan | Chao Sun |
+| [HDFS-13233](https://issues.apache.org/jira/browse/HDFS-13233) | RBF: MountTableResolver doesn't return the correct mount point of the given path | Major | hdfs | wangzhiyuan | wangzhiyuan |
+| [HDFS-13212](https://issues.apache.org/jira/browse/HDFS-13212) | RBF: Fix router location cache issue | Major | federation, hdfs | Weiwei Wu | Weiwei Wu |
+| [HDFS-13232](https://issues.apache.org/jira/browse/HDFS-13232) | RBF: ConnectionPool should return first usable connection | Minor | . | Wei Yan | Ekanth S |
+| [HDFS-13240](https://issues.apache.org/jira/browse/HDFS-13240) | RBF: Update some inaccurate document descriptions | Minor | . | Yiqun Lin | Yiqun Lin |
+| [HDFS-11399](https://issues.apache.org/jira/browse/HDFS-11399) | Many tests fails in Windows due to injecting disk failures | Major | . | Yiqun Lin | Yiqun Lin |
+| [HDFS-13241](https://issues.apache.org/jira/browse/HDFS-13241) | RBF: TestRouterSafemode failed if the port 8888 is in use | Major | hdfs, test | maobaolong | maobaolong |
+| [HDFS-13253](https://issues.apache.org/jira/browse/HDFS-13253) | RBF: Quota management incorrect parent-child relationship judgement | Major | . | Yiqun Lin | Yiqun Lin |
+| [HDFS-13226](https://issues.apache.org/jira/browse/HDFS-13226) | RBF: Throw the exception if mount table entry validated failed | Major | hdfs | maobaolong | maobaolong |
+| [HDFS-12773](https://issues.apache.org/jira/browse/HDFS-12773) | RBF: Improve State Store FS implementation | Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13198](https://issues.apache.org/jira/browse/HDFS-13198) | RBF: RouterHeartbeatService throws out CachedStateStore related exceptions when starting router | Minor | . | Wei Yan | Wei Yan |
+| [HDFS-13224](https://issues.apache.org/jira/browse/HDFS-13224) | RBF: Resolvers to support mount points across multiple subclusters | Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HADOOP-15262](https://issues.apache.org/jira/browse/HADOOP-15262) | AliyunOSS: move files under a directory in parallel when rename a directory | Major | fs/oss | wujinhu | wujinhu |
+| [HDFS-13215](https://issues.apache.org/jira/browse/HDFS-13215) | RBF: Move Router to its own module | Major | . | Íñigo Goiri | Wei Yan |
+| [HDFS-13250](https://issues.apache.org/jira/browse/HDFS-13250) | RBF: Router to manage requests across multiple subclusters | Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13318](https://issues.apache.org/jira/browse/HDFS-13318) | RBF: Fix FindBugs in hadoop-hdfs-rbf | Minor | . | Íñigo Goiri | Ekanth S |
+| [HDFS-12792](https://issues.apache.org/jira/browse/HDFS-12792) | RBF: Test Router-based federation using HDFSContract | Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-12512](https://issues.apache.org/jira/browse/HDFS-12512) | RBF: Add WebHDFS | Major | fs | Íñigo Goiri | Wei Yan |
+| [HDFS-13291](https://issues.apache.org/jira/browse/HDFS-13291) | RBF: Implement available space based OrderResolver | Major | . | Yiqun Lin | Yiqun Lin |
+| [HDFS-13204](https://issues.apache.org/jira/browse/HDFS-13204) | RBF: Optimize name service safe mode icon | Minor | . | liuhongtong | liuhongtong |
+| [HDFS-13352](https://issues.apache.org/jira/browse/HDFS-13352) | RBF: Add xsl stylesheet for hdfs-rbf-default.xml | Major | documentation | Takanobu Asanuma | Takanobu Asanuma |
+| [YARN-8010](https://issues.apache.org/jira/browse/YARN-8010) | Add config in FederationRMFailoverProxy to not bypass facade cache when failing over | Minor | . | Botong Huang | Botong Huang |
+| [HDFS-13347](https://issues.apache.org/jira/browse/HDFS-13347) | RBF: Cache datanode reports | Minor | . | Íñigo Goiri | Íñigo Goiri |
+| [HDFS-13289](https://issues.apache.org/jira/browse/HDFS-13289) | RBF: TestConnectionManager#testCleanup() test case need correction | Minor | . | Dibyendu Karmakar | Dibyendu Karmakar |
+| [HDFS-13364](https://issues.apache.org/jira/browse/HDFS-13364) | RBF: Support NamenodeProtocol in the Router | Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HADOOP-14651](https://issues.apache.org/jira/browse/HADOOP-14651) | Update okhttp version to 2.7.5 | Major | fs/adl | Ray Chiang | Ray Chiang |
+| [HADOOP-14999](https://issues.apache.org/jira/browse/HADOOP-14999) | AliyunOSS: provide one asynchronous multi-part based uploading mechanism | Major | fs/oss | Genmao Yu | Genmao Yu |
+
+
+### OTHER:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-15149](https://issues.apache.org/jira/browse/HADOOP-15149) | CryptoOutputStream should implement StreamCapabilities | Major | fs | Mike Drob | Xiao Chen |
+| [YARN-7691](https://issues.apache.org/jira/browse/YARN-7691) | Add Unit Tests for ContainersLauncher | Major | . | Sampada Dehankar | Sampada Dehankar |
+| [HADOOP-15177](https://issues.apache.org/jira/browse/HADOOP-15177) | Update the release year to 2018 | Blocker | build | Akira Ajisaka | Bharat Viswanadham |
+
+
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4dc346d/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md
new file mode 100644
index 0000000..bed70b1
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/RELEASENOTES.2.9.1.md
@@ -0,0 +1,88 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# "Apache Hadoop" 2.9.1 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
+
+
+---
+
+* [HADOOP-12756](https://issues.apache.org/jira/browse/HADOOP-12756) | *Major* | **Incorporate Aliyun OSS file system implementation**
+
+Aliyun OSS is widely used among China’s cloud users. This work implements a new Hadoop-compatible filesystem, AliyunOSSFileSystem, with the oss scheme, similar to the existing s3a and azure support.
+
+
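A minimal usage sketch, assuming the hadoop-aliyun module is on the classpath and OSS credentials are already configured; the bucket name and path below are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

Configuration conf = new Configuration();
// oss:// URIs resolve to AliyunOSSFileSystem once the module is loaded.
Path path = new Path("oss://example-bucket/data/part-00000");  // hypothetical
FileSystem fs = path.getFileSystem(conf);
try (FSDataInputStream in = fs.open(path)) {
  IOUtils.copyBytes(in, System.out, 4096, false);  // stream the object to stdout
}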
+---
+
+* [HADOOP-14964](https://issues.apache.org/jira/browse/HADOOP-14964) | *Major* | **AliyunOSS: backport Aliyun OSS module to branch-2**
+
+Aliyun OSS is widely used among China’s cloud users. This work backports the Hadoop-compatible filesystem, AliyunOSSFileSystem, with the oss:// scheme to branch-2, similar to the existing s3a and azure support.
+
+
+---
+
+* [HDFS-12883](https://issues.apache.org/jira/browse/HDFS-12883) | *Major* | **RBF: Document Router and State Store metrics**
+
+This JIRA makes the following change: the Router metrics context is changed from 'router' to 'dfs'.
+
+
+---
+
+* [HDFS-12895](https://issues.apache.org/jira/browse/HDFS-12895) | *Major* | **RBF: Add ACL support for mount table**
+
+Mount tables now support ACLs. Users cannot modify entries they do not own; entries created before permissions were introduced are treated as having the default permissions owner:superuser, group:supergroup, permission:755, so they must be modified while logged in as the superuser.
+
+
+---
+
+* [YARN-7190](https://issues.apache.org/jira/browse/YARN-7190) | *Major* | **Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath**
+
+Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath.
+
+
+---
+
+* [HADOOP-15156](https://issues.apache.org/jira/browse/HADOOP-15156) | *Major* | **backport HADOOP-15086 rename fix to branch-2**
+
+[WASB] Fix Azure implementation of Filesystem.rename to ensure that at most one operation succeeds when there are multiple, concurrent rename operations targeting the same destination file.
+
+
+---
+
+* [HADOOP-15027](https://issues.apache.org/jira/browse/HADOOP-15027) | *Major* | **AliyunOSS: Support multi-thread pre-read to improve sequential read from Hadoop to Aliyun OSS performance**
+
+Supports multi-threaded pre-read in AliyunOSSInputStream to improve sequential read performance from Hadoop to Aliyun OSS.
+
+
+---
+
+* [HDFS-13083](https://issues.apache.org/jira/browse/HDFS-13083) | *Major* | **RBF: Fix doc error setting up client**
+
+Fixes a documentation error in setting up HDFS Router-based Federation.
+
+
+---
+
+* [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | *Minor* | **RBF: Use the ZooKeeper as the default State Store**
+
+Changes the default State Store from a local file to ZooKeeper. This additionally requires a ZooKeeper address to be configured.
+
+
+
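A hedged configuration sketch for the HDFS-13099 change above; the quorum hosts are placeholders, and hadoop.zk.address is assumed to be the shared ZooKeeper address key the Router's ZooKeeper State Store driver reads:

import org.apache.hadoop.conf.Configuration;

Configuration conf = new Configuration();
// Placeholder quorum; the ZooKeeper-backed State Store driver connects here.
conf.set("hadoop.zk.address", "zk1:2181,zk2:2181,zk3:2181");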
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4dc346d/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml
new file mode 100644
index 0000000..a5d87c7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.9.1.xml
@@ -0,0 +1,312 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Mon Apr 16 12:03:07 UTC 2018 -->
+
+<api
+ xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+ xsi:noNamespaceSchemaLocation='api.xsd'
+ name="Apache Hadoop HDFS 2.9.1"
+ jdversion="1.0.9">
+
+<!-- Command line arguments = -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -verbose -classpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/classes:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.9.1.jar:/usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.9.1.jar:/maven/org/slf4j/slf4j-api/1.7.25/slf4j-api-1.7.25.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/com/nimbusds/nimbus-jose-jwt/4.41.1/nimbus-jose-jwt-4.41.1.jar:/maven/com/github/stephenc/jcip/jcip-annotations/1.0-1/jcip-annotations-1.0-1.jar:/maven/net/minidev/json-smart/1.3.1/json-smart-1.3.1.jar:/maven/org/apache/directory/serv
er/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/maven/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/maven/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/maven/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/jline/jline/0.9.94/jline-0.9.94.jar:/maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-2.9.1.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/commons-net/commons-net/3.1/commons-net-3.1.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/org/mortbay/jetty/jetty-sslengine/6.1.26/jetty-sslengine-6.1.26.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/maven/org/codehaus
/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/maven/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/maven/javax/activation/activation/1.1/activation-1.1.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/maven/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/maven/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/maven/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/maven/org/apache/commons/commons-lang3/3.4/commons-lang3-3.4.jar:/maven/org/apache/avro/avro/1.7.7/avro-1.7.7.jar:/maven/com/thoug
htworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.5/snappy-java-1.0.5.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/maven/org/codehaus/woodstox/stax2-api/3.1.4/stax2-api-3.1.4.jar:/maven/com/fasterxml/woodstox/woodstox-core/5.0.3/woodstox-core-5.0.3.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-2.9.1.jar:/maven/com/squareup/okhttp/okhttp/2.7.5/okhttp-2.7.5.jar:/maven/com/squareup/okio/okio/1.6.0/okio-1.6.0.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/maven/org/mortb
ay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/maven/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/maven/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/maven/asm/asm/3.2/asm-3.2.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/commons-daemon/commons-daemon/1.0.13/commons-daemon-1.0.13.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/maven/org/slf4j/slf4j-log4j12/1.7.25/slf4j-log4j12-1.7.25.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/xmlenc/xmlenc/0.52/xmlenc
-0.52.jar:/maven/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/maven/io/netty/netty-all/4.0.23.Final/netty-all-4.0.23.Final.jar:/maven/xerces/xercesImpl/2.9.1/xercesImpl-2.9.1.jar:/maven/xml-apis/xml-apis/1.3.04/xml-apis-1.3.04.jar:/maven/org/apache/htrace/htrace-core4/4.1.0-incubating/htrace-core4-4.1.0-incubating.jar:/maven/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar:/maven/com/fasterxml/jackson/core/jackson-databind/2.7.8/jackson-databind-2.7.8.jar:/maven/com/fasterxml/jackson/core/jackson-annotations/2.7.8/jackson-annotations-2.7.8.jar:/maven/com/fasterxml/jackson/core/jackson-core/2.7.8/jackson-core-2.7.8.jar -sourcepath /build/source/hadoop-hdfs-project/hadoop-hdfs/src/main/java -apidir /build/source/hadoop-hdfs-project/hadoop-hdfs/target/site/jdiff/xml -apiname Apache Hadoop HDFS 2.9.1 -->
+<package name="org.apache.hadoop.hdfs">
+ <doc>
+ <![CDATA[<p>A distributed implementation of {@link
+org.apache.hadoop.fs.FileSystem}. This is loosely modelled after
+Google's <a href="http://research.google.com/archive/gfs.html">GFS</a>.</p>
+
+<p>The most important difference is that unlike GFS, Hadoop DFS files
+have strictly one writer at any one time. Bytes are always appended
+to the end of the writer's stream. There is no notion of "record appends"
+or "mutations" that are then checked or reordered. Writers simply emit
+a byte stream. That byte stream is guaranteed to be stored in the
+order written.</p>]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.hdfs.net">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol.datatransfer">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol.datatransfer.sasl">
+</package>
+<package name="org.apache.hadoop.hdfs.protocolPB">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.client">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.protocolPB">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.server">
+ <!-- start interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
+ <interface name="JournalNodeMXBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getJournalsStatus" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get status information (e.g., whether formatted) of JournalNode's journals.
+
+ @return A string presenting status for each journal]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the JMX management interface for JournalNode information]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.block">
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.delegation">
+</package>
+<package name="org.apache.hadoop.hdfs.server.balancer">
+</package>
+<package name="org.apache.hadoop.hdfs.server.blockmanagement">
+</package>
+<package name="org.apache.hadoop.hdfs.server.common">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset.impl">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web.webhdfs">
+</package>
+<package name="org.apache.hadoop.hdfs.server.mover">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode">
+ <!-- start interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
+ <interface name="AuditLogger" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Called during initialization of the logger.
+
+ @param conf The configuration object.]]>
+ </doc>
+ </method>
+ <method name="logAuditEvent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="succeeded" type="boolean"/>
+ <param name="userName" type="java.lang.String"/>
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="cmd" type="java.lang.String"/>
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+ <doc>
+ <![CDATA[Called to log an audit event.
+ <p>
+ This method must return as quickly as possible, since it's called
+ in a critical section of the NameNode's operation.
+
+ @param succeeded Whether authorization succeeded.
+ @param userName Name of the user executing the request.
+ @param addr Remote address of the request.
+ @param cmd The requested command.
+ @param src Path of affected source file.
+ @param dst Path of affected destination file (if any).
+ @param stat File information for operations that change the file's
+ metadata (permissions, owner, times, etc).]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface defining an audit logger.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
+ <class name="HdfsAuditLogger" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.hdfs.server.namenode.AuditLogger"/>
+ <constructor name="HdfsAuditLogger"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logAuditEvent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="succeeded" type="boolean"/>
+ <param name="userName" type="java.lang.String"/>
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="cmd" type="java.lang.String"/>
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <param name="status" type="org.apache.hadoop.fs.FileStatus"/>
+ </method>
+ <method name="logAuditEvent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="succeeded" type="boolean"/>
+ <param name="userName" type="java.lang.String"/>
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="cmd" type="java.lang.String"/>
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="callerContext" type="org.apache.hadoop.ipc.CallerContext"/>
+ <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+ <doc>
+ <![CDATA[Same as
+ {@link #logAuditEvent(boolean, String, InetAddress, String, String, String,
+ FileStatus)} with additional parameters related to logging delegation token
+ tracking IDs.
+
+ @param succeeded Whether authorization succeeded.
+ @param userName Name of the user executing the request.
+ @param addr Remote address of the request.
+ @param cmd The requested command.
+ @param src Path of affected source file.
+ @param dst Path of affected destination file (if any).
+ @param stat File information for operations that change the file's metadata
+ (permissions, owner, times, etc).
+ @param callerContext Context information of the caller
+ @param ugi UserGroupInformation of the current user, or null if not logging
+ token tracking information
+ @param dtSecretManager The token secret manager, or null if not logging
+ token tracking information]]>
+ </doc>
+ </method>
+ <method name="logAuditEvent"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="succeeded" type="boolean"/>
+ <param name="userName" type="java.lang.String"/>
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="cmd" type="java.lang.String"/>
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+ <doc>
+ <![CDATA[Same as
+ {@link #logAuditEvent(boolean, String, InetAddress, String, String,
+ String, FileStatus, CallerContext, UserGroupInformation,
+ DelegationTokenSecretManager)} without {@link CallerContext} information.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Extension of {@link AuditLogger}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
+ <!-- start class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
+ <class name="INodeAttributeProvider" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="INodeAttributeProvider"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="start"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Initialize the provider. This method is called at NameNode startup
+ time.]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shutdown the provider. This method is called at NameNode shutdown time.]]>
+ </doc>
+ </method>
+ <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fullPath" type="java.lang.String"/>
+ <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+ </method>
+ <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathElements" type="java.lang.String[]"/>
+ <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+ </method>
+ <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="components" type="byte[][]"/>
+ <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+ </method>
+ <method name="getExternalAccessControlEnforcer" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultEnforcer" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"/>
+ <doc>
+ <![CDATA[Can be over-ridden by implementations to provide a custom Access Control
+ Enforcer that can provide an alternate implementation of the
+ default permission checking logic.
+ @param defaultEnforcer The Default AccessControlEnforcer
+ @return The AccessControlEnforcer to use]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.ha">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.snapshot">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top.window">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.web.resources">
+</package>
+<package name="org.apache.hadoop.hdfs.server.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.tools">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineEditsViewer">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineImageViewer">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.snapshot">
+</package>
+<package name="org.apache.hadoop.hdfs.util">
+</package>
+<package name="org.apache.hadoop.hdfs.web">
+</package>
+<package name="org.apache.hadoop.hdfs.web.resources">
+</package>
+
+</api>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4dc346d/hadoop-project-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index cfaa698..5f83da3 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -145,7 +145,7 @@
<activeByDefault>false</activeByDefault>
</activation>
<properties>
- <jdiff.stable.api>3.0.2</jdiff.stable.api>
+ <jdiff.stable.api>2.9.1</jdiff.stable.api>
<jdiff.stability>-unstable</jdiff.stability>
<!-- Commented out for HADOOP-11776 -->
<!-- Uncomment param name="${jdiff.compatibility}" in javadoc doclet if compatibility is not empty -->
[43/50] [abbrv] hadoop git commit: HDFS-13551.
TestMiniDFSCluster#testClusterSetStorageCapacity does not shut down cluster.
Contributed by Anbang Hu.
Posted by xy...@apache.org.
HDFS-13551. TestMiniDFSCluster#testClusterSetStorageCapacity does not shut down cluster. Contributed by Anbang Hu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/92ebd466
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/92ebd466
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/92ebd466
Branch: refs/heads/HDDS-4
Commit: 92ebd466c75275118107e6ec665cf50e337a29e7
Parents: 07d8505
Author: Inigo Goiri <in...@apache.org>
Authored: Tue May 15 10:21:42 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Tue May 15 10:21:42 2018 -0700
----------------------------------------------------------------------
.../apache/hadoop/hdfs/TestMiniDFSCluster.java | 44 +++++++++++---------
1 file changed, 25 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/92ebd466/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
index afc977f..d60e025 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
@@ -108,29 +108,35 @@ public class TestMiniDFSCluster {
capacities,
defaultBlockSize,
fileLen);
- verifyStorageCapacity(cluster, capacities);
+ try {
+ verifyStorageCapacity(cluster, capacities);
- /* restart all data nodes */
- cluster.restartDataNodes();
- cluster.waitActive();
- verifyStorageCapacity(cluster, capacities);
+ /* restart all data nodes */
+ cluster.restartDataNodes();
+ cluster.waitActive();
+ verifyStorageCapacity(cluster, capacities);
- /* restart all name nodes */
- cluster.restartNameNodes();
- cluster.waitActive();
- verifyStorageCapacity(cluster, capacities);
+ /* restart all name nodes */
+ cluster.restartNameNodes();
+ cluster.waitActive();
+ verifyStorageCapacity(cluster, capacities);
- /* restart all name nodes firstly and data nodes then */
- cluster.restartNameNodes();
- cluster.restartDataNodes();
- cluster.waitActive();
- verifyStorageCapacity(cluster, capacities);
+ /* restart all name nodes firstly and data nodes then */
+ cluster.restartNameNodes();
+ cluster.restartDataNodes();
+ cluster.waitActive();
+ verifyStorageCapacity(cluster, capacities);
- /* restart all data nodes firstly and name nodes then */
- cluster.restartDataNodes();
- cluster.restartNameNodes();
- cluster.waitActive();
- verifyStorageCapacity(cluster, capacities);
+ /* restart all data nodes firstly and name nodes then */
+ cluster.restartDataNodes();
+ cluster.restartNameNodes();
+ cluster.waitActive();
+ verifyStorageCapacity(cluster, capacities);
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
}
private void verifyStorageCapacity(
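Reduced to a minimal sketch, the shape of the fix (cluster construction elided) is:

MiniDFSCluster cluster = ...; // built with per-datanode storage capacities
try {
  // An assertion failure below can no longer leak the cluster: the finally
  // block guarantees shutdown, so later tests start from a clean state.
  verifyStorageCapacity(cluster, capacities);
  cluster.restartDataNodes();
  cluster.waitActive();
  verifyStorageCapacity(cluster, capacities);
} finally {
  if (cluster != null) {
    cluster.shutdown();
  }
}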
[48/50] [abbrv] hadoop git commit: HDDS-6. Enable SCM kerberos auth.
Contributed by Ajay Kumar.
Posted by xy...@apache.org.
HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/998df5aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/998df5aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/998df5aa
Branch: refs/heads/HDDS-4
Commit: 998df5aad3a9605424e55513de7efcbbd48ed487
Parents: 6348097
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Wed May 9 15:56:03 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Tue May 15 16:55:52 2018 -0700
----------------------------------------------------------------------
.../authentication/util/KerberosUtil.java | 2 +-
.../conf/TestConfigurationFieldsBase.java | 2 +
.../java/org/apache/hadoop/hdds/HddsUtils.java | 13 +-
.../apache/hadoop/hdds/scm/ScmConfigKeys.java | 10 +-
.../scm/protocol/ScmBlockLocationProtocol.java | 3 +
.../StorageContainerLocationProtocol.java | 4 +
.../protocolPB/ScmBlockLocationProtocolPB.java | 6 +
.../StorageContainerLocationProtocolPB.java | 4 +
.../apache/hadoop/ozone/OzoneConfigKeys.java | 4 +-
.../common/src/main/resources/ozone-default.xml | 41 +++-
.../StorageContainerDatanodeProtocol.java | 4 +
.../StorageContainerDatanodeProtocolPB.java | 6 +
.../scm/server/StorageContainerManager.java | 49 ++++-
.../StorageContainerManagerHttpServer.java | 5 +-
.../ozone/client/protocol/ClientProtocol.java | 3 +
hadoop-ozone/common/src/main/bin/start-ozone.sh | 13 +-
hadoop-ozone/common/src/main/bin/stop-ozone.sh | 13 +-
hadoop-ozone/integration-test/pom.xml | 6 +
.../hadoop/ozone/MiniOzoneClusterImpl.java | 17 +-
.../hadoop/ozone/TestSecureOzoneCluster.java | 205 +++++++++++++++++++
.../ozone/TestStorageContainerManager.java | 4 +-
21 files changed, 365 insertions(+), 49 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/998df5aa/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
index c011045..4459928 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
@@ -167,7 +167,7 @@ public class KerberosUtil {
}
/* Return fqdn of the current host */
- static String getLocalHostName() throws UnknownHostException {
+ public static String getLocalHostName() throws UnknownHostException {
return InetAddress.getLocalHost().getCanonicalHostName();
}
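Making getLocalHostName() public presumably lets the SCM resolve the _HOST placeholder in its Kerberos principal at login time; a hedged sketch of such a login, using the keys this commit adds to ScmConfigKeys:

// Resolves _HOST in the configured principal to this host's FQDN, then
// logs the service in from the configured keytab.
SecurityUtil.login(conf,
    ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
    ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
    KerberosUtil.getLocalHostName());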
http://git-wip-us.apache.org/repos/asf/hadoop/blob/998df5aa/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
index 7f27d7d..c20733d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
@@ -436,6 +436,8 @@ public abstract class TestConfigurationFieldsBase {
// Create XML key/value map
LOG_XML.debug("Reading XML property files\n");
xmlKeyValueMap = extractPropertiesFromXml(xmlFilename);
+ // Remove hadoop property set in ozone-default.xml
+ xmlKeyValueMap.remove("hadoop.custom.tags");
LOG_XML.debug("\n=====\n");
// Create default configuration variable key/value map
http://git-wip-us.apache.org/repos/asf/hadoop/blob/998df5aa/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 48c6dce..17c99bb 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -241,18 +241,7 @@ public final class HddsUtils {
}
public static boolean isHddsEnabled(Configuration conf) {
- String securityEnabled =
- conf.get(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
- "simple");
- boolean securityAuthorizationEnabled = conf.getBoolean(
- CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false);
-
- if (securityEnabled.equals("kerberos") || securityAuthorizationEnabled) {
- LOG.error("Ozone is not supported in a security enabled cluster. ");
- return false;
- } else {
- return conf.getBoolean(OZONE_ENABLED, OZONE_ENABLED_DEFAULT);
- }
+ return conf.getBoolean(OZONE_ENABLED, OZONE_ENABLED_DEFAULT);
}
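With the security guard removed, enabling Ozone no longer conflicts with Kerberos; an illustrative configuration (values are examples only):

conf.set("hadoop.security.authentication", "kerberos");
conf.setBoolean("ozone.enabled", true);
// Both settings can now coexist; isHddsEnabled() only checks ozone.enabled.
assert HddsUtils.isHddsEnabled(conf);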
http://git-wip-us.apache.org/repos/asf/hadoop/blob/998df5aa/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 29ccf30..83a431e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -128,8 +128,9 @@ public final class ScmConfigKeys {
"ozone.scm.http-address";
public static final String OZONE_SCM_HTTPS_ADDRESS_KEY =
"ozone.scm.https-address";
- public static final String OZONE_SCM_KEYTAB_FILE =
- "ozone.scm.keytab.file";
+ public static final String OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY =
+ "ozone.scm.kerberos.keytab.file";
+ public static final String OZONE_SCM_KERBEROS_PRINCIPAL_KEY = "ozone.scm.kerberos.principal";
public static final String OZONE_SCM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
@@ -275,6 +276,11 @@ public final class ScmConfigKeys {
public static final String OZONE_SCM_CONTAINER_CLOSE_THRESHOLD =
"ozone.scm.container.close.threshold";
public static final float OZONE_SCM_CONTAINER_CLOSE_THRESHOLD_DEFAULT = 0.9f;
+
+ public static final String SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY =
+ "ozone.scm.web.authentication.kerberos.principal";
+ public static final String SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY =
+ "ozone.scm.web.authentication.kerberos.keytab";
/**
* Never constructed.
*/
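A hedged example of wiring the new keys; the principal and keytab path are placeholders:

// _HOST is expanded to the SCM's fully qualified hostname at login time.
conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
    "scm/_HOST@EXAMPLE.COM");
conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
    "/etc/security/keytabs/scm.keytab");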
http://git-wip-us.apache.org/repos/asf/hadoop/blob/998df5aa/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
index c8d4a80..e17f1c2 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hdds.scm.protocol;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.hdds.scm.ScmInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
@@ -31,6 +33,7 @@ import java.util.List;
* ScmBlockLocationProtocol is used by an HDFS node to find the set of nodes
* to read/write a block.
*/
+@KerberosInfo(serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
public interface ScmBlockLocationProtocol {
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/998df5aa/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index e8d85e0..d36bdf3 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -17,6 +17,8 @@
package org.apache.hadoop.hdds.scm.protocol;
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.ScmInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
@@ -27,11 +29,13 @@ import org.apache.hadoop.hdds.protocol.proto
import java.io.IOException;
import java.util.EnumSet;
import java.util.List;
+import org.apache.hadoop.security.KerberosInfo;
/**
* ContainerLocationProtocol is used by an HDFS node to find the set of nodes
* that currently host a container.
*/
+@KerberosInfo(serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
public interface StorageContainerLocationProtocol {
/**
* Asks SCM where a container should be allocated. SCM responds with the
http://git-wip-us.apache.org/repos/asf/hadoop/blob/998df5aa/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
index 837c95b..89bb066 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
@@ -18,9 +18,13 @@
package org.apache.hadoop.hdds.scm.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.ScmBlockLocationProtocolService;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.security.KerberosInfo;
/**
* Protocol used from an HDFS node to StorageContainerManager. This extends the
@@ -30,6 +34,8 @@ import org.apache.hadoop.ipc.ProtocolInfo;
"org.apache.hadoop.ozone.protocol.ScmBlockLocationProtocol",
protocolVersion = 1)
@InterfaceAudience.Private
+@KerberosInfo(
+ serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
public interface ScmBlockLocationProtocolPB
extends ScmBlockLocationProtocolService.BlockingInterface {
}
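
A note on the pattern above: @KerberosInfo carries the name of a configuration key rather than a literal principal; Hadoop's RPC layer resolves the actual principal from the Configuration when a connection is negotiated. A minimal sketch of the same pattern on a hypothetical protocol (MyConfigKeys.MY_SERVICE_PRINCIPAL_KEY is an assumed constant, not a real Hadoop key):

  import org.apache.hadoop.security.KerberosInfo;

  // Hypothetical protocol; the annotation names the config key whose value
  // is the Kerberos principal the server side must be running as.
  @KerberosInfo(serverPrincipal = MyConfigKeys.MY_SERVICE_PRINCIPAL_KEY)
  public interface MyServiceProtocol {
    // RPC methods go here.
  }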
http://git-wip-us.apache.org/repos/asf/hadoop/blob/998df5aa/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
index f234ad3..3bd83f9 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
@@ -21,7 +21,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos
.StorageContainerLocationProtocolService;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.security.KerberosInfo;
/**
* Protocol used from an HDFS node to StorageContainerManager. This extends the
@@ -30,6 +32,8 @@ import org.apache.hadoop.ipc.ProtocolInfo;
@ProtocolInfo(protocolName =
"org.apache.hadoop.ozone.protocol.StorageContainerLocationProtocol",
protocolVersion = 1)
+@KerberosInfo(
+ serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
@InterfaceAudience.Private
public interface StorageContainerLocationProtocolPB
extends StorageContainerLocationProtocolService.BlockingInterface {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/998df5aa/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index b8dbd7b..affe298 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -243,7 +243,9 @@ public final class OzoneConfigKeys {
public static final String HDDS_DATANODE_PLUGINS_KEY =
"hdds.datanode.plugins";
-
+ public static final String OZONE_SECURITY_ENABLED_KEY = "ozone.security.enabled";
+ public static final String OZONE_SYSTEM_TAGS_KEY = "ozone.system.tags";
+ public static final boolean OZONE_SECURITY_ENABLED_DEFAULT = false;
/**
* There is no need to instantiate this class.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/998df5aa/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 774b1b8..46c67fd 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -776,14 +776,6 @@
</description>
</property>
<property>
- <name>ozone.scm.keytab.file</name>
- <value/>
- <tag>OZONE, SECURITY</tag>
- <description>
- The keytab file for Kerberos authentication in SCM.
- </description>
- </property>
- <property>
<name>ozone.scm.max.container.report.threads</name>
<value>100</value>
<tag>OZONE, PERFORMANCE</tag>
@@ -1058,4 +1050,37 @@
</description>
</property>
+ <property>
+ <name>ozone.security.enabled</name>
+ <value>false</value>
+ <tag> OZONE, SECURITY, FLAG</tag>
+ <description>True if security is enabled for Ozone. When this property is true, hadoop.security.authentication should be set to kerberos.
+ </description>
+ </property>
+
+ <property>
+ <name>ozone.scm.kerberos.keytab.file</name>
+ <value></value>
+ <tag> OZONE, SECURITY</tag>
+ <description> The keytab file used by each SCM daemon to login as its
+ service principal. The principal name is configured with
+ ozone.scm.kerberos.principal.
+ </description>
+ </property>
+ <property>
+ <name>ozone.scm.kerberos.principal</name>
+ <value></value>
+ <tag> OZONE, SECURITY</tag>
+ <description>The SCM service principal, e.g. scm/_HOST@REALM.TLD.</description>
+ </property>
+
+ <property>
+ <name>ozone.scm.web.authentication.kerberos.principal</name>
+ <value>HTTP/_HOST@EXAMPLE.COM</value>
+ </property>
+ <property>
+ <name>ozone.scm.web.authentication.kerberos.keytab</name>
+ <value>/etc/security/keytabs/HTTP.keytab</value>
+ </property>
+
</configuration>
\ No newline at end of file
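
A minimal sketch (not part of the patch) of how the keys defined above surface at runtime; the key names and the false default are the ones from this file:

  import org.apache.hadoop.hdds.conf.OzoneConfiguration;

  OzoneConfiguration conf = new OzoneConfiguration();
  // ozone-default.xml supplies the defaults shown in the properties above.
  boolean securityEnabled = conf.getBoolean("ozone.security.enabled", false);
  String scmPrincipal = conf.get("ozone.scm.kerberos.principal");
  String scmKeytab = conf.get("ozone.scm.kerberos.keytab.file");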
http://git-wip-us.apache.org/repos/asf/hadoop/blob/998df5aa/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
index cb657276..80887e7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
@@ -41,11 +41,15 @@ import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
import java.io.IOException;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.security.KerberosInfo;
/**
* The protocol spoken between datanodes and SCM. For specifics, please see
* the protobuf file that defines this protocol.
*/
+@KerberosInfo(
+ serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
@InterfaceAudience.Private
public interface StorageContainerDatanodeProtocol {
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/998df5aa/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
index 9b28b5a..9c32ef8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
@@ -19,7 +19,10 @@ package org.apache.hadoop.ozone.protocolPB;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos
.StorageContainerDatanodeProtocolService;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.security.KerberosInfo;
/**
* Protocol used from a datanode to StorageContainerManager. This extends
@@ -29,6 +32,9 @@ import org.apache.hadoop.ipc.ProtocolInfo;
@ProtocolInfo(protocolName =
"org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol",
protocolVersion = 1)
+@KerberosInfo(
+ serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+ clientPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
public interface StorageContainerDatanodeProtocolPB extends
StorageContainerDatanodeProtocolService.BlockingInterface {
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/998df5aa/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 0fd6843..21c797d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -28,9 +28,11 @@ import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
import com.google.protobuf.BlockingService;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
+import org.apache.hadoop.hdds.scm.HddsServerUtil;
import org.apache.hadoop.hdds.scm.block.BlockManager;
import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
import org.apache.hadoop.hdds.scm.container.ContainerMapping;
@@ -53,6 +55,9 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.common.Storage.StorageState;
import org.apache.hadoop.ozone.common.StorageInfo;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
@@ -71,6 +76,10 @@ import java.util.concurrent.TimeUnit;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY;
import static org.apache.hadoop.util.ExitUtil.terminate;
/**
@@ -141,6 +150,7 @@ public class StorageContainerManager extends ServiceRuntimeInfoImpl
* Key = DatanodeUuid, value = ContainerStat.
*/
private Cache<String, ContainerStat> containerReportCache;
+ private Configuration scmConf;
/**
* Creates a new StorageContainerManager. Configuration will be updated
@@ -149,13 +159,19 @@ public class StorageContainerManager extends ServiceRuntimeInfoImpl
*
* @param conf configuration
*/
- private StorageContainerManager(OzoneConfiguration conf) throws IOException {
+ private StorageContainerManager(OzoneConfiguration conf)
+ throws IOException, AuthenticationException {
final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
-
+ this.scmConf = conf;
StorageContainerManager.initMetrics();
initContainerReportCache(conf);
+ // Authenticate SCM if security is enabled
+ if (this.scmConf.getBoolean(OZONE_SECURITY_ENABLED_KEY,
+ OZONE_SECURITY_ENABLED_DEFAULT)) {
+ loginAsSCMUser(this.scmConf);
+ }
scmStorage = new SCMStorage(conf);
if (scmStorage.getState() != StorageState.INITIALIZED) {
@@ -186,6 +202,33 @@ public class StorageContainerManager extends ServiceRuntimeInfoImpl
}
/**
+ * Login as the configured user for SCM.
+ *
+ * @param conf configuration holding the SCM Kerberos principal and keytab keys
+ */
+ private void loginAsSCMUser(Configuration conf)
+ throws IOException, AuthenticationException {
+ LOG.debug("Ozone security is enabled. Attempting login for SCM user. "
+ + "Principal: {}, keytab: {}", this.scmConf.get
+ (OZONE_SCM_KERBEROS_PRINCIPAL_KEY),
+ this.scmConf.get(OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY));
+
+ if (SecurityUtil.getAuthenticationMethod(conf)
+ .equals(AuthenticationMethod.KERBEROS)) {
+ UserGroupInformation.setConfiguration(this.scmConf);
+ InetSocketAddress socAddr = HddsServerUtil
+ .getScmBlockClientBindAddress(conf);
+ SecurityUtil.login(conf, OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
+ OZONE_SCM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
+ } else {
+ throw new AuthenticationException(
+ SecurityUtil.getAuthenticationMethod(conf)
+ + " authentication method not supported. SCM user login failed.");
+ }
+ LOG.info("SCM login successful.");
+ }
+
+ /**
* Builds a message for logging startup information about an RPC server.
*
* @param description RPC server description
@@ -269,7 +312,7 @@ public class StorageContainerManager extends ServiceRuntimeInfoImpl
public static StorageContainerManager createSCM(String[] argv,
OzoneConfiguration conf)
- throws IOException {
+ throws IOException, AuthenticationException {
if (!HddsUtils.isHddsEnabled(conf)) {
System.err.println(
"SCM cannot be started in secure mode or when " + OZONE_ENABLED + "" +
http://git-wip-us.apache.org/repos/asf/hadoop/blob/998df5aa/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
index 75b2036..da936ad 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdds.scm.server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.server.BaseHttpServer;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
import java.io.IOException;
@@ -63,11 +62,11 @@ public class StorageContainerManagerHttpServer extends BaseHttpServer {
}
@Override protected String getKeytabFile() {
- return ScmConfigKeys.OZONE_SCM_KEYTAB_FILE;
+ return ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY;
}
@Override protected String getSpnegoPrincipal() {
- return OzoneConfigKeys.OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
+ return ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
}
@Override protected String getEnabledKey() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/998df5aa/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 94cc257..80b0a40 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.ozone.client.protocol;
import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -33,6 +34,7 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import java.io.IOException;
import java.util.List;
+import org.apache.hadoop.security.KerberosInfo;
/**
* An implementer of this interface is capable of connecting to Ozone Cluster
@@ -42,6 +44,7 @@ import java.util.List;
* includes: {@link org.apache.hadoop.ozone.client.rpc.RpcClient} for RPC and
* {@link org.apache.hadoop.ozone.client.rest.RestClient} for REST.
*/
+@KerberosInfo(serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
public interface ClientProtocol {
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/998df5aa/hadoop-ozone/common/src/main/bin/start-ozone.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/start-ozone.sh b/hadoop-ozone/common/src/main/bin/start-ozone.sh
index dda0a1c..ba0f714 100644
--- a/hadoop-ozone/common/src/main/bin/start-ozone.sh
+++ b/hadoop-ozone/common/src/main/bin/start-ozone.sh
@@ -47,13 +47,12 @@ else
exit 1
fi
-SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
-SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
-
-if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then
- echo "Ozone is not supported in a security enabled cluster."
- exit 1
-fi
+#SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
+#SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
+#if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then
+# echo "Ozone is not supported in a security enabled cluster."
+# exit 1
+#fi
#---------------------------------------------------------
# Check if ozone is enabled
http://git-wip-us.apache.org/repos/asf/hadoop/blob/998df5aa/hadoop-ozone/common/src/main/bin/stop-ozone.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/stop-ozone.sh b/hadoop-ozone/common/src/main/bin/stop-ozone.sh
index be55be4..ff332f2 100644
--- a/hadoop-ozone/common/src/main/bin/stop-ozone.sh
+++ b/hadoop-ozone/common/src/main/bin/stop-ozone.sh
@@ -47,13 +47,12 @@ else
exit 1
fi
-SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
-SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
-
-if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then
- echo "Ozone is not supported in a security enabled cluster."
- exit 1
-fi
+#SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
+#SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
+#if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then
+# echo "Ozone is not supported in a security enabled cluster."
+# exit 1
+#fi
#---------------------------------------------------------
# Check if ozone is enabled
http://git-wip-us.apache.org/repos/asf/hadoop/blob/998df5aa/hadoop-ozone/integration-test/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml
index c8a932c..4aa1aa5 100644
--- a/hadoop-ozone/integration-test/pom.xml
+++ b/hadoop-ozone/integration-test/pom.xml
@@ -42,6 +42,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-minikdc</artifactId>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-objectstore-service</artifactId>
<scope>provided</scope>
</dependency>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/998df5aa/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 08d7176..e4f8e62 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdds.scm.protocolPB
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.test.GenericTestUtils;
import org.slf4j.Logger;
@@ -287,9 +288,16 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
public MiniOzoneCluster build() throws IOException {
DefaultMetricsSystem.setMiniClusterMode(true);
initializeConfiguration();
- StorageContainerManager scm = createSCM();
- scm.start();
- KeySpaceManager ksm = createKSM();
+ StorageContainerManager scm;
+ KeySpaceManager ksm;
+ try {
+ scm = createSCM();
+ scm.start();
+ ksm = createKSM();
+ } catch (AuthenticationException ex) {
+ throw new IOException("Unable to build MiniOzoneCluster. ", ex);
+ }
+
ksm.start();
List<HddsDatanodeService> hddsDatanodes = createHddsDatanodes(scm);
hddsDatanodes.forEach((datanode) -> datanode.start(null));
@@ -316,7 +324,8 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
*
* @throws IOException
*/
- private StorageContainerManager createSCM() throws IOException {
+ private StorageContainerManager createSCM()
+ throws IOException, AuthenticationException {
configureSCM();
SCMStorage scmStore = new SCMStorage(conf);
scmStore.setClusterId(clusterId);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/998df5aa/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
new file mode 100644
index 0000000..9c430ad
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
@@ -0,0 +1,205 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Properties;
+import java.util.UUID;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.server.SCMStorage;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.KerberosAuthException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.util.KerberosUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test class for a security-enabled Ozone cluster.
+ */
+@InterfaceAudience.Private
+public final class TestSecureOzoneCluster {
+
+ private static final Logger LOGGER = LoggerFactory
+ .getLogger(TestSecureOzoneCluster.class);
+
+ private MiniKdc miniKdc;
+ private OzoneConfiguration conf;
+ private File workDir;
+ private static Properties securityProperties;
+ private File scmKeytab;
+ private File spnegoKeytab;
+ private String curUser;
+
+ @Before
+ public void init() {
+ try {
+ conf = new OzoneConfiguration();
+ startMiniKdc();
+ setSecureConfig(conf);
+ createCredentialsInKDC(conf, miniKdc);
+ } catch (Exception e) {
+ // IOException is an Exception; one catch block covers both original cases.
+ LOGGER.error("Failed to initialize TestSecureOzoneCluster", e);
+ }
+ }
+
+ private void createCredentialsInKDC(Configuration conf, MiniKdc miniKdc)
+ throws Exception {
+ createPrincipal(scmKeytab,
+ conf.get(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY));
+ createPrincipal(spnegoKeytab,
+ conf.get(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY));
+ }
+
+ private void createPrincipal(File keytab, String... principal)
+ throws Exception {
+ miniKdc.createPrincipal(keytab, principal);
+ }
+
+ private void startMiniKdc() throws Exception {
+ workDir = GenericTestUtils
+ .getTestDir(TestSecureOzoneCluster.class.getSimpleName());
+ securityProperties = MiniKdc.createConf();
+ miniKdc = new MiniKdc(securityProperties, workDir);
+ miniKdc.start();
+ }
+
+ private void setSecureConfig(Configuration conf) throws IOException {
+ conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
+ String host = KerberosUtil.getLocalHostName();
+ String realm = miniKdc.getRealm();
+ curUser = UserGroupInformation.getCurrentUser()
+ .getUserName();
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+ "kerberos");
+ conf.set(OZONE_ADMINISTRATORS, curUser);
+
+ conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+ "scm/" + host + "@" + realm);
+ conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
+ "HTTP_SCM/" + host + "@" + realm);
+
+ scmKeytab = new File(workDir, "scm.keytab");
+ spnegoKeytab = new File(workDir, "http.keytab");
+
+ conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
+ scmKeytab.getAbsolutePath());
+ conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY,
+ spnegoKeytab.getAbsolutePath());
+
+ }
+
+ @Test
+ public void testSecureScmStartupSuccess() throws Exception {
+ final String path = GenericTestUtils
+ .getTempPath(UUID.randomUUID().toString());
+ Path scmPath = Paths.get(path, "scm-meta");
+ conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
+ conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
+ SCMStorage scmStore = new SCMStorage(conf);
+ String clusterId = UUID.randomUUID().toString();
+ String scmId = UUID.randomUUID().toString();
+ scmStore.setClusterId(clusterId);
+ scmStore.setScmId(scmId);
+ // writes the version file properties
+ scmStore.initialize();
+ StorageContainerManager scm = StorageContainerManager.createSCM(null, conf);
+ //Reads the SCM Info from SCM instance
+ ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
+ Assert.assertEquals(clusterId, scmInfo.getClusterId());
+ Assert.assertEquals(scmId, scmInfo.getScmId());
+ }
+
+ @Test
+ public void testSecureScmStartupFailure() throws Exception {
+ final String path = GenericTestUtils
+ .getTempPath(UUID.randomUUID().toString());
+ Path scmPath = Paths.get(path, "scm-meta");
+
+ OzoneConfiguration conf = new OzoneConfiguration();
+ conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
+ conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
+ conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
+ conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+ "scm@" + miniKdc.getRealm());
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+ "kerberos");
+
+ SCMStorage scmStore = new SCMStorage(conf);
+ String clusterId = UUID.randomUUID().toString();
+ String scmId = UUID.randomUUID().toString();
+ scmStore.setClusterId(clusterId);
+ scmStore.setScmId(scmId);
+ // writes the version file properties
+ scmStore.initialize();
+ LambdaTestUtils.intercept(IOException.class,
+ "Running in secure mode, but config doesn't have a keytab",
+ () -> {
+ StorageContainerManager.createSCM(null, conf);
+ });
+
+ conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+ "scm/_HOST@EXAMPLE.com");
+ conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
+ "/etc/security/keytabs/scm.keytab");
+
+ LambdaTestUtils.intercept(KerberosAuthException.class, "failure "
+ + "to login: for principal:",
+ () -> {
+ StorageContainerManager.createSCM(null, conf);
+ });
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+ "OAuth2");
+
+ LambdaTestUtils.intercept(IllegalArgumentException.class, "Invalid"
+ + " attribute value for hadoop.security.authentication of OAuth2",
+ () -> {
+ StorageContainerManager.createSCM(null, conf);
+ });
+
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+ "KERBEROS_SSL");
+ LambdaTestUtils.intercept(AuthenticationException.class,
+ "KERBEROS_SSL authentication method not supported.",
+ () -> {
+ StorageContainerManager.createSCM(null, conf);
+ });
+
+ }
+
+}
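
The test above leans on MiniKdc; reduced to a sketch, its lifecycle is (paths and the principal are placeholders; as used here, MiniKdc appends its own realm to the principal names written into the keytab):

  import java.io.File;
  import java.util.Properties;
  import org.apache.hadoop.minikdc.MiniKdc;

  Properties kdcConf = MiniKdc.createConf();
  MiniKdc kdc = new MiniKdc(kdcConf, new File("/tmp/kdc-workdir"));
  kdc.start();
  // Generates keys for scm/localhost@<realm> and writes them to the keytab.
  kdc.createPrincipal(new File("/tmp/scm.keytab"), "scm/localhost");
  String realm = kdc.getRealm();
  // ... run the secured test against the KDC ...
  kdc.stop();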
http://git-wip-us.apache.org/repos/asf/hadoop/blob/998df5aa/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 0081f0d..8e8df7a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.junit.Rule;
import org.junit.Assert;
import org.junit.Test;
@@ -409,7 +410,8 @@ public class TestStorageContainerManager {
}
@Test
- public void testSCMInitializationFailure() throws IOException {
+ public void testSCMInitializationFailure()
+ throws IOException, AuthenticationException {
OzoneConfiguration conf = new OzoneConfiguration();
final String path =
GenericTestUtils.getTempPath(UUID.randomUUID().toString());
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[45/50] [abbrv] hadoop git commit: HDFS-13548.
TestResolveHdfsSymlink#testFcResolveAfs fails on Windows. Contributed by
Anbang Hu.
Posted by xy...@apache.org.
HDFS-13548. TestResolveHdfsSymlink#testFcResolveAfs fails on Windows. Contributed by Anbang Hu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d6195c0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d6195c0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d6195c0
Branch: refs/heads/HDDS-4
Commit: 2d6195c0af4a1840172899ce70bca4ca549eb713
Parents: eaca798
Author: Inigo Goiri <in...@apache.org>
Authored: Tue May 15 11:20:32 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Tue May 15 11:20:32 2018 -0700
----------------------------------------------------------------------
.../src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d6195c0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
index e068cf9..05060af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
@@ -84,7 +84,7 @@ public class TestResolveHdfsSymlink {
final String localTestRoot = helper.getAbsoluteTestRootDir(fcLocal);
Path alphaLocalPath = new Path(fcLocal.getDefaultFileSystem().getUri()
- .toString(), new File(localTestRoot, "alpha").getAbsolutePath());
+ .toString(), new File(localTestRoot, "alpha").getPath());
DFSTestUtil.createFile(FileSystem.getLocal(conf), alphaLocalPath, 16,
(short) 1, 2);
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[31/50] [abbrv] hadoop git commit: YARN-8288. Fix wrong number of
table columns in Resource Model doc. Contributed by Weiwei Yang.
Posted by xy...@apache.org.
YARN-8288. Fix wrong number of table columns in Resource Model doc. Contributed by Weiwei Yang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a2b5914
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a2b5914
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a2b5914
Branch: refs/heads/HDDS-4
Commit: 8a2b5914f3a68148f40f99105acf5dafcc326e89
Parents: 89d0b87
Author: Naganarasimha <na...@apache.org>
Authored: Tue May 15 00:03:38 2018 +0800
Committer: Naganarasimha <na...@apache.org>
Committed: Tue May 15 00:05:23 2018 +0800
----------------------------------------------------------------------
.../hadoop-yarn-site/src/site/markdown/ResourceModel.md | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a2b5914/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
index 75e5c92..f968b5f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md
@@ -45,20 +45,20 @@ The following configuration properties are supported. See below for details.
`resource-types.xml`
-| Configuration Property | Value | Description |
-|:---- |:---- |:---- |
+| Configuration Property | Description |
+|:---- |:---- |
| `yarn.resource-types` | Comma-separated list of additional resources. May not include `memory`, `memory-mb`, or `vcores` |
| `yarn.resource-types.<resource>.units` | Default unit for the specified resource type |
| `yarn.resource-types.<resource>.minimum` | The minimum request for the specified resource type |
| `yarn.resource-types.<resource>.maximum` | The maximum request for the specified resource type |
-`node-resources.xml`
+`node-resources.xml`
-| Configuration Property | Value | Description |
-|:---- |:---- |:---- |
+| Configuration Property | Description |
+|:---- |:---- |
| `yarn.nodemanager.resource-type.<resource>` | The count of the specified resource available from the node manager |
-Please note that the `resource-types.xml` and `node-resources.xml` files
+Please note that the `resource-types.xml` and `node-resources.xml` files
also need to be placed in the same configuration directory as `yarn-site.xml` if
they are used. Alternatively, the properties may be placed into the
`yarn-site.xml` file instead.
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[21/50] [abbrv] hadoop git commit: HDDS-17. Add node to container map
class to simplify state in SCM. Contributed by Anu Engineer.
Posted by xy...@apache.org.
HDDS-17. Add node to container map class to simplify state in SCM.
Contributed by Anu Engineer.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1194ec31
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1194ec31
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1194ec31
Branch: refs/heads/HDDS-4
Commit: 1194ec31d72c3682bf1c97acbfc99c1798fb9c1b
Parents: 4132855
Author: Anu Engineer <ae...@apache.org>
Authored: Sat May 12 09:57:42 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Sat May 12 09:58:20 2018 -0700
----------------------------------------------------------------------
.../hadoop/hdds/scm/container/ContainerID.java | 2 +-
.../hdds/scm/container/states/package-info.java | 2 +-
.../hdds/scm/exceptions/SCMException.java | 4 +-
.../hdds/scm/node/states/Node2ContainerMap.java | 184 +++++++++++
.../hdds/scm/node/states/ReportResult.java | 86 ++++++
.../hdds/scm/node/states/package-info.java | 22 ++
.../scm/node/states/Node2ContainerMapTest.java | 308 +++++++++++++++++++
.../hdds/scm/node/states/package-info.java | 23 ++
.../ozone/genesis/GenesisMemoryProfiler.java | 4 +-
9 files changed, 631 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1194ec31/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
index 9520c8c..9845c04 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
@@ -38,7 +38,7 @@ public class ContainerID implements Comparable {
*/
public ContainerID(long id) {
Preconditions.checkState(id > 0,
- "Container ID should be a positive int");
+ "Container ID should be a positive long. "+ id);
this.id = id;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1194ec31/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
index cf20f39..8ad1c8b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
@@ -17,6 +17,6 @@
*/
/**
- * Container States management package.
+ * Container States package.
*/
package org.apache.hadoop.hdds.scm.container.states;
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1194ec31/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
index 227df3c..d7d70ef 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
@@ -114,6 +114,8 @@ public class SCMException extends IOException {
FAILED_TO_FIND_BLOCK,
IO_EXCEPTION,
UNEXPECTED_CONTAINER_STATE,
- SCM_NOT_INITIALIZED
+ SCM_NOT_INITIALIZED,
+ DUPLICATE_DATANODE,
+ NO_SUCH_DATANODE
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1194ec31/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
new file mode 100644
index 0000000..f850e7a
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.scm.node.states;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+
+import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.DUPLICATE_DATANODE;
+import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.NO_SUCH_DATANODE;
+
+/**
+ * This data structure maintains the set of containers that are on a datanode.
+ * This information is built from the DN container reports.
+ */
+public class Node2ContainerMap {
+ private final Map<UUID, Set<ContainerID>> dn2ContainerMap;
+
+ /**
+ * Constructs a Node2ContainerMap Object.
+ */
+ public Node2ContainerMap() {
+ dn2ContainerMap = new ConcurrentHashMap<>();
+ }
+
+ /**
+ * Returns true if this is a datanode that is already tracked by
+ * Node2ContainerMap.
+ *
+ * @param datanodeID - UUID of the Datanode.
+ * @return True if this is tracked, false if this map does not know about it.
+ */
+ public boolean isKnownDatanode(UUID datanodeID) {
+ Preconditions.checkNotNull(datanodeID);
+ return dn2ContainerMap.containsKey(datanodeID);
+ }
+
+ /**
+ * Insert a new datanode into Node2Container Map.
+ *
+ * @param datanodeID -- Datanode UUID
+ * @param containerIDs - List of ContainerIDs.
+ */
+ public void insertNewDatanode(UUID datanodeID, Set<ContainerID> containerIDs)
+ throws SCMException {
+ Preconditions.checkNotNull(containerIDs);
+ Preconditions.checkNotNull(datanodeID);
+ if(dn2ContainerMap.putIfAbsent(datanodeID, containerIDs) != null) {
+ throw new SCMException("Node already exists in the map",
+ DUPLICATE_DATANODE);
+ }
+ }
+
+ /**
+ * Updates the Container list of an existing DN.
+ *
+ * @param datanodeID - UUID of DN.
+ * @param containers - Set of Containers that are present on the DN.
+ * @throws SCMException - if we don't know about this datanode, for new DN
+ * use insertNewDatanode.
+ */
+ public void updateDatanodeMap(UUID datanodeID, Set<ContainerID> containers)
+ throws SCMException {
+ Preconditions.checkNotNull(datanodeID);
+ Preconditions.checkNotNull(containers);
+ // Replace the DN's container set with the new one; computeIfPresent
+ // returns null only when the datanode is not in the map. The original
+ // lambda returned the old value and so never actually updated the map.
+ if (dn2ContainerMap.computeIfPresent(datanodeID, (k, v) -> containers) == null) {
+ throw new SCMException("No such datanode", NO_SUCH_DATANODE);
+ }
+ }
+
+ /**
+ * Removes the datanode entry from the map.
+ * @param datanodeID - Datanode ID.
+ */
+ public void removeDatanode(UUID datanodeID) {
+ Preconditions.checkNotNull(datanodeID);
+ dn2ContainerMap.computeIfPresent(datanodeID, (k, v) -> null);
+ }
+
+ /**
+ * Returns null if there are no containers associated with this datanode ID.
+ *
+ * @param datanode - UUID
+ * @return Set of containers or Null.
+ */
+ public Set<ContainerID> getContainers(UUID datanode) {
+ Preconditions.checkNotNull(datanode);
+ return dn2ContainerMap.computeIfPresent(datanode, (k, v) ->
+ Collections.unmodifiableSet(v));
+ }
+
+ /**
+ * Processes a container report from a datanode and classifies the outcome.
+ *
+ * @param datanodeID - UUID of the reporting datanode.
+ * @param containers - Set of ContainerIDs present in the report.
+ * @return ReportResult with status and any new/missing container sets.
+ */
+ public ReportResult processReport(UUID datanodeID,
+ Set<ContainerID> containers) {
+ Preconditions.checkNotNull(datanodeID);
+ Preconditions.checkNotNull(containers);
+
+ if (!isKnownDatanode(datanodeID)) {
+ return ReportResult.ReportResultBuilder.newBuilder()
+ .setStatus(ReportStatus.NEW_DATANODE_FOUND)
+ .setNewContainers(containers)
+ .build();
+ }
+
+ // Conditions like Zero length containers should be handled by removeAll.
+ Set<ContainerID> currentSet = dn2ContainerMap.get(datanodeID);
+ TreeSet<ContainerID> newContainers = new TreeSet<>(containers);
+ newContainers.removeAll(currentSet);
+
+ TreeSet<ContainerID> missingContainers = new TreeSet<>(currentSet);
+ missingContainers.removeAll(containers);
+
+ if (newContainers.isEmpty() && missingContainers.isEmpty()) {
+ return ReportResult.ReportResultBuilder.newBuilder()
+ .setStatus(ReportStatus.ALL_IS_WELL)
+ .build();
+ }
+
+ if (newContainers.isEmpty() && !missingContainers.isEmpty()) {
+ return ReportResult.ReportResultBuilder.newBuilder()
+ .setStatus(ReportStatus.MISSING_CONTAINERS)
+ .setMissingContainers(missingContainers)
+ .build();
+ }
+
+ if (!newContainers.isEmpty() && missingContainers.isEmpty()) {
+ return ReportResult.ReportResultBuilder.newBuilder()
+ .setStatus(ReportStatus.NEW_CONTAINERS_FOUND)
+ .setNewContainers(newContainers)
+ .build();
+ }
+
+ if (!newContainers.isEmpty() && !missingContainers.isEmpty()) {
+ return ReportResult.ReportResultBuilder.newBuilder()
+ .setStatus(ReportStatus.MISSING_AND_NEW_CONTAINERS_FOUND)
+ .setNewContainers(newContainers)
+ .setMissingContainers(missingContainers)
+ .build();
+ }
+
+ // default status & Make compiler happy
+ return ReportResult.ReportResultBuilder.newBuilder()
+ .setStatus(ReportStatus.ALL_IS_WELL)
+ .build();
+ }
+
+
+
+
+
+ /**
+ * Results possible from processing a container report by
+ * Node2ContainerMapper.
+ */
+ public enum ReportStatus {
+ ALL_IS_WELL,
+ MISSING_CONTAINERS,
+ NEW_CONTAINERS_FOUND,
+ MISSING_AND_NEW_CONTAINERS_FOUND,
+ NEW_DATANODE_FOUND
+ }
+}
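
To make the intended flow concrete, a small usage sketch of the class above (the IDs are arbitrary; insertNewDatanode and processReport behave as defined in the code):

  import java.util.Set;
  import java.util.TreeSet;
  import java.util.UUID;
  import org.apache.hadoop.hdds.scm.container.ContainerID;

  Node2ContainerMap map = new Node2ContainerMap();
  UUID dn = UUID.randomUUID();

  Set<ContainerID> initial = new TreeSet<>();
  initial.add(new ContainerID(1));
  map.insertNewDatanode(dn, initial);   // throws SCMException if DN exists

  Set<ContainerID> report = new TreeSet<>();
  report.add(new ContainerID(1));
  report.add(new ContainerID(2));       // container 2 is new on this DN

  ReportResult result = map.processReport(dn, report);
  // result.getStatus() == Node2ContainerMap.ReportStatus.NEW_CONTAINERS_FOUND
  // result.getNewContainers() contains ContainerID(2)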
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1194ec31/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java
new file mode 100644
index 0000000..cb06cb3
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.scm.node.states;
+
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+
+import java.util.Set;
+
+/**
+ * A container report gets processed by the Node2ContainerMap and the
+ * outcome is returned as a ReportResult.
+ */
+public class ReportResult {
+ private Node2ContainerMap.ReportStatus status;
+ private Set<ContainerID> missingContainers;
+ private Set<ContainerID> newContainers;
+
+ ReportResult(Node2ContainerMap.ReportStatus status,
+ Set<ContainerID> missingContainers,
+ Set<ContainerID> newContainers) {
+ this.status = status;
+ this.missingContainers = missingContainers;
+ this.newContainers = newContainers;
+ }
+
+ public Node2ContainerMap.ReportStatus getStatus() {
+ return status;
+ }
+
+ public Set<ContainerID> getMissingContainers() {
+ return missingContainers;
+ }
+
+ public Set<ContainerID> getNewContainers() {
+ return newContainers;
+ }
+
+ static class ReportResultBuilder {
+ private Node2ContainerMap.ReportStatus status;
+ private Set<ContainerID> missingContainers;
+ private Set<ContainerID> newContainers;
+
+ static ReportResultBuilder newBuilder() {
+ return new ReportResultBuilder();
+ }
+
+ public ReportResultBuilder setStatus(
+ Node2ContainerMap.ReportStatus newstatus) {
+ this.status = newstatus;
+ return this;
+ }
+
+ public ReportResultBuilder setMissingContainers(
+ Set<ContainerID> missingContainersList) {
+ this.missingContainers = missingContainersList;
+ return this;
+ }
+
+ public ReportResultBuilder setNewContainers(
+ Set<ContainerID> newContainersList) {
+ this.newContainers = newContainersList;
+ return this;
+ }
+
+ ReportResult build() {
+ return new ReportResult(status, missingContainers, newContainers);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1194ec31/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
new file mode 100644
index 0000000..c429c5c
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ *
+ */
+
+/**
+ * Node States package.
+ */
+package org.apache.hadoop.hdds.scm.node.states;
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1194ec31/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMapTest.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMapTest.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMapTest.java
new file mode 100644
index 0000000..79f1b40
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMapTest.java
@@ -0,0 +1,308 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.scm.node.states;
+
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * Test class for Node2ContainerMap.
+ */
+public class Node2ContainerMapTest {
+ private final static int DATANODE_COUNT = 300;
+ private final static int CONTAINER_COUNT = 1000;
+ private final Map<UUID, TreeSet<ContainerID>> testData = new
+ ConcurrentHashMap<>();
+
+ @Rule
+ public ExpectedException thrown = ExpectedException.none();
+
+ private void generateData() {
+ for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) {
+ TreeSet<ContainerID> currentSet = new TreeSet<>();
+ for (int cnIndex = 1; cnIndex <= CONTAINER_COUNT; cnIndex++) {
+ long currentCnIndex = (dnIndex * CONTAINER_COUNT) + cnIndex;
+ currentSet.add(new ContainerID(currentCnIndex));
+ }
+ testData.put(UUID.randomUUID(), currentSet);
+ }
+ }
+
+ private UUID getFirstKey() {
+ return testData.keySet().iterator().next();
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ generateData();
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ }
+
+ @Test
+ public void testIsKnownDatanode() throws SCMException {
+ Node2ContainerMap map = new Node2ContainerMap();
+ UUID knownNode = getFirstKey();
+ UUID unknownNode = UUID.randomUUID();
+ Set<ContainerID> containerIDs = testData.get(knownNode);
+ map.insertNewDatanode(knownNode, containerIDs);
+ Assert.assertTrue("Not able to detect a known node",
+ map.isKnownDatanode(knownNode));
+ Assert.assertFalse("Unknown node detected",
+ map.isKnownDatanode(unknownNode));
+ }
+
+ @Test
+ public void testInsertNewDatanode() throws SCMException {
+ Node2ContainerMap map = new Node2ContainerMap();
+ UUID knownNode = getFirstKey();
+ Set<ContainerID> containerIDs = testData.get(knownNode);
+ map.insertNewDatanode(knownNode, containerIDs);
+ Set<ContainerID> readSet = map.getContainers(knownNode);
+
+ // Assert that all elements are present in the set that we read back from
+ // the node map.
+ Set<ContainerID> newSet = new TreeSet<>(readSet);
+ Assert.assertTrue(newSet.removeAll(containerIDs));
+ Assert.assertTrue(newSet.isEmpty());
+
+ // Removing the datanode and inserting it again must succeed.
+ map.removeDatanode(knownNode);
+ map.insertNewDatanode(knownNode, containerIDs);
+
+ // A duplicate insert must fail; keep this last, since the
+ // ExpectedException rule ends the test when the exception fires.
+ thrown.expect(SCMException.class);
+ thrown.expectMessage("already exists");
+ map.insertNewDatanode(knownNode, containerIDs);
+ }
+
+ @Test
+ public void testProcessReportCheckOneNode() throws SCMException {
+ UUID key = getFirstKey();
+ Set<ContainerID> values = testData.get(key);
+ Node2ContainerMap map = new Node2ContainerMap();
+ map.insertNewDatanode(key, values);
+ Assert.assertTrue(map.isKnownDatanode(key));
+ ReportResult result = map.processReport(key, values);
+ Assert.assertEquals(Node2ContainerMap.ReportStatus.ALL_IS_WELL,
+ result.getStatus());
+ }
+
+ @Test
+ public void testProcessReportInsertAll() throws SCMException {
+ Node2ContainerMap map = new Node2ContainerMap();
+
+ for (Map.Entry<UUID, TreeSet<ContainerID>> keyEntry : testData.entrySet()) {
+ map.insertNewDatanode(keyEntry.getKey(), keyEntry.getValue());
+ }
+ // Assert all Keys are known datanodes.
+ for (UUID key : testData.keySet()) {
+ Assert.assertTrue(map.isKnownDatanode(key));
+ }
+ }
+
+ /*
+ For ProcessReport we have to test the following scenarios.
+
+ 1. New Datanode - A new datanode appears and we have to add that to the
+ SCM's Node2Container Map.
+
+ 2. New Container - A Datanode exists, but a new container is added to that
+ DN. We need to detect that and return a list of added containers.
+
+ 3. Missing Container - A Datanode exists, but one of the expected
+ containers on that datanode is missing. We need to detect that.
+
+ 4. We get a container report that has both the missing and new containers.
+ We need to return separate lists for these, as sketched below.
+ */
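+
+ /*
+ A hypothetical caller-side sketch (not part of this patch) of how these
+ scenarios map onto the ReportStatus values that the tests below expect:
+
+ ReportResult result = map.processReport(dnUuid, reportedContainers);
+ switch (result.getStatus()) {
+ case NEW_DATANODE_FOUND: // scenario 1: register the new node
+ case NEW_CONTAINERS_FOUND: // scenario 2: see result.getNewContainers()
+ case MISSING_CONTAINERS: // scenario 3: see result.getMissingContainers()
+ case MISSING_AND_NEW_CONTAINERS_FOUND: // scenario 4: both lists populated
+ case ALL_IS_WELL: // the report matches the recorded state
+ default:
+ break;
+ }
+ */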
+
+ /**
+ * Assert that we are able to detect the addition of a new datanode.
+ *
+ * @throws SCMException
+ */
+ @Test
+ public void testProcessReportDetectNewDataNode() throws SCMException {
+ Node2ContainerMap map = new Node2ContainerMap();
+ // If we attempt to process a report for a node that is not in the map,
+ // the result status is NEW_DATANODE_FOUND.
+ UUID key = getFirstKey();
+ TreeSet<ContainerID> values = testData.get(key);
+ ReportResult result = map.processReport(key, values);
+ Assert.assertEquals(Node2ContainerMap.ReportStatus.NEW_DATANODE_FOUND,
+ result.getStatus());
+ Assert.assertEquals(values.size(), result.getNewContainers().size());
+ }
+
+ /**
+ * This test asserts that processReport is able to detect new containers
+ * when they are added to a datanode. To do that we populate the DN with a
+ * list of containerIDs, then add a few more containers and make sure that
+ * they are detected.
+ *
+ * @throws SCMException
+ */
+ @Test
+ public void testProcessReportDetectNewContainers() throws SCMException {
+ Node2ContainerMap map = new Node2ContainerMap();
+ UUID key = getFirstKey();
+ TreeSet<ContainerID> values = testData.get(key);
+ map.insertNewDatanode(key, values);
+
+ final int newCount = 100;
+ // This is not a mistake: ContainerID ordering appears to be reversed,
+ // so pollFirst() returns the highest container ID.
+ ContainerID last = values.pollFirst();
+ TreeSet<ContainerID> addedContainers = new TreeSet<>();
+ for (int x = 1; x <= newCount; x++) {
+ long cTemp = last.getId() + x;
+ addedContainers.add(new ContainerID(cTemp));
+ }
+
+ // This set is the superset of the existing and the newly added containers.
+ TreeSet<ContainerID> newContainersSet = new TreeSet<>(values);
+ newContainersSet.addAll(addedContainers);
+
+ ReportResult result = map.processReport(key, newContainersSet);
+
+ // Assert that the report flags exactly the added containers as new.
+ Assert.assertEquals(Node2ContainerMap.ReportStatus.NEW_CONTAINERS_FOUND,
+ result.getStatus());
+
+ Assert.assertEquals(addedContainers.size(),
+ result.getNewContainers().size());
+
+ // Assert that the reported new container IDs match the ones we added.
+ Assert.assertTrue("Reported new containers do not match the added set.",
+ result.getNewContainers().removeAll(addedContainers));
+ }
+
+ /**
+ * This test asserts that processReport is able to detect missing containers
+ * if they are missing from a container report.
+ *
+ * @throws SCMException
+ */
+ @Test
+ public void testProcessReportDetectMissingContainers() throws SCMException {
+ Node2ContainerMap map = new Node2ContainerMap();
+ UUID key = getFirstKey();
+ TreeSet<ContainerID> values = testData.get(key);
+ map.insertNewDatanode(key, values);
+
+ final int removeCount = 100;
+ Random r = new Random();
+
+ ContainerID first = values.pollLast();
+ TreeSet<ContainerID> removedContainers = new TreeSet<>();
+
+ // Pick random containers to remove; collisions are fine, since duplicate
+ // IDs simply collapse in the set.
+ for (int x = 0; x < removeCount; x++) {
+ int startBase = (int) first.getId();
+ long cTemp = r.nextInt(values.size());
+ removedContainers.add(new ContainerID(cTemp + startBase));
+ }
+
+ // This set is a new set with some containers removed.
+ TreeSet<ContainerID> newContainersSet = new TreeSet<>(values);
+ newContainersSet.removeAll(removedContainers);
+
+ ReportResult result = map.processReport(key, newContainersSet);
+
+ // Assert that the number of missing containers matches what we removed.
+ Assert.assertEquals(Node2ContainerMap.ReportStatus.MISSING_CONTAINERS,
+ result.getStatus());
+ Assert.assertEquals(removedContainers.size(),
+ result.getMissingContainers().size());
+
+ // Assert that the missing container IDs match the ones we removed.
+ Assert.assertTrue("Missing containers do not match the removed set.",
+ result.getMissingContainers().removeAll(removedContainers));
+ }
+
+ @Test
+ public void testProcessReportDetectNewAndMissingContainers() throws
+ SCMException {
+ Node2ContainerMap map = new Node2ContainerMap();
+ UUID key = getFirstKey();
+ TreeSet<ContainerID> values = testData.get(key);
+ map.insertNewDatanode(key, values);
+
+ Set<ContainerID> insertedSet = new TreeSet<>();
+ // Insert containers with IDs 1..30; these are new to this datanode.
+ for (int x = 1; x <= 30; x++) {
+ insertedSet.add(new ContainerID(x));
+ }
+
+ final int removeCount = 100;
+ Random r = new Random();
+
+ ContainerID first = values.pollLast();
+ TreeSet<ContainerID> removedContainers = new TreeSet<>();
+
+ // Pick random containers to remove; collisions are fine, since duplicate
+ // IDs simply collapse in the set.
+ for (int x = 0; x < removeCount; x++) {
+ int startBase = (int) first.getId();
+ long cTemp = r.nextInt(values.size());
+ removedContainers.add(new ContainerID(cTemp + startBase));
+ }
+
+ Set<ContainerID> newSet = new TreeSet<>(values);
+ newSet.addAll(insertedSet);
+ newSet.removeAll(removedContainers);
+
+ ReportResult result = map.processReport(key, newSet);
+
+ Assert.assertEquals(
+ Node2ContainerMap.ReportStatus.MISSING_AND_NEW_CONTAINERS_FOUND,
+ result.getStatus());
+ Assert.assertEquals(removedContainers.size(),
+ result.getMissingContainers().size());
+
+ // Assert that the missing container IDs match the ones we removed.
+ Assert.assertTrue("Missing containers do not match the removed set.",
+ result.getMissingContainers().removeAll(removedContainers));
+
+ Assert.assertEquals(insertedSet.size(),
+ result.getNewContainers().size());
+
+ // Assert that the new container IDs match the ones we inserted.
+ Assert.assertTrue("New containers do not match the inserted set.",
+ result.getNewContainers().removeAll(insertedSet));
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1194ec31/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
new file mode 100644
index 0000000..6610fcd
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/**
+ * Test Node2Container Map.
+ */
+package org.apache.hadoop.hdds.scm.node.states;
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1194ec31/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java
index 090f1a7..8ba19fc 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.genesis;
+import org.apache.hadoop.conf.StorageUnit;
import org.openjdk.jmh.infra.BenchmarkParams;
import org.openjdk.jmh.infra.IterationParams;
import org.openjdk.jmh.profile.InternalProfiler;
@@ -46,7 +47,8 @@ public class GenesisMemoryProfiler implements InternalProfiler {
long totalHeap = Runtime.getRuntime().totalMemory();
Collection<ScalarResult> samples = new ArrayList<>();
- samples.add(new ScalarResult("Max heap", totalHeap, "bytes",
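+ // Report the sampled heap size in GB rather than raw bytes.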
+ samples.add(new ScalarResult("Max heap",
+ StorageUnit.BYTES.toGBs(totalHeap), "GBs",
AggregationPolicy.MAX));
return samples;
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[44/50] [abbrv] hadoop git commit: HDFS-11700.
TestHDFSServerPorts#testBackupNodePorts doesn't pass on Windows. Contributed
by Anbang Hu.
Posted by xy...@apache.org.
HDFS-11700. TestHDFSServerPorts#testBackupNodePorts doesn't pass on Windows. Contributed by Anbang Hu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eaca7980
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eaca7980
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eaca7980
Branch: refs/heads/HDDS-4
Commit: eaca7980ad2a750f6b0025a706062740c2ddded2
Parents: 92ebd46
Author: Inigo Goiri <in...@apache.org>
Authored: Tue May 15 10:27:36 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Tue May 15 10:27:36 2018 -0700
----------------------------------------------------------------------
.../test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java | 5 +++++
1 file changed, 5 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/eaca7980/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
index 59e8555..12d9253 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
@@ -381,6 +381,11 @@ public class TestHDFSServerPorts {
assertFalse("Backup started on same port as Namenode",
canStartBackupNode(backup_config)); // should fail
+ // Reset the namenode backup address because Windows does not properly
+ // release the previously used port.
+ backup_config.set(
+ DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, THIS_HOST);
+
// bind http server to a different port
backup_config.set(
DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, THIS_HOST);
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[10/50] [abbrv] hadoop git commit: HDDS-39. Ozone: Compile
Ozone/HDFS/Cblock protobuf files with proto3 compiler using maven protoc
plugin. Contributed by Mukul Kumar Singh.
Posted by xy...@apache.org.
HDDS-39. Ozone: Compile Ozone/HDFS/Cblock protobuf files with proto3 compiler using maven protoc plugin.
Contributed by Mukul Kumar Singh.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c1d64d60
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c1d64d60
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c1d64d60
Branch: refs/heads/HDDS-4
Commit: c1d64d60f6ef3cb9ed89669501ca5b1efbab3c28
Parents: 3a93af7
Author: Anu Engineer <ae...@apache.org>
Authored: Fri May 11 11:08:45 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Fri May 11 11:08:45 2018 -0700
----------------------------------------------------------------------
.../apache/hadoop/hdds/scm/XceiverClient.java | 16 +++---
.../hadoop/hdds/scm/XceiverClientHandler.java | 10 ++--
.../hdds/scm/XceiverClientInitializer.java | 18 ++++---
.../hadoop/hdds/scm/XceiverClientMetrics.java | 2 +-
.../hadoop/hdds/scm/XceiverClientRatis.java | 18 +++----
.../scm/client/ContainerOperationClient.java | 5 +-
.../hdds/scm/storage/ChunkInputStream.java | 6 +--
.../hdds/scm/storage/ChunkOutputStream.java | 8 +--
hadoop-hdds/common/pom.xml | 52 +++++++++++++++++++-
.../org/apache/hadoop/hdds/client/BlockID.java | 2 +-
.../hadoop/hdds/scm/XceiverClientSpi.java | 4 +-
.../hadoop/hdds/scm/client/ScmClient.java | 3 +-
.../helpers/StorageContainerException.java | 2 +-
.../scm/storage/ContainerProtocolCalls.java | 40 ++++++++-------
.../container/common/helpers/ChunkInfo.java | 2 +-
.../ozone/container/common/helpers/KeyData.java | 2 +-
.../com/google/protobuf/ShadedProtoUtil.java | 38 --------------
.../com/google/protobuf/package-info.java | 22 ---------
.../main/proto/DatanodeContainerProtocol.proto | 2 +-
.../container/common/helpers/ChunkUtils.java | 36 +++++++-------
.../container/common/helpers/ContainerData.java | 4 +-
.../common/helpers/ContainerMetrics.java | 2 +-
.../common/helpers/ContainerUtils.java | 6 +--
.../container/common/helpers/FileUtils.java | 4 +-
.../container/common/helpers/KeyUtils.java | 10 ++--
.../container/common/impl/ChunkManagerImpl.java | 10 ++--
.../common/impl/ContainerManagerImpl.java | 42 ++++++++--------
.../ozone/container/common/impl/Dispatcher.java | 26 +++++-----
.../container/common/impl/KeyManagerImpl.java | 6 +--
.../common/interfaces/ChunkManager.java | 2 +-
.../common/interfaces/ContainerDispatcher.java | 4 +-
.../background/BlockDeletingService.java | 5 +-
.../common/transport/server/XceiverServer.java | 15 +++---
.../transport/server/XceiverServerHandler.java | 8 +--
.../server/XceiverServerInitializer.java | 18 ++++---
.../server/ratis/ContainerStateMachine.java | 25 ++++------
.../scm/cli/container/InfoContainerHandler.java | 5 +-
.../ozone/client/io/ChunkGroupInputStream.java | 2 +-
.../ozone/client/io/ChunkGroupOutputStream.java | 2 +-
.../client/io/OzoneContainerTranslation.java | 2 +-
.../ozone/container/ContainerTestHelper.java | 10 ++--
.../common/TestBlockDeletingService.java | 2 +-
.../common/impl/TestContainerPersistence.java | 6 +--
.../container/metrics/TestContainerMetrics.java | 8 +--
.../container/ozoneimpl/TestOzoneContainer.java | 3 +-
.../container/server/TestContainerServer.java | 10 ++--
.../ozone/scm/TestContainerSmallFile.java | 2 +-
.../ozone/scm/TestXceiverClientMetrics.java | 12 ++---
.../hadoop/ozone/web/client/TestKeys.java | 2 +-
.../genesis/BenchMarkDatanodeDispatcher.java | 27 ++++++----
hadoop-project/pom.xml | 11 +++--
51 files changed, 292 insertions(+), 287 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
index 5c702c6..6d33cd4 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
@@ -20,18 +20,18 @@ package org.apache.hadoop.hdds.scm;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
-import io.netty.bootstrap.Bootstrap;
-import io.netty.channel.Channel;
-import io.netty.channel.EventLoopGroup;
-import io.netty.channel.nio.NioEventLoopGroup;
-import io.netty.channel.socket.nio.NioSocketChannel;
-import io.netty.handler.logging.LogLevel;
-import io.netty.handler.logging.LoggingHandler;
+import org.apache.ratis.shaded.io.netty.bootstrap.Bootstrap;
+import org.apache.ratis.shaded.io.netty.channel.Channel;
+import org.apache.ratis.shaded.io.netty.channel.EventLoopGroup;
+import org.apache.ratis.shaded.io.netty.channel.nio.NioEventLoopGroup;
+import org.apache.ratis.shaded.io.netty.channel.socket.nio.NioSocketChannel;
+import org.apache.ratis.shaded.io.netty.handler.logging.LogLevel;
+import org.apache.ratis.shaded.io.netty.handler.logging.LoggingHandler;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.slf4j.Logger;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java
index e2b55ac..6a2286c 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java
@@ -18,13 +18,13 @@
package org.apache.hadoop.hdds.scm;
import com.google.common.base.Preconditions;
-import io.netty.channel.Channel;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.SimpleChannelInboundHandler;
+import org.apache.ratis.shaded.io.netty.channel.Channel;
+import org.apache.ratis.shaded.io.netty.channel.ChannelHandlerContext;
+import org.apache.ratis.shaded.io.netty.channel.SimpleChannelInboundHandler;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerCommandResponseProto;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientInitializer.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientInitializer.java
index e10a9f6..90e2f5a 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientInitializer.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientInitializer.java
@@ -17,15 +17,17 @@
*/
package org.apache.hadoop.hdds.scm;
-import io.netty.channel.ChannelInitializer;
-import io.netty.channel.ChannelPipeline;
-import io.netty.channel.socket.SocketChannel;
-import io.netty.handler.codec.protobuf.ProtobufDecoder;
-import io.netty.handler.codec.protobuf.ProtobufEncoder;
-import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-import io.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender;
+import org.apache.ratis.shaded.io.netty.channel.ChannelInitializer;
+import org.apache.ratis.shaded.io.netty.channel.ChannelPipeline;
+import org.apache.ratis.shaded.io.netty.channel.socket.SocketChannel;
+import org.apache.ratis.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
+import org.apache.ratis.shaded.io.netty.handler.codec.protobuf.ProtobufEncoder;
+import org.apache.ratis.shaded.io.netty.handler.codec.protobuf
+ .ProtobufVarint32FrameDecoder;
+import org.apache.ratis.shaded.io.netty.handler.codec.protobuf
+ .ProtobufVarint32LengthFieldPrepender;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import java.util.concurrent.Semaphore;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
index a61eba1..fbc348c 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hdds.scm;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index d010c69..0effa8f 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -19,14 +19,15 @@
package org.apache.hadoop.hdds.scm;
import com.google.common.base.Preconditions;
-import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.ratis.shaded.com.google.protobuf
+ .InvalidProtocolBufferException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerCommandResponseProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.ratis.RatisHelper;
@@ -37,7 +38,6 @@ import org.apache.ratis.protocol.RaftPeer;
import org.apache.ratis.rpc.RpcType;
import org.apache.ratis.rpc.SupportedRpcType;
import org.apache.ratis.shaded.com.google.protobuf.ByteString;
-import org.apache.ratis.shaded.com.google.protobuf.ShadedProtoUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -211,8 +211,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
private RaftClientReply sendRequest(ContainerCommandRequestProto request)
throws IOException {
boolean isReadOnlyRequest = isReadOnly(request);
- ByteString byteString =
- ShadedProtoUtil.asShadedByteString(request.toByteArray());
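+ // The generated message classes now use the Ratis-shaded protobuf, so
+ // no shaded/unshaded ByteString conversion is needed here.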
+ ByteString byteString = request.toByteString();
LOG.debug("sendCommand {} {}", isReadOnlyRequest, request);
final RaftClientReply reply = isReadOnlyRequest ?
getClient().sendReadOnly(() -> byteString) :
@@ -224,8 +223,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
private CompletableFuture<RaftClientReply> sendRequestAsync(
ContainerCommandRequestProto request) throws IOException {
boolean isReadOnlyRequest = isReadOnly(request);
- ByteString byteString =
- ShadedProtoUtil.asShadedByteString(request.toByteArray());
+ ByteString byteString = request.toByteString();
LOG.debug("sendCommandAsync {} {}", isReadOnlyRequest, request);
return isReadOnlyRequest ? getClient().sendReadOnlyAsync(() -> byteString) :
getClient().sendAsync(() -> byteString);
@@ -237,7 +235,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
final RaftClientReply reply = sendRequest(request);
Preconditions.checkState(reply.isSuccess());
return ContainerCommandResponseProto.parseFrom(
- ShadedProtoUtil.asByteString(reply.getMessage().getContent()));
+ reply.getMessage().getContent());
}
/**
@@ -257,7 +255,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
.thenApply(reply -> {
try {
return ContainerCommandResponseProto.parseFrom(
- ShadedProtoUtil.asByteString(reply.getMessage().getContent()));
+ reply.getMessage().getContent());
} catch (InvalidProtocolBufferException e) {
throw new CompletionException(e);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
index 15d197c..07f6cec 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
@@ -25,8 +25,9 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.protocolPB
.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .ContainerData;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ReadContainerResponseProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
index c4c3362..020c684 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
@@ -18,12 +18,12 @@
package org.apache.hadoop.hdds.scm.storage;
-import com.google.protobuf.ByteString;
+import org.apache.ratis.shaded.com.google.protobuf.ByteString;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ReadChunkResponseProto;
import org.apache.hadoop.hdds.client.BlockID;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
index 8fce00d..779e636 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
@@ -18,13 +18,13 @@
package org.apache.hadoop.hdds.scm.storage;
-import com.google.protobuf.ByteString;
+import org.apache.ratis.shaded.com.google.protobuf.ByteString;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyData;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyValue;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyData;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
import org.apache.hadoop.hdds.client.BlockID;
import java.io.IOException;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index bf53042..6310df1 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -78,8 +78,59 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
</dependencies>
<build>
+ <extensions>
+ <extension>
+ <groupId>kr.motd.maven</groupId>
+ <artifactId>os-maven-plugin</artifactId>
+ <version>${os-maven-plugin.version}</version>
+ </extension>
+ </extensions>
<plugins>
<plugin>
+ <groupId>org.xolstice.maven.plugins</groupId>
+ <artifactId>protobuf-maven-plugin</artifactId>
+ <version>${protobuf-maven-plugin.version}</version>
+ <extensions>true</extensions>
+ <configuration>
+ <protocArtifact>
+ com.google.protobuf:protoc:${protobuf-compile.version}:exe:${os.detected.classifier}
+ </protocArtifact>
+ <protoSourceRoot>${basedir}/src/main/proto/</protoSourceRoot>
+ <includes>
+ <include>DatanodeContainerProtocol.proto</include>
+ </includes>
+ <outputDirectory>target/generated-sources/java</outputDirectory>
+ <clearOutputDirectory>false</clearOutputDirectory>
+ </configuration>
+ <executions>
+ <execution>
+ <id>compile-protoc</id>
+ <goals>
+ <goal>compile</goal>
+ <goal>test-compile</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>maven-antrun-plugin</artifactId>
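+ <!-- Rewrites the generated proto3 sources to reference the protobuf
+ classes shaded inside Ratis; presumably this keeps them from clashing
+ with the unshaded protobuf already on Hadoop's classpath. -->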
+ <executions>
+ <execution>
+ <phase>generate-sources</phase>
+ <configuration>
+ <tasks>
+ <replace token="com.google.protobuf" value="org.apache.ratis.shaded.com.google.protobuf"
+ dir="target/generated-sources/java/org/apache/hadoop/hdds/protocol/datanode/proto">
+ </replace>
+ </tasks>
+ </configuration>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-maven-plugins</artifactId>
<executions>
@@ -107,7 +158,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<directory>${basedir}/src/main/proto</directory>
<includes>
<include>StorageContainerLocationProtocol.proto</include>
- <include>DatanodeContainerProtocol.proto</include>
<include>hdds.proto</include>
<include>ScmBlockLocationProtocol.proto</include>
</includes>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
index 355a36d..7bf8f01 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
@@ -17,7 +17,7 @@
package org.apache.hadoop.hdds.client;
import org.apache.commons.lang.builder.ToStringBuilder;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
index c96f79b..56cc741 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
@@ -21,9 +21,9 @@ package org.apache.hadoop.hdds.scm;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerCommandResponseProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index dcf9fed..b52819a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -20,7 +20,8 @@ package org.apache.hadoop.hdds.scm.client;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .ContainerData;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import java.io.IOException;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java
index 35d8444..f1405ff 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdds.scm.container.common.helpers;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import java.io.IOException;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index ca388d9..5fbf373 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -18,39 +18,41 @@
package org.apache.hadoop.hdds.scm.storage;
-import com.google.protobuf.ByteString;
+import org.apache.ratis.shaded.com.google.protobuf.ByteString;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.GetKeyRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .GetKeyRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.GetKeyResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.GetSmallFileRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.GetSmallFileResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyData;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.PutKeyRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyData;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .PutKeyRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.PutSmallFileRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ReadChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ReadChunkResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ReadContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ReadContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Type;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.WriteChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyValue;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
import org.apache.hadoop.hdds.client.BlockID;
import java.io.IOException;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
index 7cf95a9..21916b5 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
@@ -19,7 +19,7 @@
package org.apache.hadoop.ozone.container.common.helpers;
import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import java.io.IOException;
import java.util.Map;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
index c485c7f..129e4a8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.ozone.container.common.helpers;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.client.BlockID;
import java.io.IOException;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/ShadedProtoUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/ShadedProtoUtil.java b/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/ShadedProtoUtil.java
deleted file mode 100644
index 29242ad..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/ShadedProtoUtil.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ratis.shaded.com.google.protobuf;
-
-/** Utilities for the shaded protobuf in Ratis. */
-public interface ShadedProtoUtil {
- /**
- * @param bytes
- * @return the wrapped shaded {@link ByteString} (no coping).
- */
- static ByteString asShadedByteString(byte[] bytes) {
- return ByteString.wrap(bytes);
- }
-
- /**
- * @param shaded
- * @return a {@link com.google.protobuf.ByteString} (require coping).
- */
- static com.google.protobuf.ByteString asByteString(ByteString shaded) {
- return com.google.protobuf.ByteString.copyFrom(
- shaded.asReadOnlyByteBuffer());
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/package-info.java
deleted file mode 100644
index 032dd96..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ratis.shaded.com.google.protobuf;
-
-/**
- * This package contains classes related to the shaded protobuf in Apache Ratis.
- */
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 80bc22d..3479866 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -24,7 +24,7 @@
// This file contains protocol buffers that are used to transfer data
// to and from the datanode.
-option java_package = "org.apache.hadoop.hdds.protocol.proto";
+option java_package = "org.apache.hadoop.hdds.protocol.datanode.proto";
option java_outer_classname = "ContainerProtos";
option java_generate_equals_and_hash = true;
package hadoop.hdds.datanode;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java
index 8c5609d..eba8594 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java
@@ -18,12 +18,12 @@
package org.apache.hadoop.ozone.container.common.helpers;
import com.google.common.base.Preconditions;
-import com.google.protobuf.ByteString;
+import org.apache.ratis.shaded.com.google.protobuf.ByteString;
import org.apache.commons.codec.binary.Hex;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.impl.ChunkManagerImpl;
@@ -40,22 +40,22 @@ import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.concurrent.ExecutionException;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .CHECKSUM_MISMATCH;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .CONTAINER_INTERNAL_ERROR;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .CONTAINER_NOT_FOUND;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .INVALID_WRITE_SIZE;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .IO_EXCEPTION;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .OVERWRITE_FLAG_REQUIRED;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .UNABLE_TO_FIND_CHUNK;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .UNABLE_TO_FIND_DATA_DIR;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.CHECKSUM_MISMATCH;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.CONTAINER_INTERNAL_ERROR;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.CONTAINER_NOT_FOUND;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.INVALID_WRITE_SIZE;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.IO_EXCEPTION;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.OVERWRITE_FLAG_REQUIRED;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.UNABLE_TO_FIND_CHUNK;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.UNABLE_TO_FIND_DATA_DIR;
/**
* Set of utility functions used by the chunk Manager.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
index 947dc7d..63111c8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.ozone.container.common.helpers;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerLifeCycleState;
import org.apache.hadoop.ozone.OzoneConsts;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
index d4d732b..4300b2d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.ozone.container.common.helpers;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
index 959d88c..9b52316 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl;
@@ -42,9 +42,9 @@ import java.nio.file.Path;
import java.nio.file.Paths;
import static org.apache.commons.io.FilenameUtils.removeExtension;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result
.INVALID_ARGUMENT;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result
.UNABLE_TO_FIND_DATA_DIR;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java
index ec27452..a2875be 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.ozone.container.common.helpers;
import com.google.common.base.Preconditions;
-import com.google.protobuf.ByteString;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.ratis.shaded.com.google.protobuf.ByteString;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
/**
* File Utils are helper routines used by putSmallFile and getSmallFile
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java
index dbd5772..f831d45 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java
@@ -21,17 +21,17 @@ import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
import org.apache.hadoop.utils.MetadataStore;
import java.io.IOException;
import java.nio.charset.Charset;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .NO_SUCH_KEY;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .UNABLE_TO_READ_METADATA_DB;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.NO_SUCH_KEY;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.UNABLE_TO_READ_METADATA_DB;
/**
* Utils functions to help key functions.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java
index 3505196..fa82026 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java
@@ -21,7 +21,7 @@ import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
@@ -40,10 +40,10 @@ import java.nio.file.StandardCopyOption;
import java.security.NoSuchAlgorithmException;
import java.util.concurrent.ExecutionException;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .CONTAINER_INTERNAL_ERROR;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .UNSUPPORTED_REQUEST;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.CONTAINER_INTERNAL_ERROR;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.UNSUPPORTED_REQUEST;
/**
* An implementation of ChunkManager that is used by default in ozone.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index cb60334..240beba 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos;
import org.apache.hadoop.hdds.protocol.proto
@@ -79,26 +79,26 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .CONTAINER_EXISTS;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .CONTAINER_INTERNAL_ERROR;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .CONTAINER_NOT_FOUND;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .ERROR_IN_COMPACT_DB;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .INVALID_CONFIG;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .IO_EXCEPTION;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .NO_SUCH_ALGORITHM;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .UNABLE_TO_READ_METADATA_DB;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .UNCLOSED_CONTAINER_IO;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .UNSUPPORTED_REQUEST;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.CONTAINER_EXISTS;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.CONTAINER_INTERNAL_ERROR;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.CONTAINER_NOT_FOUND;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.ERROR_IN_COMPACT_DB;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.INVALID_CONFIG;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.IO_EXCEPTION;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.NO_SUCH_ALGORITHM;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.UNABLE_TO_READ_METADATA_DB;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.UNCLOSED_CONTAINER_IO;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.UNSUPPORTED_REQUEST;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION;
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java
index 8d1b17c..3b478cd 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java
@@ -19,16 +19,16 @@
package org.apache.hadoop.ozone.container.common.impl;
import com.google.common.base.Preconditions;
-import com.google.protobuf.ByteString;
+import org.apache.ratis.shaded.com.google.protobuf.ByteString;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Type;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
import org.apache.hadoop.ozone.container.common.helpers.ChunkUtils;
@@ -48,14 +48,14 @@ import java.security.NoSuchAlgorithmException;
import java.util.LinkedList;
import java.util.List;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .CLOSED_CONTAINER_IO;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .GET_SMALL_FILE_ERROR;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .NO_SUCH_ALGORITHM;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .PUT_SMALL_FILE_ERROR;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.CLOSED_CONTAINER_IO;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.GET_SMALL_FILE_ERROR;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.NO_SUCH_ALGORITHM;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.PUT_SMALL_FILE_ERROR;
/**
* Ozone Container dispatcher takes a call from the netty server and routes it
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java
index f920ded..0ca7354 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java
@@ -23,7 +23,7 @@ import com.google.common.primitives.Longs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
import org.apache.hadoop.ozone.container.common.helpers.KeyData;
@@ -40,8 +40,8 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Map;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
- .NO_SUCH_KEY;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .Result.NO_SUCH_KEY;
/**
* Key Manager impl.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java
index 26dcf21..c58fb9d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.container.common.interfaces;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
index 984fe41..7e12614 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
@@ -18,9 +18,9 @@
package org.apache.hadoop.ozone.container.common.interfaces;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerCommandResponseProto;
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
index 7c3fa30..99845fa 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
@@ -19,13 +19,14 @@
package org.apache.hadoop.ozone.container.common.statemachine.background;
import com.google.common.collect.Lists;
-import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.ratis.shaded.com.google.protobuf
+ .InvalidProtocolBufferException;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java
index 50e45b4..7105fd7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java
@@ -19,13 +19,14 @@
package org.apache.hadoop.ozone.container.common.transport.server;
import com.google.common.base.Preconditions;
-import io.netty.bootstrap.ServerBootstrap;
-import io.netty.channel.Channel;
-import io.netty.channel.EventLoopGroup;
-import io.netty.channel.nio.NioEventLoopGroup;
-import io.netty.channel.socket.nio.NioServerSocketChannel;
-import io.netty.handler.logging.LogLevel;
-import io.netty.handler.logging.LoggingHandler;
+import org.apache.ratis.shaded.io.netty.bootstrap.ServerBootstrap;
+import org.apache.ratis.shaded.io.netty.channel.Channel;
+import org.apache.ratis.shaded.io.netty.channel.EventLoopGroup;
+import org.apache.ratis.shaded.io.netty.channel.nio.NioEventLoopGroup;
+import org.apache.ratis.shaded.io.netty.channel.socket.nio
+ .NioServerSocketChannel;
+import org.apache.ratis.shaded.io.netty.handler.logging.LogLevel;
+import org.apache.ratis.shaded.io.netty.handler.logging.LoggingHandler;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
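
The XceiverServer hunk above swaps each io.netty import for its Ratis-shaded twin under org.apache.ratis.shaded.io.netty, matching the rest of this commit (ContainerProtos relocated to hdds.protocol.datanode.proto, protobuf taken from the shaded Ratis runtime), so the standalone transport no longer pins its own Netty version. As a rough illustration of the pattern, here is a minimal bootstrap against the shaded package; it is a sketch, not code from the patch, and apart from the package prefix it is identical to stock Netty:

import org.apache.ratis.shaded.io.netty.bootstrap.ServerBootstrap;
import org.apache.ratis.shaded.io.netty.channel.ChannelInitializer;
import org.apache.ratis.shaded.io.netty.channel.EventLoopGroup;
import org.apache.ratis.shaded.io.netty.channel.nio.NioEventLoopGroup;
import org.apache.ratis.shaded.io.netty.channel.socket.SocketChannel;
import org.apache.ratis.shaded.io.netty.channel.socket.nio.NioServerSocketChannel;

public class ShadedNettySketch {
  public static void main(String[] args) throws Exception {
    EventLoopGroup boss = new NioEventLoopGroup();
    EventLoopGroup worker = new NioEventLoopGroup();
    try {
      ServerBootstrap bootstrap = new ServerBootstrap()
          .group(boss, worker)
          .channel(NioServerSocketChannel.class)
          .childHandler(new ChannelInitializer<SocketChannel>() {
            @Override
            protected void initChannel(SocketChannel ch) {
              // Protobuf codecs and the dispatcher handler would be added
              // to ch.pipeline() here, as XceiverServerInitializer does.
            }
          });
      // Bind to an ephemeral port, then shut straight down; this only
      // demonstrates that the shaded API mirrors the unshaded one.
      bootstrap.bind(0).sync().channel().close().sync();
    } finally {
      boss.shutdownGracefully();
      worker.shutdownGracefully();
    }
  }
}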
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerHandler.java
index 5947dde..3765299 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerHandler.java
@@ -18,11 +18,11 @@
package org.apache.hadoop.ozone.container.common.transport.server;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.SimpleChannelInboundHandler;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.ratis.shaded.io.netty.channel.ChannelHandlerContext;
+import org.apache.ratis.shaded.io.netty.channel.SimpleChannelInboundHandler;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerCommandResponseProto;
import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
import org.slf4j.Logger;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerInitializer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerInitializer.java
index 78ba26b..e405cf9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerInitializer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerInitializer.java
@@ -19,14 +19,16 @@
package org.apache.hadoop.ozone.container.common.transport.server;
import com.google.common.base.Preconditions;
-import io.netty.channel.ChannelInitializer;
-import io.netty.channel.ChannelPipeline;
-import io.netty.channel.socket.SocketChannel;
-import io.netty.handler.codec.protobuf.ProtobufDecoder;
-import io.netty.handler.codec.protobuf.ProtobufEncoder;
-import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-import io.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.ratis.shaded.io.netty.channel.ChannelInitializer;
+import org.apache.ratis.shaded.io.netty.channel.ChannelPipeline;
+import org.apache.ratis.shaded.io.netty.channel.socket.SocketChannel;
+import org.apache.ratis.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
+import org.apache.ratis.shaded.io.netty.handler.codec.protobuf.ProtobufEncoder;
+import org.apache.ratis.shaded.io.netty.handler.codec.protobuf
+ .ProtobufVarint32FrameDecoder;
+import org.apache.ratis.shaded.io.netty.handler.codec.protobuf
+ .ProtobufVarint32LengthFieldPrepender;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerCommandRequestProto;
import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 89eaace..56c52bb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -19,13 +19,14 @@
package org.apache.hadoop.ozone.container.common.transport.server.ratis;
import com.google.common.base.Preconditions;
-import com.google.protobuf.InvalidProtocolBufferException;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.ratis.shaded.com.google.protobuf
+ .InvalidProtocolBufferException;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.WriteChunkRequestProto;
import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
import org.apache.ratis.conf.RaftProperties;
@@ -34,7 +35,6 @@ import org.apache.ratis.protocol.RaftClientRequest;
import org.apache.ratis.protocol.RaftPeerId;
import org.apache.ratis.server.storage.RaftStorage;
import org.apache.ratis.shaded.com.google.protobuf.ByteString;
-import org.apache.ratis.shaded.com.google.protobuf.ShadedProtoUtil;
import org.apache.ratis.shaded.proto.RaftProtos.LogEntryProto;
import org.apache.ratis.shaded.proto.RaftProtos.SMLogEntryProto;
import org.apache.ratis.statemachine.StateMachineStorage;
@@ -159,8 +159,8 @@ public class ContainerStateMachine extends BaseStateMachine {
.build();
log = SMLogEntryProto.newBuilder()
- .setData(getShadedByteString(commitContainerCommandProto))
- .setStateMachineData(getShadedByteString(dataContainerCommandProto))
+ .setData(commitContainerCommandProto.toByteString())
+ .setStateMachineData(dataContainerCommandProto.toByteString())
.build();
} else if (proto.getCmdType() == ContainerProtos.Type.CreateContainer) {
log = SMLogEntryProto.newBuilder()
@@ -175,21 +175,16 @@ public class ContainerStateMachine extends BaseStateMachine {
return new TransactionContextImpl(this, request, log);
}
- private ByteString getShadedByteString(ContainerCommandRequestProto proto) {
- return ShadedProtoUtil.asShadedByteString(proto.toByteArray());
- }
-
private ContainerCommandRequestProto getRequestProto(ByteString request)
throws InvalidProtocolBufferException {
- return ContainerCommandRequestProto.parseFrom(
- ShadedProtoUtil.asByteString(request));
+ return ContainerCommandRequestProto.parseFrom(request);
}
private Message runCommand(ContainerCommandRequestProto requestProto) {
LOG.trace("dispatch {}", requestProto);
ContainerCommandResponseProto response = dispatcher.dispatch(requestProto);
LOG.trace("response {}", response);
- return () -> ShadedProtoUtil.asShadedByteString(response.toByteArray());
+ return () -> response.toByteString();
}
private CompletableFuture<Message> handleWriteChunk(
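
The ContainerStateMachine hunk above is the payoff of the shading: once ContainerProtos is generated against the Ratis-shaded protobuf runtime, toByteString() already yields the shaded ByteString that SMLogEntryProto expects, so the ShadedProtoUtil byte[] round-trip can simply be deleted. A minimal sketch of the idea follows; ShadedByteStringSketch and its helper are illustrative names, not code from the patch:

import org.apache.ratis.shaded.com.google.protobuf.ByteString;

public class ShadedByteStringSketch {
  // Stand-in for a message generated against the shaded runtime, whose
  // toByteString() returns the shaded type directly.
  static ByteString toShadedByteString(byte[] serializedProto) {
    return ByteString.copyFrom(serializedProto);
  }

  public static void main(String[] args) {
    // Before this commit the equivalent hand-off was
    // ShadedProtoUtil.asShadedByteString(proto.toByteArray()),
    // an extra copy across the shaded/unshaded boundary.
    ByteString data = toShadedByteString("write-chunk-payload".getBytes());
    System.out.println(data.size());
  }
}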
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
index cefa28c..6027bec 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
@@ -26,8 +26,9 @@ import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .ContainerData;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerLifeCycleState;
import java.io.IOException;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
index ccc5911..e1a2918 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.client.io;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.Seekable;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
index 41ceee4..c6e56b3 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.client.io;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.FSExceptionMessages;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java
index e7215ef..e74fffd 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java
@@ -19,7 +19,7 @@
package org.apache.hadoop.ozone.client.io;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyData;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyData;
import org.apache.hadoop.hdds.client.BlockID;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index fed725c..d2a6434 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -19,19 +19,19 @@
package org.apache.hadoop.ozone.container;
import com.google.common.base.Preconditions;
-import com.google.protobuf.ByteString;
+import org.apache.ratis.shaded.com.google.protobuf.ByteString;
import org.apache.commons.codec.binary.Hex;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerCommandResponseProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyValue;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index a60da21..56fd0b1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index e51c3f7..a7cab4e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -22,7 +22,7 @@ import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -36,9 +36,7 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import org.apache.hadoop.ozone.container.common.helpers.KeyData;
import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.util.Time;
import org.apache.hadoop.utils.MetadataStore;
import org.junit.After;
import org.junit.AfterClass;
@@ -77,7 +75,7 @@ import static org.apache.hadoop.ozone.container.ContainerTestHelper.getChunk;
import static org.apache.hadoop.ozone.container.ContainerTestHelper.getData;
import static org.apache.hadoop.ozone.container.ContainerTestHelper
.setDataChecksum;
-import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.Stage.COMBINED;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
index 0bba5c1..2921be2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
@@ -26,9 +26,11 @@ import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandResponseProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .ContainerCommandRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .ContainerCommandResponseProto;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 4e1d14b..513974a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -19,7 +19,7 @@
package org.apache.hadoop.ozone.container.ozoneimpl;
import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -29,7 +29,6 @@ import org.apache.hadoop.hdds.scm.XceiverClient;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Time;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
index b207914..eb170ea 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
@@ -18,11 +18,13 @@
package org.apache.hadoop.ozone.container.server;
-import io.netty.channel.embedded.EmbeddedChannel;
+import org.apache.ratis.shaded.io.netty.channel.embedded.EmbeddedChannel;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandResponseProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .ContainerCommandRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .ContainerCommandResponseProto;
import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.ozone.OzoneConfigKeys;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
index f56d78c..ce1fe46 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.ozone.scm;
import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d64d60/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
index 99742c2..d6f5d32 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
@@ -26,24 +26,22 @@ import java.util.ArrayList;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
-import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandResponseProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .ContainerCommandRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+ .ContainerCommandResponseProto;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientMetrics;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Time;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
[03/50] [abbrv] hadoop git commit: HDDS-43: Rename hdsl to hdds in
hadoop-ozone/acceptance-test/README.md. Contributed by Sandeep Nemuri.
Posted by xy...@apache.org.
HDDS-43: Rename hdsl to hdds in hadoop-ozone/acceptance-test/README.md. Contributed by Sandeep Nemuri.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84b305f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84b305f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84b305f1
Branch: refs/heads/HDDS-4
Commit: 84b305f11a67e6f420e33e1ec30640b8214997e1
Parents: 30293f6
Author: Bharat Viswanadham <bh...@apache.org>
Authored: Thu May 10 17:24:40 2018 -0700
Committer: Bharat Viswanadham <bh...@apache.org>
Committed: Thu May 10 17:24:40 2018 -0700
----------------------------------------------------------------------
hadoop-ozone/acceptance-test/README.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/84b305f1/hadoop-ozone/acceptance-test/README.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/README.md b/hadoop-ozone/acceptance-test/README.md
index 07d10fb..3a0ca49 100644
--- a/hadoop-ozone/acceptance-test/README.md
+++ b/hadoop-ozone/acceptance-test/README.md
@@ -12,7 +12,7 @@
limitations under the License. See accompanying LICENSE file.
-->
-# Acceptance test suite for Ozone/Hdsl
+# Acceptance test suite for Ozone/Hdds
This project contains acceptance tests for ozone/hdds using docker-compose and [robot framework](http://robotframework.org/).
@@ -20,7 +20,7 @@ This project contains acceptance tests for ozone/hdds using docker-compose and [
To run the acceptance tests, please activate the `ozone-acceptance-test` profile and do a full build.
-Typically you need a `mvn install -Phdsl,ozone-acceptance-test,dist -DskipTests` for a build without unit tests but with acceptance test.
+Typically you need a `mvn install -Phdds,ozone-acceptance-test,dist -DskipTests` for a build without unit tests but with acceptance test.
Notes:
[08/50] [abbrv] hadoop git commit: HDDS-47. Add acceptance tests for
Ozone Shell. Contributed by Lokesh Jain.
Posted by xy...@apache.org.
HDDS-47. Add acceptance tests for Ozone Shell.
Contributed by Lokesh Jain.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a93af73
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a93af73
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a93af73
Branch: refs/heads/HDDS-4
Commit: 3a93af731ee09307b6f07e0fc739d1b5653cf69d
Parents: d50c4d7
Author: Anu Engineer <ae...@apache.org>
Authored: Fri May 11 10:20:04 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Fri May 11 10:20:04 2018 -0700
----------------------------------------------------------------------
.../test/robotframework/acceptance/ozone.robot | 24 ++++++++++++++++++--
.../hadoop/ozone/client/OzoneClientUtils.java | 1 +
2 files changed, 23 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a93af73/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
index 1a9cee7..211ec4c 100644
--- a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
+++ b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
@@ -52,9 +52,29 @@ Test ozone cli
Execute on datanode ozone oz -createVolume http://ksm/hive -user bilbo -quota 100TB -root
${result} = Execute on datanode ozone oz -listVolume o3://ksm -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
Should contain ${result} createdOn
+ Execute on datanode ozone oz -updateVolume http://ksm/hive -user bill -quota 10TB
+ ${result} = Execute on datanode ozone oz -infoVolume http://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
+ Should Be Equal ${result} bill
+ ${result} = Execute on datanode ozone oz -infoVolume http://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size'
+ Should Be Equal ${result} 10
Execute on datanode ozone oz -createBucket http://ksm/hive/bb1
- ${result} Execute on datanode ozone oz -listBucket o3://ksm/hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
+ ${result} = Execute on datanode ozone oz -infoBucket http://ksm/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType'
+ Should Be Equal ${result} DISK
+ ${result} = Execute on datanode ozone oz -updateBucket http://ksm/hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type'
+ Should Be Equal ${result} GROUP
+ ${result} = Execute on datanode ozone oz -updateBucket http://ksm/hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type'
+ Should Be Equal ${result} USER
+ ${result} = Execute on datanode ozone oz -listBucket o3://ksm/hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
Should Be Equal ${result} hive
+ Execute on datanode ozone oz -putKey http://ksm/hive/bb1/key1 -file NOTICE.txt
+ Execute on datanode rm -f NOTICE.txt.1
+ Execute on datanode ozone oz -getKey http://ksm/hive/bb1/key1 -file NOTICE.txt.1
+ Execute on datanode ls -l NOTICE.txt.1
+ ${result} = Execute on datanode ozone oz -infoKey http://ksm/hive/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")'
+ Should contain ${result} createdOn
+ ${result} = Execute on datanode ozone oz -listKey o3://ksm/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
+ Should Be Equal ${result} key1
+ Execute on datanode ozone oz -deleteKey http://ksm/hive/bb1/key1 -v
Execute on datanode ozone oz -deleteBucket http://ksm/hive/bb1
Execute on datanode ozone oz -deleteVolume http://ksm/hive -user bilbo
@@ -106,12 +126,12 @@ Scale datanodes up
Execute on
[arguments] ${componentname} ${command}
${rc} ${return} = Run docker compose exec ${componentname} ${command}
- Log ${return}
[return] ${return}
Run docker compose
[arguments] ${command}
Set Environment Variable HADOOPDIR ${basedir}/../../hadoop-dist/target/hadoop-${version}
${rc} ${output} = Run And Return Rc And Output docker-compose -f ${basedir}/target/compose/docker-compose.yaml ${command}
+ Log ${output}
Should Be Equal As Integers ${rc} 0
[return] ${rc} ${output}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a93af73/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
index 5c83d9b..6be61e2 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
@@ -47,6 +47,7 @@ public final class OzoneClientUtils {
bucketInfo.setStorageType(bucket.getStorageType());
bucketInfo.setVersioning(
OzoneConsts.Versioning.getVersioning(bucket.getVersioning()));
+ bucketInfo.setAcls(bucket.getAcls());
return bucketInfo;
}
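
The one-line OzoneClientUtils change is what the new ACL assertions in ozone.robot depend on: without copying the ACLs onto the returned object, -infoBucket and -updateBucket would serialize an empty ACL list even after -addAcl succeeds. A simplified sketch of that conversion, using hypothetical stand-in types rather than the real Ozone classes:

import java.util.ArrayList;
import java.util.List;

public class BucketAclSketch {
  // Illustrative stand-ins for OzoneBucket and the response BucketInfo.
  static class Bucket { List<String> acls = new ArrayList<>(); }
  static class BucketInfo {
    private List<String> acls = new ArrayList<>();
    void setAcls(List<String> acls) { this.acls = acls; }
    List<String> getAcls() { return acls; }
  }

  static BucketInfo asBucketInfo(Bucket bucket) {
    BucketInfo info = new BucketInfo();
    // The fix: carry the ACLs over; dropping this line reproduces the
    // empty-ACL responses the new acceptance tests would catch.
    info.setAcls(bucket.acls);
    return info;
  }

  public static void main(String[] args) {
    Bucket bucket = new Bucket();
    bucket.acls.add("user:frodo:rw");
    System.out.println(asBucketInfo(bucket).getAcls()); // [user:frodo:rw]
  }
}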
[39/50] [abbrv] hadoop git commit: YARN-8236. Invalid Kerberos
principal file name causes NPE in native service. Contributed by Gour Saha.
Posted by xy...@apache.org.
YARN-8236. Invalid Kerberos principal file name causes NPE in native service. Contributed by Gour Saha.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58b97c79
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58b97c79
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58b97c79
Branch: refs/heads/HDDS-4
Commit: 58b97c79e34901938d59acc84ed48c1f9344996a
Parents: ffb9210
Author: Sunil G <su...@apache.org>
Authored: Tue May 15 12:17:35 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Tue May 15 12:17:35 2018 +0530
----------------------------------------------------------------------
.../yarn/service/client/ServiceClient.java | 46 +++++++++++---------
.../exceptions/RestApiErrorMessages.java | 2 +
.../yarn/service/utils/ServiceApiUtil.java | 38 ++++++++++------
.../hadoop/yarn/service/TestServiceApiUtil.java | 41 +++++++++++++++++
4 files changed, 93 insertions(+), 34 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/58b97c79/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 67306d2..364a94c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -1065,7 +1065,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
LOG.warn("No Kerberos principal name specified for " + service.getName());
return;
}
- if(StringUtils.isEmpty(service.getKerberosPrincipal().getKeytab())) {
+ if (StringUtils.isEmpty(service.getKerberosPrincipal().getKeytab())) {
LOG.warn("No Kerberos keytab specified for " + service.getName());
return;
}
@@ -1077,27 +1077,31 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
throw new YarnException(e);
}
- switch (keytabURI.getScheme()) {
- case "hdfs":
- Path keytabOnhdfs = new Path(keytabURI);
- if (!fileSystem.getFileSystem().exists(keytabOnhdfs)) {
- LOG.warn(service.getName() + "'s keytab (principalName = " +
- principalName + ") doesn't exist at: " + keytabOnhdfs);
- return;
+ if (keytabURI.getScheme() != null) {
+ switch (keytabURI.getScheme()) {
+ case "hdfs":
+ Path keytabOnhdfs = new Path(keytabURI);
+ if (!fileSystem.getFileSystem().exists(keytabOnhdfs)) {
+ LOG.warn(service.getName() + "'s keytab (principalName = "
+ + principalName + ") doesn't exist at: " + keytabOnhdfs);
+ return;
+ }
+ LocalResource keytabRes = fileSystem.createAmResource(keytabOnhdfs,
+ LocalResourceType.FILE);
+ localResource.put(String.format(YarnServiceConstants.KEYTAB_LOCATION,
+ service.getName()), keytabRes);
+ LOG.info("Adding " + service.getName() + "'s keytab for "
+ + "localization, uri = " + keytabOnhdfs);
+ break;
+ case "file":
+ LOG.info("Using a keytab from localhost: " + keytabURI);
+ break;
+ default:
+ LOG.warn("Unsupported keytab URI scheme " + keytabURI);
+ break;
}
- LocalResource keytabRes =
- fileSystem.createAmResource(keytabOnhdfs, LocalResourceType.FILE);
- localResource.put(String.format(YarnServiceConstants.KEYTAB_LOCATION,
- service.getName()), keytabRes);
- LOG.debug("Adding " + service.getName() + "'s keytab for " +
- "localization, uri = " + keytabOnhdfs);
- break;
- case "file":
- LOG.debug("Using a keytab from localhost: " + keytabURI);
- break;
- default:
- LOG.warn("Unsupported URI scheme " + keytabURI);
- break;
+ } else {
+ LOG.warn("Unsupported keytab URI scheme " + keytabURI);
}
}
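Condensed, the post-patch control flow checks the scheme for null before entering the switch and treats a missing scheme like any other unsupported one. A self-contained sketch of that pattern, with illustrative class and method names and println standing in for the logger calls:

import java.net.URI;
import java.net.URISyntaxException;

public class KeytabSchemeGuard {
  static void handleKeytab(String keytab) throws URISyntaxException {
    URI keytabURI = new URI(keytab);
    if (keytabURI.getScheme() == null) {
      // Pre-patch code fell straight into the switch and threw an NPE here.
      System.out.println("Unsupported keytab URI scheme " + keytabURI);
      return;
    }
    switch (keytabURI.getScheme()) {
    case "hdfs":
      System.out.println("Would localize the keytab from HDFS: " + keytabURI);
      break;
    case "file":
      System.out.println("Using a keytab from localhost: " + keytabURI);
      break;
    default:
      System.out.println("Unsupported keytab URI scheme " + keytabURI);
      break;
    }
  }

  public static void main(String[] args) throws URISyntaxException {
    handleKeytab("/etc/krb5.keytab");         // no scheme: warn and return
    handleKeytab("hdfs:///keytabs/a.keytab"); // would be localized for the AM
    handleKeytab("file:///tmp/a.keytab");     // used in place on the host
  }
}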
http://git-wip-us.apache.org/repos/asf/hadoop/blob/58b97c79/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
index 6b2b8af..0e42533 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
@@ -103,4 +103,6 @@ public interface RestApiErrorMessages {
+ "expression element name %s specified in placement policy of component "
+ "%s. Expression element names should be a valid constraint name or an "
+ "expression name defined for this component only.";
+ String ERROR_KEYTAB_URI_SCHEME_INVALID = "Unsupported keytab URI scheme: %s";
+ String ERROR_KEYTAB_URI_INVALID = "Invalid keytab URI: %s";
}
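The two new constants are printf-style templates expanded with String.format. The test added at the end of this patch strips the trailing "%s" (two characters) before doing a contains() check, because the URISyntaxException detail substituted into ERROR_KEYTAB_URI_INVALID varies. A standalone demonstration, with the constants copied inline:

public class ErrorTemplateDemo {
  // Copies of the constants added above, for a self-contained example.
  static final String ERROR_KEYTAB_URI_SCHEME_INVALID =
      "Unsupported keytab URI scheme: %s";
  static final String ERROR_KEYTAB_URI_INVALID = "Invalid keytab URI: %s";

  public static void main(String[] args) {
    // String.format substitutes the offending keytab value for %s.
    System.out.println(
        String.format(ERROR_KEYTAB_URI_SCHEME_INVALID, "/some/path"));
    // Stripping the 2-char "%s" suffix leaves a stable prefix to match on.
    System.out.println(
        ERROR_KEYTAB_URI_INVALID.substring(
            0, ERROR_KEYTAB_URI_INVALID.length() - 2)); // "Invalid keytab URI: "
  }
}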
http://git-wip-us.apache.org/repos/asf/hadoop/blob/58b97c79/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
index a4e5c0d..6e62c56 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
@@ -29,13 +29,14 @@ import org.apache.hadoop.registry.client.api.RegistryConstants;
import org.apache.hadoop.registry.client.binding.RegistryUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.service.api.records.Container;
-import org.apache.hadoop.yarn.service.api.records.Service;
import org.apache.hadoop.yarn.service.api.records.Artifact;
import org.apache.hadoop.yarn.service.api.records.Component;
import org.apache.hadoop.yarn.service.api.records.Configuration;
+import org.apache.hadoop.yarn.service.api.records.Container;
+import org.apache.hadoop.yarn.service.api.records.KerberosPrincipal;
import org.apache.hadoop.yarn.service.api.records.PlacementConstraint;
import org.apache.hadoop.yarn.service.api.records.Resource;
+import org.apache.hadoop.yarn.service.api.records.Service;
import org.apache.hadoop.yarn.service.exceptions.SliderException;
import org.apache.hadoop.yarn.service.conf.RestApiConstants;
import org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages;
@@ -111,14 +112,7 @@ public class ServiceApiUtil {
}
if (UserGroupInformation.isSecurityEnabled()) {
- if (!StringUtils.isEmpty(service.getKerberosPrincipal().getKeytab())) {
- try {
- // validate URI format
- new URI(service.getKerberosPrincipal().getKeytab());
- } catch (URISyntaxException e) {
- throw new IllegalArgumentException(e);
- }
- }
+ validateKerberosPrincipal(service.getKerberosPrincipal());
}
// Validate the Docker client config.
@@ -145,9 +139,8 @@ public class ServiceApiUtil {
throw new IllegalArgumentException("Component name collision: " +
comp.getName());
}
- // If artifact is of type SERVICE (which cannot be filled from
- // global), read external service and add its components to this
- // service
+ // If artifact is of type SERVICE (which cannot be filled from global),
+ // read external service and add its components to this service
if (comp.getArtifact() != null && comp.getArtifact().getType() ==
Artifact.TypeEnum.SERVICE) {
if (StringUtils.isEmpty(comp.getArtifact().getId())) {
@@ -226,6 +219,25 @@ public class ServiceApiUtil {
}
}
+ public static void validateKerberosPrincipal(
+ KerberosPrincipal kerberosPrincipal) throws IOException {
+ if (!StringUtils.isEmpty(kerberosPrincipal.getKeytab())) {
+ try {
+ // validate URI format
+ URI keytabURI = new URI(kerberosPrincipal.getKeytab());
+ if (keytabURI.getScheme() == null) {
+ throw new IllegalArgumentException(String.format(
+ RestApiErrorMessages.ERROR_KEYTAB_URI_SCHEME_INVALID,
+ kerberosPrincipal.getKeytab()));
+ }
+ } catch (URISyntaxException e) {
+ throw new IllegalArgumentException(
+ String.format(RestApiErrorMessages.ERROR_KEYTAB_URI_INVALID,
+ e.getLocalizedMessage()));
+ }
+ }
+ }
+
private static void validateDockerClientConfiguration(Service service,
org.apache.hadoop.conf.Configuration conf) throws IOException {
String dockerClientConfig = service.getDockerClientConfig();
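The new validateKerberosPrincipal method therefore enforces two rules at submission time: the keytab string must parse as a URI, and that URI must carry an explicit scheme. A simplified standalone re-creation of the rule (error strings inlined, Hadoop types omitted; not the exact Hadoop code):

import java.net.URI;
import java.net.URISyntaxException;

public class KeytabValidationSketch {
  static void validateKeytab(String keytab) {
    if (keytab == null || keytab.isEmpty()) {
      return; // no keytab configured, nothing to validate
    }
    try {
      if (new URI(keytab).getScheme() == null) {
        throw new IllegalArgumentException(
            "Unsupported keytab URI scheme: " + keytab);
      }
    } catch (URISyntaxException e) {
      throw new IllegalArgumentException(
          "Invalid keytab URI: " + e.getLocalizedMessage());
    }
  }

  public static void main(String[] args) {
    validateKeytab("file:///tmp/a.keytab"); // accepted
    try {
      validateKeytab("/some/path");         // rejected: no scheme
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());
    }
    try {
      validateKeytab("/ blank / in / paths"); // rejected: not a valid URI
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());
    }
  }
}

The test file below exercises exactly these three cases: a bare path, a malformed URI with spaces, and a well-formed file:// URI.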
http://git-wip-us.apache.org/repos/asf/hadoop/blob/58b97c79/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java
index d195b2c..b209bbb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.registry.client.api.RegistryConstants;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.service.api.records.Artifact;
import org.apache.hadoop.yarn.service.api.records.Component;
+import org.apache.hadoop.yarn.service.api.records.KerberosPrincipal;
import org.apache.hadoop.yarn.service.api.records.PlacementConstraint;
import org.apache.hadoop.yarn.service.api.records.PlacementPolicy;
import org.apache.hadoop.yarn.service.api.records.Resource;
@@ -45,6 +46,7 @@ import static org.apache.hadoop.yarn.service.conf.RestApiConstants.DEFAULT_UNLIM
import static org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
/**
* Test for ServiceApiUtil helper methods.
@@ -525,4 +527,43 @@ public class TestServiceApiUtil {
Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
}
}
+
+ @Test
+ public void testKerberosPrincipal() throws IOException {
+ SliderFileSystem sfs = ServiceTestUtils.initMockFs();
+ Service app = createValidApplication("comp-a");
+ KerberosPrincipal kp = new KerberosPrincipal();
+ kp.setKeytab("/some/path");
+ kp.setPrincipalName("user/_HOST@domain.com");
+ app.setKerberosPrincipal(kp);
+
+ try {
+ ServiceApiUtil.validateKerberosPrincipal(app.getKerberosPrincipal());
+ Assert.fail(EXCEPTION_PREFIX + "service with invalid keytab URI scheme");
+ } catch (IllegalArgumentException e) {
+ assertEquals(
+ String.format(RestApiErrorMessages.ERROR_KEYTAB_URI_SCHEME_INVALID,
+ kp.getKeytab()),
+ e.getMessage());
+ }
+
+ kp.setKeytab("/ blank / in / paths");
+ try {
+ ServiceApiUtil.validateKerberosPrincipal(app.getKerberosPrincipal());
+ Assert.fail(EXCEPTION_PREFIX + "service with invalid keytab");
+ } catch (IllegalArgumentException e) {
+ // strip out the %s at the end of the RestApiErrorMessages string constant
+ assertTrue(e.getMessage().contains(
+ RestApiErrorMessages.ERROR_KEYTAB_URI_INVALID.substring(0,
+ RestApiErrorMessages.ERROR_KEYTAB_URI_INVALID.length() - 2)));
+ }
+
+ kp.setKeytab("file:///tmp/a.keytab");
+ // now it should succeed
+ try {
+ ServiceApiUtil.validateKerberosPrincipal(app.getKerberosPrincipal());
+ } catch (IllegalArgumentException e) {
+ Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
+ }
+ }
}